├── .gitignore
├── Chapter03
│   ├── 01-OneMax-long.py
│   ├── 02-OneMax-short.py
│   └── 03-OneMax-short-hof.py
├── Chapter04
│   ├── 01-solve-knapsack.py
│   ├── 02-solve-tsp-first-attempt.py
│   ├── 03-solve-tsp.py
│   ├── 04-solve-vrp.py
│   ├── elitism.py
│   ├── knapsack.py
│   ├── tsp.py
│   └── vrp.py
├── Chapter05
│   ├── 01-solve-n-queens.py
│   ├── 02-solve-nurses.py
│   ├── 03-solve-graphs.py
│   ├── elitism.py
│   ├── graphs.py
│   ├── nurses.py
│   ├── queen-thumbnail.png
│   └── queens.py
├── Chapter06
│   ├── 01-optimize-eggholder.py
│   ├── 02-optimize-himmelblau.py
│   ├── 03-optimize-himmelblau-sharing.py
│   ├── 04-optimize-simionescu.py
│   ├── 05-optimize-simionescu-second.py
│   └── elitism.py
├── Chapter07
│   ├── 01-solve-friedman.py
│   ├── 02-solve-zoo.py
│   ├── elitism.py
│   ├── friedman.py
│   └── zoo.py
├── Chapter08
│   ├── 01-hyperparameter-tuning-grid.py
│   ├── 02-hyperparameter-tuning-genetic.py
│   ├── elitism.py
│   └── hyperparameter_tuning_genetic_test.py
├── Chapter09
│   ├── 01-optimize-mlp-layers.py
│   ├── 02-optimize-mlp-hyperparameters.py
│   ├── elitism.py
│   ├── mlp_hyperparameters_test.py
│   └── mlp_layers_test.py
├── Chapter10
│   ├── 01-solve-mountain-car.py
│   ├── 02-solve-cart-pole.py
│   ├── cart_pole.py
│   ├── elitism.py
│   └── mountain_car.py
├── Chapter11
│   ├── 01-reconstruct-with-polygons.py
│   ├── elitism_callback.py
│   ├── image_test.py
│   └── images
│       └── Mona_Lisa_head.png
├── Chapter12
│   ├── 01-gp-even-parity.py
│   ├── 02-gp-even-parity-reduced.py
│   ├── 03-pso-himmelblau.py
│   └── elitism.py
├── Feedback-and-Improvements
│   ├── Chapter04
│   │   ├── 01-solve-knapsack.py
│   │   ├── 02-solve-tsp-first-attempt.py
│   │   ├── 03-solve-tsp.py
│   │   ├── 04-solve-vrp.py
│   │   ├── elitism.py
│   │   ├── knapsack.py
│   │   ├── tsp.py
│   │   └── vrp.py
│   └── README.md
├── LICENSE
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | .idea/encodings.xml
3 | .idea/Hands-On-Genetic-Algorithms-with-Python.iml
4 | .idea/misc.xml
5 | .idea/modules.xml
6 | .idea/vcs.xml
7 | .idea/workspace.xml
8 |
--------------------------------------------------------------------------------
/Chapter03/01-OneMax-long.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 |
7 | import matplotlib.pyplot as plt
8 | import seaborn as sns
9 |
10 | # problem constants:
11 | ONE_MAX_LENGTH = 100 # length of bit string to be optimized
12 |
13 | # Genetic Algorithm constants:
14 | POPULATION_SIZE = 200
15 | P_CROSSOVER = 0.9 # probability for crossover
16 | P_MUTATION = 0.1 # probability for mutating an individual
17 | MAX_GENERATIONS = 50
18 |
19 |
20 | # set the random seed:
21 | RANDOM_SEED = 42
22 | random.seed(RANDOM_SEED)
23 |
24 | toolbox = base.Toolbox()
25 |
26 | # create an operator that randomly returns 0 or 1:
27 | toolbox.register("zeroOrOne", random.randint, 0, 1)
28 |
29 | # define a single objective, maximizing fitness strategy:
30 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
31 |
32 | # create the Individual class based on list:
33 | creator.create("Individual", list, fitness=creator.FitnessMax)
34 | #creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax)
35 |
36 | # create the individual operator to fill up an Individual instance:
37 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, ONE_MAX_LENGTH)
38 |
39 | # create the population operator to generate a list of individuals:
40 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
41 |
42 |
43 | # fitness calculation:
44 | # compute the number of '1's in the individual
45 | def oneMaxFitness(individual):
46 | return sum(individual), # return a tuple
47 |
48 |
49 | toolbox.register("evaluate", oneMaxFitness)
50 |
51 | # genetic operators:
52 |
53 | # Tournament selection with tournament size of 3:
54 | toolbox.register("select", tools.selTournament, tournsize=3)
55 |
56 | # Single-point crossover:
57 | toolbox.register("mate", tools.cxOnePoint)
58 |
59 | # Flip-bit mutation:
60 | # indpb: Independent probability for each attribute to be flipped
61 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/ONE_MAX_LENGTH)
62 |
63 |
64 | # Genetic Algorithm flow:
65 | def main():
66 |
67 | # create initial population (generation 0):
68 | population = toolbox.populationCreator(n=POPULATION_SIZE)
69 | generationCounter = 0
70 |
71 | # calculate fitness tuple for each individual in the population:
72 | fitnessValues = list(map(toolbox.evaluate, population))
73 | for individual, fitnessValue in zip(population, fitnessValues):
74 | individual.fitness.values = fitnessValue
75 |
76 | # extract fitness values from all individuals in population:
77 | fitnessValues = [individual.fitness.values[0] for individual in population]
78 |
79 | # initialize statistics accumulators:
80 | maxFitnessValues = []
81 | meanFitnessValues = []
82 |
83 | # main evolutionary loop:
84 | # stop if max fitness value reached the known max value
85 | # OR if number of generations exceeded the preset value:
86 | while max(fitnessValues) < ONE_MAX_LENGTH and generationCounter < MAX_GENERATIONS:
87 | # update counter:
88 | generationCounter = generationCounter + 1
89 |
90 | # apply the selection operator, to select the next generation's individuals:
91 | offspring = toolbox.select(population, len(population))
92 | # clone the selected individuals:
93 | offspring = list(map(toolbox.clone, offspring))
94 |
95 | # apply the crossover operator to pairs of offspring:
96 | for child1, child2 in zip(offspring[::2], offspring[1::2]):
97 | if random.random() < P_CROSSOVER:
98 | toolbox.mate(child1, child2)
99 | del child1.fitness.values
100 | del child2.fitness.values
101 |
102 | for mutant in offspring:
103 | if random.random() < P_MUTATION:
104 | toolbox.mutate(mutant)
105 | del mutant.fitness.values
106 |
107 | # calculate fitness for the individuals with no previous calculated fitness value:
108 | freshIndividuals = [ind for ind in offspring if not ind.fitness.valid]
109 | freshFitnessValues = list(map(toolbox.evaluate, freshIndividuals))
110 | for individual, fitnessValue in zip(freshIndividuals, freshFitnessValues):
111 | individual.fitness.values = fitnessValue
112 |
113 | # replace the current population with the offspring:
114 | population[:] = offspring
115 |
116 | # collect fitnessValues into a list, update statistics and print:
117 | fitnessValues = [ind.fitness.values[0] for ind in population]
118 |
119 | maxFitness = max(fitnessValues)
120 | meanFitness = sum(fitnessValues) / len(population)
121 | maxFitnessValues.append(maxFitness)
122 | meanFitnessValues.append(meanFitness)
123 | print("- Generation {}: Max Fitness = {}, Avg Fitness = {}".format(generationCounter, maxFitness, meanFitness))
124 |
125 | # find and print best individual:
126 | best_index = fitnessValues.index(max(fitnessValues))
127 | print("Best Individual = ", *population[best_index], "\n")
128 |
129 | # Genetic Algorithm is done - plot statistics:
130 | sns.set_style("whitegrid")
131 | plt.plot(maxFitnessValues, color='red')
132 | plt.plot(meanFitnessValues, color='green')
133 | plt.xlabel('Generation')
134 | plt.ylabel('Max / Average Fitness')
135 | plt.title('Max and Average Fitness over Generations')
136 | plt.show()
137 |
138 |
139 | if __name__ == '__main__':
140 | main()
141 |
--------------------------------------------------------------------------------
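
A short aside (not from the repository) on why oneMaxFitness() above returns a one-element tuple: DEAP fitness objects expect a sequence of values, one per entry in the weights tuple, so even a single-objective score must be wrapped. A minimal sketch:

from deap import base, creator

creator.create("FitnessMax", base.Fitness, weights=(1.0,))

fitness = creator.FitnessMax()
fitness.values = (42,)                # one value per weight entry
print(fitness.values, fitness.valid)  # (42.0,) True
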
/Chapter03/02-OneMax-short.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 | from deap import algorithms
5 |
6 | import random
7 | import numpy
8 |
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | # problem constants:
13 | ONE_MAX_LENGTH = 100 # length of bit string to be optimized
14 |
15 | # Genetic Algorithm constants:
16 | POPULATION_SIZE = 200
17 | P_CROSSOVER = 0.9 # probability for crossover
18 | P_MUTATION = 0.1 # probability for mutating an individual
19 | MAX_GENERATIONS = 50
20 |
21 |
22 | # set the random seed:
23 | RANDOM_SEED = 42
24 | random.seed(RANDOM_SEED)
25 |
26 | toolbox = base.Toolbox()
27 |
28 | # create an operator that randomly returns 0 or 1:
29 | toolbox.register("zeroOrOne", random.randint, 0, 1)
30 |
31 | # define a single objective, maximizing fitness strategy:
32 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
33 |
34 | # create the Individual class based on list:
35 | creator.create("Individual", list, fitness=creator.FitnessMax)
36 |
37 | # create the individual operator to fill up an Individual instance:
38 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, ONE_MAX_LENGTH)
39 |
40 | # create the population operator to generate a list of individuals:
41 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
42 |
43 |
44 | # fitness calculation:
45 | # compute the number of '1's in the individual
46 | def oneMaxFitness(individual):
47 | return sum(individual), # return a tuple
48 |
49 |
50 | toolbox.register("evaluate", oneMaxFitness)
51 |
52 | # genetic operators:
53 |
54 | # Tournament selection with tournament size of 3:
55 | toolbox.register("select", tools.selTournament, tournsize=3)
56 |
57 | # Single-point crossover:
58 | toolbox.register("mate", tools.cxOnePoint)
59 |
60 | # Flip-bit mutation:
61 | # indpb: Independent probability for each attribute to be flipped
62 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/ONE_MAX_LENGTH)
63 |
64 |
65 | # Genetic Algorithm flow:
66 | def main():
67 |
68 | # create initial population (generation 0):
69 | population = toolbox.populationCreator(n=POPULATION_SIZE)
70 |
71 | # prepare the statistics object:
72 | stats = tools.Statistics(lambda ind: ind.fitness.values)
73 | stats.register("max", numpy.max)
74 | stats.register("avg", numpy.mean)
75 |
76 | # perform the Genetic Algorithm flow:
77 | population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION, ngen=MAX_GENERATIONS,
78 | stats=stats, verbose=True)
79 |
80 |
81 | # Genetic Algorithm is done - extract statistics:
82 | maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
83 |
84 | # plot statistics:
85 | sns.set_style("whitegrid")
86 | plt.plot(maxFitnessValues, color='red')
87 | plt.plot(meanFitnessValues, color='green')
88 | plt.xlabel('Generation')
89 | plt.ylabel('Max / Average Fitness')
90 | plt.title('Max and Average Fitness over Generations')
91 | plt.show()
92 |
93 |
94 | if __name__ == "__main__":
95 | main()
--------------------------------------------------------------------------------
/Chapter03/03-OneMax-short-hof.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 | from deap import algorithms
5 |
6 | import random
7 | import numpy
8 |
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | # problem constants:
13 | ONE_MAX_LENGTH = 100 # length of bit string to be optimized
14 |
15 | # Genetic Algorithm constants:
16 | POPULATION_SIZE = 200
17 | P_CROSSOVER = 0.9 # probability for crossover
18 | P_MUTATION = 0.1 # probability for mutating an individual
19 | MAX_GENERATIONS = 50
20 | HALL_OF_FAME_SIZE = 10
21 |
22 |
23 | # set the random seed:
24 | RANDOM_SEED = 42
25 | random.seed(RANDOM_SEED)
26 |
27 | toolbox = base.Toolbox()
28 |
29 | # create an operator that randomly returns 0 or 1:
30 | toolbox.register("zeroOrOne", random.randint, 0, 1)
31 |
32 | # define a single objective, maximizing fitness strategy:
33 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
34 |
35 | # create the Individual class based on list:
36 | creator.create("Individual", list, fitness=creator.FitnessMax)
37 |
38 | # create the individual operator to fill up an Individual instance:
39 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, ONE_MAX_LENGTH)
40 |
41 | # create the population operator to generate a list of individuals:
42 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
43 |
44 |
45 | # fitness calculation:
46 | # compute the number of '1's in the individual
47 | def oneMaxFitness(individual):
48 | return sum(individual), # return a tuple
49 |
50 |
51 | toolbox.register("evaluate", oneMaxFitness)
52 |
53 | # genetic operators:
54 |
55 | # Tournament selection with tournament size of 3:
56 | toolbox.register("select", tools.selTournament, tournsize=3)
57 |
58 | # Single-point crossover:
59 | toolbox.register("mate", tools.cxOnePoint)
60 |
61 | # Flip-bit mutation:
62 | # indpb: Independent probability for each attribute to be flipped
63 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/ONE_MAX_LENGTH)
64 |
65 |
66 | # Genetic Algorithm flow:
67 | def main():
68 |
69 | # create initial population (generation 0):
70 | population = toolbox.populationCreator(n=POPULATION_SIZE)
71 |
72 | # prepare the statistics object:
73 | stats = tools.Statistics(lambda ind: ind.fitness.values)
74 | stats.register("max", numpy.max)
75 | stats.register("avg", numpy.mean)
76 |
77 | # define the hall-of-fame object:
78 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
79 |
80 | # perform the Genetic Algorithm flow with hof feature added:
81 | population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
82 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
83 |
84 | # print Hall of Fame info:
85 | print("Hall of Fame Individuals = ", *hof.items, sep="\n")
86 | print("Best Ever Individual = ", hof.items[0])
87 |
88 | # extract statistics:
89 | maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
90 |
91 | # plot statistics:
92 | sns.set_style("whitegrid")
93 | plt.plot(maxFitnessValues, color='red')
94 | plt.plot(meanFitnessValues, color='green')
95 | plt.xlabel('Generation')
96 | plt.ylabel('Max / Average Fitness')
97 | plt.title('Max and Average Fitness over Generations')
98 |
99 | plt.show()
100 |
101 |
102 | if __name__ == "__main__":
103 | main()
--------------------------------------------------------------------------------
/Chapter04/01-solve-knapsack.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 | from deap import algorithms
5 |
6 | import random
7 | import numpy
8 |
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import knapsack
13 |
14 | # problem constants:
15 | # create the knapsack problem instance to be used:
16 | knapsack = knapsack.Knapsack01Problem()
17 |
18 | # Genetic Algorithm constants:
19 | POPULATION_SIZE = 50
20 | P_CROSSOVER = 0.9 # probability for crossover
21 | P_MUTATION = 0.1 # probability for mutating an individual
22 | MAX_GENERATIONS = 50
23 | HALL_OF_FAME_SIZE = 1
24 |
25 |
26 | # set the random seed:
27 | RANDOM_SEED = 42
28 | random.seed(RANDOM_SEED)
29 |
30 | toolbox = base.Toolbox()
31 |
32 | # create an operator that randomly returns 0 or 1:
33 | toolbox.register("zeroOrOne", random.randint, 0, 1)
34 |
35 | # define a single objective, maximizing fitness strategy:
36 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
37 |
38 | # create the Individual class based on list:
39 | creator.create("Individual", list, fitness=creator.FitnessMax)
40 |
41 | # create the individual operator to fill up an Individual instance:
42 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, len(knapsack))
43 |
44 | # create the population operator to generate a list of individuals:
45 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
46 |
47 |
48 | # fitness calculation
49 | def knapsackValue(individual):
50 | return knapsack.getValue(individual), # return a tuple
51 |
52 |
53 | toolbox.register("evaluate", knapsackValue)
54 |
55 | # genetic operators:
56 |
57 | # Tournament selection with tournament size of 3:
58 | toolbox.register("select", tools.selTournament, tournsize=3)
59 |
60 | # Two-point crossover:
61 | toolbox.register("mate", tools.cxTwoPoint)
62 |
63 | # Flip-bit mutation:
64 | # indpb: Independent probability for each attribute to be flipped
65 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/len(knapsack))
66 |
67 |
68 | # Genetic Algorithm flow:
69 | def main():
70 |
71 | # create initial population (generation 0):
72 | population = toolbox.populationCreator(n=POPULATION_SIZE)
73 |
74 | # prepare the statistics object:
75 | stats = tools.Statistics(lambda ind: ind.fitness.values)
76 | stats.register("max", numpy.max)
77 | stats.register("avg", numpy.mean)
78 |
79 | # define the hall-of-fame object:
80 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
81 |
82 | # perform the Genetic Algorithm flow with hof feature added:
83 | population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
84 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
85 |
86 | # print best solution found:
87 | best = hof.items[0]
88 | print("-- Best Ever Individual = ", best)
89 | print("-- Best Ever Fitness = ", best.fitness.values[0])
90 |
91 | print("-- Knapsack Items = ")
92 | knapsack.printItems(best)
93 |
94 | # extract statistics:
95 | maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
96 |
97 | # plot statistics:
98 | sns.set_style("whitegrid")
99 | plt.plot(maxFitnessValues, color='red')
100 | plt.plot(meanFitnessValues, color='green')
101 | plt.xlabel('Generation')
102 | plt.ylabel('Max / Average Fitness')
103 | plt.title('Max and Average fitness over Generations')
104 | plt.show()
105 |
106 |
107 | if __name__ == "__main__":
108 | main()
--------------------------------------------------------------------------------
/Chapter04/02-solve-tsp-first-attempt.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 | from deap import algorithms
5 |
6 | import random
7 | import array
8 |
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | import seaborn as sns
12 |
13 | import tsp
14 |
15 | # set the random seed for repeatable results
16 | RANDOM_SEED = 42
17 | random.seed(RANDOM_SEED)
18 |
19 | # create the desired traveling salesman problem instance:
20 | TSP_NAME = "bayg29" # name of problem
21 | tsp = tsp.TravelingSalesmanProblem(TSP_NAME)
22 |
23 | # Genetic Algorithm constants:
24 | POPULATION_SIZE = 300
25 | MAX_GENERATIONS = 200
26 | HALL_OF_FAME_SIZE = 1
27 | P_CROSSOVER = 0.9 # probability for crossover
28 | P_MUTATION = 0.1 # probability for mutating an individual
29 |
30 | toolbox = base.Toolbox()
31 |
32 | # define a single objective, minimizing fitness strategy:
33 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
34 |
35 | # create the Individual class based on list of integers:
36 | creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
37 |
38 | # create an operator that generates randomly shuffled indices:
39 | toolbox.register("randomOrder", random.sample, range(len(tsp)), len(tsp))
40 |
41 | # create the individual creation operator to fill up an Individual instance with shuffled indices:
42 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
43 |
44 | # create the population creation operator to generate a list of individuals:
45 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
46 |
47 |
48 | # fitness calculation - compute the total distance of the list of cities represented by indices:
49 | def tspDistance(individual):
50 | return tsp.getTotalDistance(individual), # return a tuple
51 |
52 |
53 | toolbox.register("evaluate", tspDistance)
54 |
55 |
56 | # Genetic operators:
57 | toolbox.register("select", tools.selTournament, tournsize=3)
58 | toolbox.register("mate", tools.cxOrdered)
59 | toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0/len(tsp))
60 |
61 |
62 | # Genetic Algorithm flow:
63 | def main():
64 |
65 | # create initial population (generation 0):
66 | population = toolbox.populationCreator(n=POPULATION_SIZE)
67 |
68 | # prepare the statistics object:
69 | stats = tools.Statistics(lambda ind: ind.fitness.values)
70 | stats.register("min", np.min)
71 | stats.register("avg", np.mean)
72 |
73 | # define the hall-of-fame object:
74 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
75 |
76 | # perform the Genetic Algorithm flow with hof feature added:
77 | population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
78 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
79 |
80 | # print best individual info:
81 | best = hof.items[0]
82 | print("-- Best Ever Individual = ", best)
83 | print("-- Best Ever Fitness = ", best.fitness.values[0])
84 |
85 | # plot best solution:
86 | plt.figure(1)
87 | tsp.plotData(best)
88 |
89 | # plot statistics:
90 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
91 | plt.figure(2)
92 | sns.set_style("whitegrid")
93 | plt.plot(minFitnessValues, color='red')
94 | plt.plot(meanFitnessValues, color='green')
95 | plt.xlabel('Generation')
96 | plt.ylabel('Min / Average Fitness')
97 | plt.title('Min and Average fitness over Generations')
98 |
99 | # show both plots:
100 | plt.show()
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
/Chapter04/03-solve-tsp.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import array
7 |
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import tsp
13 | import elitism
14 |
15 | # set the random seed for repeatable results
16 | RANDOM_SEED = 42
17 | random.seed(RANDOM_SEED)
18 |
19 | # create the desired traveling salesman problem instance:
20 | TSP_NAME = "bayg29" # name of problem
21 | tsp = tsp.TravelingSalesmanProblem(TSP_NAME)
22 |
23 | # Genetic Algorithm constants:
24 | POPULATION_SIZE = 300
25 | MAX_GENERATIONS = 200
26 | HALL_OF_FAME_SIZE = 30
27 | P_CROSSOVER = 0.9 # probability for crossover
28 | P_MUTATION = 0.1 # probability for mutating an individual
29 |
30 | toolbox = base.Toolbox()
31 |
32 | # define a single objective, minimizing fitness strategy:
33 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
34 |
35 | # create the Individual class based on list of integers:
36 | creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
37 |
38 | # create an operator that generates randomly shuffled indices:
39 | toolbox.register("randomOrder", random.sample, range(len(tsp)), len(tsp))
40 |
41 | # create the individual creation operator to fill up an Individual instance with shuffled indices:
42 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
43 |
44 | # create the population creation operator to generate a list of individuals:
45 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
46 |
47 |
48 | # fitness calculation - compute the total distance of the list of cities represented by indices:
49 | def tspDistance(individual):
50 | return tsp.getTotalDistance(individual), # return a tuple
51 |
52 |
53 | toolbox.register("evaluate", tspDistance)
54 |
55 |
56 | # Genetic operators:
57 | toolbox.register("select", tools.selTournament, tournsize=2)
58 | toolbox.register("mate", tools.cxOrdered)
59 | toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0/len(tsp))
60 |
61 |
62 | # Genetic Algorithm flow:
63 | def main():
64 |
65 | # create initial population (generation 0):
66 | population = toolbox.populationCreator(n=POPULATION_SIZE)
67 |
68 | # prepare the statistics object:
69 | stats = tools.Statistics(lambda ind: ind.fitness.values)
70 | stats.register("min", np.min)
71 | stats.register("avg", np.mean)
72 |
73 | # define the hall-of-fame object:
74 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
75 |
76 | # perform the Genetic Algorithm flow with hof feature added:
77 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
78 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
79 |
80 | # print best individual info:
81 | best = hof.items[0]
82 | print("-- Best Ever Individual = ", best)
83 | print("-- Best Ever Fitness = ", best.fitness.values[0])
84 |
85 | # plot best solution:
86 | plt.figure(1)
87 | tsp.plotData(best)
88 |
89 | # plot statistics:
90 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
91 | plt.figure(2)
92 | sns.set_style("whitegrid")
93 | plt.plot(minFitnessValues, color='red')
94 | plt.plot(meanFitnessValues, color='green')
95 | plt.xlabel('Generation')
96 | plt.ylabel('Min / Average Fitness')
97 | plt.title('Min and Average fitness over Generations')
98 |
99 | # show both plots:
100 | plt.show()
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
/Chapter04/04-solve-vrp.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import array
7 |
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import vrp
13 | import elitism
14 |
15 | # set the random seed:
16 | RANDOM_SEED = 42
17 | random.seed(RANDOM_SEED)
18 |
19 | # create the desired vehicle routing problem using a traveling salesman problem instance:
20 | TSP_NAME = "bayg29"
21 | NUM_OF_VEHICLES = 3
22 | DEPOT_LOCATION = 12
23 | vrp = vrp.VehicleRoutingProblem(TSP_NAME, NUM_OF_VEHICLES, DEPOT_LOCATION)
24 |
25 | # Genetic Algorithm constants:
26 | POPULATION_SIZE = 500
27 | P_CROSSOVER = 0.9 # probability for crossover
28 | P_MUTATION = 0.2 # probability for mutating an individual
29 | MAX_GENERATIONS = 1000
30 | HALL_OF_FAME_SIZE = 30
31 |
32 | toolbox = base.Toolbox()
33 |
34 | # define a single objective, minimizing fitness strategy:
35 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
36 |
37 | # create the Individual class based on list of integers:
38 | creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
39 |
40 | # create an operator that generates randomly shuffled indices:
41 | toolbox.register("randomOrder", random.sample, range(len(vrp)), len(vrp))
42 |
43 | # create the individual creation operator to fill up an Individual instance with shuffled indices:
44 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
45 |
46 | # create the population creation operator to generate a list of individuals:
47 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
48 |
49 |
50 | # fitness calculation - compute the longest distance covered by any single vehicle
51 | # for the given list of cities represented by indices:
52 | def vrpDistance(individual):
53 | return vrp.getMaxDistance(individual), # return a tuple
54 |
55 |
56 | toolbox.register("evaluate", vrpDistance)
57 |
58 | # Genetic operators:
59 | toolbox.register("select", tools.selTournament, tournsize=2)
60 | toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0/len(vrp))
61 | toolbox.register("mate", tools.cxUniformPartialyMatched, indpb=2.0/len(vrp))
62 |
63 |
64 | # Genetic Algorithm flow:
65 | def main():
66 |
67 | # create initial population (generation 0):
68 | population = toolbox.populationCreator(n=POPULATION_SIZE)
69 |
70 | # prepare the statistics object:
71 | stats = tools.Statistics(lambda ind: ind.fitness.values)
72 | stats.register("min", np.min)
73 | stats.register("avg", np.mean)
74 |
75 | # define the hall-of-fame object:
76 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
77 |
78 | # perform the Genetic Algorithm flow with hof feature added:
79 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
80 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
81 |
82 | # print best individual info:
83 | best = hof.items[0]
84 | print("-- Best Ever Individual = ", best)
85 | print("-- Best Ever Fitness = ", best.fitness.values[0])
86 |
87 | print("-- Route Breakdown = ", vrp.getRoutes(best))
88 | print("-- total distance = ", vrp.getTotalDistance(best))
89 | print("-- max distance = ", vrp.getMaxDistance(best))
90 |
91 | # plot best solution:
92 | plt.figure(1)
93 | vrp.plotData(best)
94 |
95 | # plot statistics:
96 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
97 | plt.figure(2)
98 | sns.set_style("whitegrid")
99 | plt.plot(minFitnessValues, color='red')
100 | plt.plot(meanFitnessValues, color='green')
101 | plt.xlabel('Generation')
102 | plt.ylabel('Min / Average Fitness')
103 | plt.title('Min and Average fitness over Generations')
104 |
105 | # show both plots:
106 | plt.show()
107 |
108 |
109 | if __name__ == "__main__":
110 | main()
111 |
112 |
--------------------------------------------------------------------------------
/Chapter04/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
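
A minimal usage sketch (not from the repository) showing how eaSimpleWithElitism() is meant to be wired in, assuming this file is importable as elitism; it reuses the OneMax setup from Chapter03 with a small bit length:

import random
import numpy
from deap import base, creator, tools
import elitism

random.seed(42)

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("zeroOrOne", random.randint, 0, 1)
toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, 20)
toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
toolbox.register("evaluate", lambda individual: (sum(individual),))  # OneMax fitness
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/20)

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("max", numpy.max)

# the hall of fame doubles as the elite pool that is re-injected each generation:
hof = tools.HallOfFame(3)
population, logbook = elitism.eaSimpleWithElitism(
    toolbox.populationCreator(n=50), toolbox, cxpb=0.9, mutpb=0.1,
    ngen=30, stats=stats, halloffame=hof, verbose=False)

print("Best Ever Individual = ", hof.items[0])
print("Best Ever Fitness = ", hof.items[0].fitness.values[0])
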
/Chapter04/knapsack.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class Knapsack01Problem:
4 | """This class encapsulates the Knapsack 0-1 Problem from RosettaCode.org
5 | """
6 |
7 | def __init__(self):
8 |
9 | # initialize instance variables:
10 | self.items = []
11 | self.maxCapacity = 0
12 |
13 | # initialize the data:
14 | self.__initData()
15 |
16 | def __len__(self):
17 | """
18 | :return: the total number of items defined in the problem
19 | """
20 | return len(self.items)
21 |
22 | def __initData(self):
23 | """initializes the RosettaCode.org knapsack 0-1 problem data
24 | """
25 | self.items = [
26 | ("map", 9, 150),
27 | ("compass", 13, 35),
28 | ("water", 153, 200),
29 | ("sandwich", 50, 160),
30 | ("glucose", 15, 60),
31 | ("tin", 68, 45),
32 | ("banana", 27, 60),
33 | ("apple", 39, 40),
34 | ("cheese", 23, 30),
35 | ("beer", 52, 10),
36 | ("suntan cream", 11, 70),
37 | ("camera", 32, 30),
38 | ("t-shirt", 24, 15),
39 | ("trousers", 48, 10),
40 | ("umbrella", 73, 40),
41 | ("waterproof trousers", 42, 70),
42 | ("waterproof overclothes", 43, 75),
43 | ("note-case", 22, 80),
44 | ("sunglasses", 7, 20),
45 | ("towel", 18, 12),
46 | ("socks", 4, 50),
47 | ("book", 30, 10)
48 | ]
49 |
50 | self.maxCapacity = 400
51 |
52 | def getValue(self, zeroOneList):
53 | """
54 | Calculates the value of the selected items in the list, while ignoring items that will cause the accumulating weight to exceed the maximum weight
55 | :param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items. '1' means that item was selected.
56 | :return: the calculated value
57 | """
58 |
59 | totalWeight = totalValue = 0
60 |
61 | for i in range(len(zeroOneList)):
62 | item, weight, value = self.items[i]
63 | if totalWeight + weight <= self.maxCapacity:
64 | totalWeight += zeroOneList[i] * weight
65 | totalValue += zeroOneList[i] * value
66 | return totalValue
67 |
68 | def printItems(self, zeroOneList):
69 | """
70 | Prints the selected items in the list, while ignoring items that will cause the accumulating weight to exceed the maximum weight
71 | :param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items. '1' means that item was selected.
72 | """
73 | totalWeight = totalValue = 0
74 |
75 | for i in range(len(zeroOneList)):
76 | item, weight, value = self.items[i]
77 | if totalWeight + weight <= self.maxCapacity:
78 | if zeroOneList[i] > 0:
79 | totalWeight += weight
80 | totalValue += value
81 | print("- Adding {}: weight = {}, value = {}, accumulated weight = {}, accumulated value = {}".format(item, weight, value, totalWeight, totalValue))
82 | print("- Total weight = {}, Total value = {}".format(totalWeight, totalValue))
83 |
84 |
85 | # testing the class:
86 | def main():
87 | # create a problem instance:
88 | knapsack = Knapsack01Problem()
89 |
90 | # create a random solution and evaluate it:
91 | randomSolution = np.random.randint(2, size=len(knapsack))
92 | print("Random Solution = ")
93 | print(randomSolution)
94 | knapsack.printItems(randomSolution)
95 |
96 |
97 | if __name__ == "__main__":
98 | main()
--------------------------------------------------------------------------------
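
A small worked example (not from the repository, assuming it is run from the Chapter04 folder so the import resolves): hand-picking the map, compass and sandwich gives a weight of 9 + 13 + 50 = 72 and a value of 150 + 35 + 160 = 345, which getValue() reproduces for the corresponding 0/1 vector:

from knapsack import Knapsack01Problem

problem = Knapsack01Problem()

# build a 0/1 selection vector and pick items 0 (map), 1 (compass) and 3 (sandwich):
selection = [0] * len(problem)
selection[0] = selection[1] = selection[3] = 1

print("value = ", problem.getValue(selection))  # expected: 345
problem.printItems(selection)
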
/Chapter04/tsp.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import pickle
3 | import os
4 | import codecs
5 |
6 | import numpy as np
7 |
8 | from urllib.request import urlopen
9 |
10 | import matplotlib.pyplot as plt
11 |
12 |
13 | class TravelingSalesmanProblem:
14 | """This class encapsulates the Traveling Salesman Problem.
15 | City coordinates are read from an online file and the distance matrix is calculated.
16 | The data is serialized to disk.
17 | The total distance can be calculated for a path represented by a list of city indices.
18 | A plot can be created for a path represented by a list of city indices.
19 |
20 | :param name: The name of the corresponding TSPLIB problem, e.g. 'burma14' or 'bayg29'.
21 | """
22 |
23 | def __init__(self, name):
24 | """
25 | Creates an instance of a TSP
26 |
27 | :param name: name of the TSP problem
28 | """
29 |
30 | # initialize instance variables:
31 | self.name = name
32 | self.locations = []
33 | self.distances = []
34 | self.tspSize = 0
35 |
36 | # initialize the data:
37 | self.__initData()
38 |
39 | def __len__(self):
40 | """
41 | returns the length of the underlying TSP
42 | :return: the length of the underlying TSP (number of cities)
43 | """
44 | return self.tspSize
45 |
46 | def __initData(self):
47 | """Reads the serialized data, and if not available - calls __create_data() to prepare it
48 | """
49 |
50 | # attempt to read serialized data:
51 | try:
52 | self.locations = pickle.load(open(os.path.join("tsp-data", self.name + "-loc.pickle"), "rb"))
53 | self.distances = pickle.load(open(os.path.join("tsp-data", self.name + "-dist.pickle"), "rb"))
54 | except (OSError, IOError):
55 | pass
56 |
57 | # serialized data not found - create the data from scratch:
58 | if not self.locations or not self.distances:
59 | self.__createData()
60 |
61 | # set the problem 'size':
62 | self.tspSize = len(self.locations)
63 |
64 | def __createData(self):
65 | """Reads the desired TSP file from the Internet, extracts the city coordinates, calculates the distances
66 | between every two cities and uses them to populate a distance matrix (two-dimensional array).
67 | It then serializes the city locations and the calculated distances to disk using the pickle utility.
68 | """
69 | self.locations = []
70 |
71 | # open whitespace-delimited file from url and read lines from it:
72 | with urlopen("http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/" + self.name + ".tsp") as f:
73 | reader = csv.reader(codecs.iterdecode(f, 'utf-8'), delimiter=" ", skipinitialspace=True)
74 |
75 | # skip lines until one of these lines is found:
76 | for row in reader:
77 | if row[0] in ('DISPLAY_DATA_SECTION', 'NODE_COORD_SECTION'):
78 | break
79 |
80 | # read data lines until 'EOF' found:
81 | for row in reader:
82 | if row[0] != 'EOF':
83 | # remove index at beginning of line:
84 | del row[0]
85 |
86 | # convert x,y coordinates to ndarray:
87 | self.locations.append(np.asarray(row, dtype=np.float32))
88 | else:
89 | break
90 |
91 | # set the problem 'size':
92 | self.tspSize = len(self.locations)
93 |
94 | # print data:
95 | print("length = {}, locations = {}".format(self.tspSize, self.locations))
96 |
97 | # initialize distance matrix by filling it with 0's:
98 | self.distances = [[0] * self.tspSize for _ in range(self.tspSize)]
99 |
100 | # populate the distance matrix with calculated distances:
101 | for i in range(self.tspSize):
102 | for j in range(i + 1, self.tspSize):
103 | # calculate euclidean distance between two ndarrays:
104 | distance = np.linalg.norm(self.locations[j] - self.locations[i])
105 | self.distances[i][j] = distance
106 | self.distances[j][i] = distance
107 | print("{}, {}: location1 = {}, location2 = {} => distance = {}".format(i, j, self.locations[i], self.locations[j], distance))
108 |
109 | # serialize locations and distances:
110 | if not os.path.exists("tsp-data"):
111 | os.makedirs("tsp-data")
112 | pickle.dump(self.locations, open(os.path.join("tsp-data", self.name + "-loc.pickle"), "wb"))
113 | pickle.dump(self.distances, open(os.path.join("tsp-data", self.name + "-dist.pickle"), "wb"))
114 |
115 | def getTotalDistance(self, indices):
116 | """Calculates the total distance of the path described by the given indices of the cities
117 |
118 | :param indices: A list of ordered city indices describing the given path.
119 | :return: total distance of the path described by the given indices
120 | """
121 | # distance between the last and first city:
122 | distance = self.distances[indices[-1]][indices[0]]
123 |
124 | # add the distance between each pair of consecutive cities:
125 | for i in range(len(indices) - 1):
126 | distance += self.distances[indices[i]][indices[i + 1]]
127 |
128 | return distance
129 |
130 | def plotData(self, indices):
131 | """plots the path described by the given indices of the cities
132 |
133 | :param indices: A list of ordered city indices describing the given path.
134 | :return: the resulting plot
135 | """
136 |
137 | # plot the dots representing the cities:
138 | plt.scatter(*zip(*self.locations), marker='.', color='red')
139 |
140 | # create a list of the corresponding city locations:
141 | locs = [self.locations[i] for i in indices]
142 | locs.append(locs[0])
143 |
144 | # plot a line between each pair of consecutive cities:
145 | plt.plot(*zip(*locs), linestyle='-', color='blue')
146 |
147 | return plt
148 |
149 |
150 | # testing the class:
151 | def main():
152 | # create a problem instance:
153 | tsp = TravelingSalesmanProblem("bayg29")
154 |
155 | # generate a random solution and evaluate it:
156 | #randomSolution = random.sample(range(len(tsp)), len(tsp))
157 |
158 | # see http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/bayg29.opt.tour
159 | optimalSolution = [0, 27, 5, 11, 8, 25, 2, 28, 4, 20, 1, 19, 9, 3, 14, 17, 13, 16, 21, 10, 18, 24, 6, 22, 7, 26, 15, 12, 23]
160 |
161 | print("Problem name: " + tsp.name)
162 | print("Optimal solution = ", optimalSolution)
163 | print("Optimal distance = ", tsp.getTotalDistance(optimalSolution))
164 |
165 | # plot the solution:
166 | plot = tsp.plotData(optimalSolution)
167 | plot.show()
168 |
169 |
170 | if __name__ == "__main__":
171 | main()
172 |
--------------------------------------------------------------------------------
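
A quick comparison sketch (not from the repository, assuming it is run from the Chapter04 folder so the import and the tsp-data cache resolve): it scores a random bayg29 tour against the published optimal tour used in main() above; the random tour is typically far longer:

import random
from tsp import TravelingSalesmanProblem

tsp = TravelingSalesmanProblem("bayg29")

# a random permutation of the 29 city indices:
randomTour = random.sample(range(len(tsp)), len(tsp))

# the optimal tour listed at http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/bayg29.opt.tour:
optimalTour = [0, 27, 5, 11, 8, 25, 2, 28, 4, 20, 1, 19, 9, 3, 14, 17,
               13, 16, 21, 10, 18, 24, 6, 22, 7, 26, 15, 12, 23]

print("random tour distance = ", tsp.getTotalDistance(randomTour))
print("optimal tour distance = ", tsp.getTotalDistance(optimalTour))
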
/Chapter05/01-solve-n-queens.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import array
7 |
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import elitism
13 | import queens
14 |
15 | # problem constants:
16 | NUM_OF_QUEENS = 16
17 |
18 | # Genetic Algorithm constants:
19 | POPULATION_SIZE = 300
20 | MAX_GENERATIONS = 100
21 | HALL_OF_FAME_SIZE = 30
22 | P_CROSSOVER = 0.9 # probability for crossover
23 | P_MUTATION = 0.1 # probability for mutating an individual
24 |
25 | # set the random seed for repeatable results
26 | RANDOM_SEED = 42
27 | random.seed(RANDOM_SEED)
28 |
29 | # create the desired N-Queens problem instance:
30 | nQueens = queens.NQueensProblem(NUM_OF_QUEENS)
31 |
32 | toolbox = base.Toolbox()
33 |
34 | # define a single objective, minimizing fitness strategy:
35 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
36 |
37 | # create the Individual class based on list of integers:
38 | creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
39 |
40 | # create an operator that generates randomly shuffled indices:
41 | toolbox.register("randomOrder", random.sample, range(len(nQueens)), len(nQueens))
42 |
43 | # create the individual creation operator to fill up an Individual instance with shuffled indices:
44 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
45 |
46 | # create the population creation operator to generate a list of individuals:
47 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
48 |
49 |
50 | # fitness calculation - compute the number of constraint violations for the queen positions represented by the indices:
51 | def getViolationsCount(individual):
52 | return nQueens.getViolationsCount(individual), # return a tuple
53 |
54 |
55 | toolbox.register("evaluate", getViolationsCount)
56 |
57 |
58 | # Genetic operators:
59 | toolbox.register("select", tools.selTournament, tournsize=2)
60 | toolbox.register("mate", tools.cxUniformPartialyMatched, indpb=2.0/len(nQueens))
61 | toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0/len(nQueens))
62 |
63 |
64 | # Genetic Algorithm flow:
65 | def main():
66 |
67 | # create initial population (generation 0):
68 | population = toolbox.populationCreator(n=POPULATION_SIZE)
69 |
70 | # prepare the statistics object:
71 | stats = tools.Statistics(lambda ind: ind.fitness.values)
72 | stats.register("min", np.min)
73 | stats.register("avg", np.mean)
74 |
75 | # define the hall-of-fame object:
76 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
77 |
78 | # perform the Genetic Algorithm flow with hof feature added:
79 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
80 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
81 |
82 | # print hall of fame members info:
83 | print("- Best solutions are:")
84 | for i in range(HALL_OF_FAME_SIZE):
85 | print(i, ": ", hof.items[i].fitness.values[0], " -> ", hof.items[i])
86 |
87 | # plot statistics:
88 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
89 | plt.figure(1)
90 | sns.set_style("whitegrid")
91 | plt.plot(minFitnessValues, color='red')
92 | plt.plot(meanFitnessValues, color='green')
93 | plt.xlabel('Generation')
94 | plt.ylabel('Min / Average Fitness')
95 | plt.title('Min and Average fitness over Generations')
96 |
97 | # plot best solution:
98 | sns.set_style("whitegrid", {'axes.grid' : False})
99 | nQueens.plotBoard(hof.items[0])
100 |
101 | # show both plots:
102 | plt.show()
103 |
104 |
105 | if __name__ == "__main__":
106 | main()
107 |
--------------------------------------------------------------------------------
/Chapter05/02-solve-nurses.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | import elitism
12 | import nurses
13 |
14 | # problem constants:
15 | HARD_CONSTRAINT_PENALTY = 10 # the penalty factor for a hard-constraint violation
16 |
17 | # Genetic Algorithm constants:
18 | POPULATION_SIZE = 300
19 | P_CROSSOVER = 0.9 # probability for crossover
20 | P_MUTATION = 0.1 # probability for mutating an individual
21 | MAX_GENERATIONS = 200
22 | HALL_OF_FAME_SIZE = 30
23 |
24 | # set the random seed:
25 | RANDOM_SEED = 42
26 | random.seed(RANDOM_SEED)
27 |
28 | toolbox = base.Toolbox()
29 |
30 | # create the nurse scheduling problem instance to be used:
31 | nsp = nurses.NurseSchedulingProblem(HARD_CONSTRAINT_PENALTY)
32 |
33 | # define a single objective, minimizing fitness strategy:
34 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
35 |
36 | # create the Individual class based on list:
37 | creator.create("Individual", list, fitness=creator.FitnessMin)
38 |
39 | # create an operator that randomly returns 0 or 1:
40 | toolbox.register("zeroOrOne", random.randint, 0, 1)
41 |
42 | # create the individual operator to fill up an Individual instance:
43 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, len(nsp))
44 |
45 | # create the population operator to generate a list of individuals:
46 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
47 |
48 |
49 | # fitness calculation
50 | def getCost(individual):
51 | return nsp.getCost(individual), # return a tuple
52 |
53 |
54 | toolbox.register("evaluate", getCost)
55 |
56 | # genetic operators:
57 | toolbox.register("select", tools.selTournament, tournsize=2)
58 | toolbox.register("mate", tools.cxTwoPoint)
59 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/len(nsp))
60 |
61 |
62 | # Genetic Algorithm flow:
63 | def main():
64 |
65 | # create initial population (generation 0):
66 | population = toolbox.populationCreator(n=POPULATION_SIZE)
67 |
68 | # prepare the statistics object:
69 | stats = tools.Statistics(lambda ind: ind.fitness.values)
70 | stats.register("min", numpy.min)
71 | stats.register("avg", numpy.mean)
72 |
73 | # define the hall-of-fame object:
74 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
75 |
76 | # perform the Genetic Algorithm flow with hof feature added:
77 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
78 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
79 |
80 | # print best solution found:
81 | best = hof.items[0]
82 | print("-- Best Individual = ", best)
83 | print("-- Best Fitness = ", best.fitness.values[0])
84 | print()
85 | print("-- Schedule = ")
86 | nsp.printScheduleInfo(best)
87 |
88 | # extract statistics:
89 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
90 |
91 | # plot statistics:
92 | sns.set_style("whitegrid")
93 | plt.plot(minFitnessValues, color='red')
94 | plt.plot(meanFitnessValues, color='green')
95 | plt.xlabel('Generation')
96 | plt.ylabel('Min / Average Fitness')
97 | plt.title('Min and Average fitness over Generations')
98 | plt.show()
99 |
100 |
101 | if __name__ == "__main__":
102 | main()
103 |
--------------------------------------------------------------------------------
/Chapter05/03-solve-graphs.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 | import networkx as nx
11 |
12 | import elitism
13 | import graphs
14 |
15 |
16 |
17 | # problem constants:
18 | HARD_CONSTRAINT_PENALTY = 10 # the penalty factor for a hard-constraint violation
19 |
20 | # Genetic Algorithm constants:
21 | POPULATION_SIZE = 100
22 | P_CROSSOVER = 0.9 # probability for crossover
23 | P_MUTATION = 0.1 # probability for mutating an individual
24 | MAX_GENERATIONS = 100
25 | HALL_OF_FAME_SIZE = 5
26 | MAX_COLORS = 10
27 |
28 | # set the random seed:
29 | RANDOM_SEED = 42
30 | random.seed(RANDOM_SEED)
31 |
32 | toolbox = base.Toolbox()
33 |
34 | # create the graph coloring problem instance to be used:
35 | gcp = graphs.GraphColoringProblem(nx.petersen_graph(), HARD_CONSTRAINT_PENALTY)
36 | #gcp = graphs.GraphColoringProblem(nx.mycielski_graph(5), HARD_CONSTRAINT_PENALTY)
37 |
38 | # define a single objective, minimizing fitness strategy:
39 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
40 |
41 | # create the Individual class based on list:
42 | creator.create("Individual", list, fitness=creator.FitnessMin)
43 |
44 | # create an operator that randomly returns an integer in the range of participating colors:
45 | toolbox.register("Integers", random.randint, 0, MAX_COLORS - 1)
46 |
47 | # create the individual operator to fill up an Individual instance:
48 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.Integers, len(gcp))
49 |
50 | # create the population operator to generate a list of individuals:
51 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
52 |
53 |
54 | # fitness calculation: cost of the suggested solution
55 | def getCost(individual):
56 | return gcp.getCost(individual), # return a tuple
57 |
58 |
59 | toolbox.register("evaluate", getCost)
60 |
61 | # genetic operators:
62 | toolbox.register("select", tools.selTournament, tournsize=2)
63 | toolbox.register("mate", tools.cxTwoPoint)
64 | toolbox.register("mutate", tools.mutUniformInt, low=0, up=MAX_COLORS - 1, indpb=1.0/len(gcp))
65 |
66 |
67 | # Genetic Algorithm flow:
68 | def main():
69 |
70 | # create initial population (generation 0):
71 | population = toolbox.populationCreator(n=POPULATION_SIZE)
72 |
73 | # prepare the statistics object:
74 | stats = tools.Statistics(lambda ind: ind.fitness.values)
75 | stats.register("min", numpy.min)
76 | stats.register("avg", numpy.mean)
77 |
78 | # define the hall-of-fame object:
79 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
80 |
81 | # perform the Genetic Algorithm flow with elitism:
82 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
83 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
84 |
85 | # print info for best solution found:
86 | best = hof.items[0]
87 | print("-- Best Individual = ", best)
88 | print("-- Best Fitness = ", best.fitness.values[0])
89 | print()
90 | print("number of colors = ", gcp.getNumberOfColors(best))
91 | print("Number of violations = ", gcp.getViolationsCount(best))
92 | print("Cost = ", gcp.getCost(best))
93 |
94 | # plot best solution:
95 | plt.figure(1)
96 | gcp.plotGraph(best)
97 |
98 | # extract statistics:
99 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
100 |
101 | # plot statistics:
102 | plt.figure(2)
103 | sns.set_style("whitegrid")
104 | plt.plot(minFitnessValues, color='red')
105 | plt.plot(meanFitnessValues, color='green')
106 | plt.xlabel('Generation')
107 | plt.ylabel('Min / Average Fitness')
108 | plt.title('Min and Average fitness over Generations')
109 |
110 | plt.show()
111 |
112 |
113 | if __name__ == "__main__":
114 | main()
115 |
--------------------------------------------------------------------------------
/Chapter05/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
/Chapter05/graphs.py:
--------------------------------------------------------------------------------
1 | import networkx as nx
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 |
5 | class GraphColoringProblem:
6 | """This class encapsulates the Graph Coloring problem
7 | """
8 |
9 | def __init__(self, graph, hardConstraintPenalty):
10 | """
11 | :param graph: a NetworkX graph to be colored
12 | :param hardConstraintPenalty: penalty for hard constraint (coloring violation)
13 | """
14 |
15 | # initialize instance variables:
16 | self.graph = graph
17 | self.hardConstraintPenalty = hardConstraintPenalty
18 |
19 | # a list of the nodes in the graph:
20 | self.nodeList = list(self.graph.nodes)
21 |
22 | # adjacency matrix of the nodes -
23 | # matrix[i,j] equals '1' if nodes i and j are connected, or '0' otherwise:
24 | self.adjMatrix = nx.adjacency_matrix(graph).todense()
25 |
26 | def __len__(self):
27 | """
28 | :return: the number of nodes in the graph
29 | """
30 | return nx.number_of_nodes(self.graph)
31 |
32 | def getCost(self, colorArrangement):
33 | """
34 | Calculates the cost of the suggested color arrangement
35 | :param colorArrangement: a list of integers representing the suggested color arrangement for the nodes,
36 | one color per node in the graph
37 | :return: Calculated cost of the arrangement.
38 | """
39 |
40 | return self.hardConstraintPenalty * self.getViolationsCount(colorArrangement) + self.getNumberOfColors(colorArrangement)
41 |
42 | def getViolationsCount(self, colorArrangement):
43 | """
44 | Calculates the number of violations in the given color arrangement. Each pair of interconnected nodes
45 | with the same color counts as one violation.
46 | :param colorArrangement: a list of integers representing the suggested color arrangement for the nodes,
47 | one color per node in the graph
48 | :return: the calculated value
49 | """
50 |
51 | if len(colorArrangement) != self.__len__():
52 | raise ValueError("size of color arrangement should be equal to ", self.__len__())
53 |
54 | violations = 0
55 |
56 | # iterate over every pair of nodes and find if they are adjacent AND share the same color:
57 | for i in range(len(colorArrangement)):
58 | for j in range(i + 1, len(colorArrangement)):
59 |
60 | if self.adjMatrix[i, j]: # these are adjacent nodes
61 | if colorArrangement[i] == colorArrangement[j]:
62 | violations += 1
63 |
64 | return violations
65 |
66 | def getNumberOfColors(self, colorArrangement):
67 | """
68 | returns the number of different colors in the suggested color arrangement
69 |         :param colorArrangement: a list of integers representing the suggested color arrangement for the nodes,
70 | one color per node in the graph
71 | :return: number of different colors
72 | """
73 | return len(set(colorArrangement))
74 |
75 | def plotGraph(self, colorArrangement):
76 | """
77 | Plots the graph with the nodes colored according to the given color arrangement
78 |         :param colorArrangement: a list of integers representing the suggested color arrangement for the nodes,
79 | one color per node in the graph
80 | """
81 |
82 | if len(colorArrangement) != self.__len__():
83 | raise ValueError("size of color list should be equal to ", self.__len__())
84 |
85 | # create a list of the unique colors in the arrangement:
86 | colorList = list(set(colorArrangement))
87 |
88 | # create the actual colors for the integers in the color list:
89 | colors = plt.cm.rainbow(np.linspace(0, 1, len(colorList)))
90 |
91 | # iterate over the nodes, and give each one of them its corresponding color:
92 | colorMap = []
93 | for i in range(self.__len__()):
94 | color = colors[colorList.index(colorArrangement[i])]
95 | colorMap.append(color)
96 |
97 | # plot the nodes with their labels and matching colors:
98 | nx.draw_kamada_kawai(self.graph, node_color=colorMap, with_labels=True)
99 | #nx.draw_circular(self.graph, node_color=color_map, with_labels=True)
100 |
101 | return plt
102 |
103 |
104 | # testing the class:
105 | def main():
106 | # create a problem instance with petersen graph:
107 | gcp = GraphColoringProblem(nx.petersen_graph(), 10)
108 |
109 | # generate a random solution with up to 5 different colors:
110 | solution = np.random.randint(5, size=len(gcp))
111 |
112 | print("solution = ", solution)
113 | print("number of colors = ", gcp.getNumberOfColors(solution))
114 | print("Number of violations = ", gcp.getViolationsCount(solution))
115 | print("Cost = ", gcp.getCost(solution))
116 |
117 | plot = gcp.plotGraph(solution)
118 | plot.show()
119 |
120 |
121 | if __name__ == "__main__":
122 | main()
123 |
124 |
--------------------------------------------------------------------------------
/Chapter05/queen-thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Hands-On-Genetic-Algorithms-with-Python/cc5a1b8711a3e6cc6d3835335f2d09b8822e97f5/Chapter05/queen-thumbnail.png
--------------------------------------------------------------------------------
/Chapter05/queens.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import matplotlib as mpl
4 |
5 | class NQueensProblem:
6 | """This class encapsulates the N-Queens problem
7 | """
8 |
9 | def __init__(self, numOfQueens):
10 | """
11 | :param numOfQueens: the number of queens in the problem
12 | """
13 | self.numOfQueens = numOfQueens
14 |
15 | def __len__(self):
16 | """
17 | :return: the number of queens
18 | """
19 | return self.numOfQueens
20 |
21 | def getViolationsCount(self, positions):
22 | """
23 | Calculates the number of violations in the given solution
24 | Since the input contains unique indices of columns for each row, no row or column violations are possible,
25 | Only the diagonal violations need to be counted.
26 | :param positions: a list of indices corresponding to the positions of the queens in each row
27 | :return: the calculated value
28 | """
29 |
30 | if len(positions) != self.numOfQueens:
31 | raise ValueError("size of positions list should be equal to ", self.numOfQueens)
32 |
33 | violations = 0
34 |
35 | # iterate over every pair of queens and find if they are on the same diagonal:
36 | for i in range(len(positions)):
37 | for j in range(i + 1, len(positions)):
38 |
39 | # first queen in pair:
40 | column1 = i
41 | row1 = positions[i]
42 |
43 | # second queen in pair:
44 | column2 = j
45 | row2 = positions[j]
46 |
47 |                 # look for diagonal threat for the current pair:
48 | if abs(column1 - column2) == abs(row1 - row2):
49 | violations += 1
50 |
51 | return violations
52 |
53 | def plotBoard(self, positions):
54 | """
55 | Plots the positions of the queens on the board according to the given solution
56 | :param positions: a list of indices corresponding to the positions of the queens in each row.
57 | """
58 |
59 | if len(positions) != self.numOfQueens:
60 | raise ValueError("size of positions list should be equal to ", self.numOfQueens)
61 |
62 | fig, ax = plt.subplots()
63 |
64 | # start with the board's squares:
65 | board = np.zeros((self.numOfQueens, self.numOfQueens))
66 | # change color of every other square:
67 | board[::2, 1::2] = 1
68 | board[1::2, ::2] = 1
69 |
70 | # draw the squares with two different colors:
71 | ax.imshow(board, interpolation='none', cmap=mpl.colors.ListedColormap(['#ffc794', '#4c2f27']))
72 |
73 | # read the queen image thumbnail and give it a spread of 70% of the square dimensions:
74 | queenThumbnail = plt.imread('queen-thumbnail.png')
75 | thumbnailSpread = 0.70 * np.array([-1, 1, -1, 1]) / 2 # spread is [left, right, bottom, top]
76 |
77 | # iterate over the queen positions - i is the row, j is the column:
78 | for i, j in enumerate(positions):
79 | # place the thumbnail on the matching square:
80 | ax.imshow(queenThumbnail, extent=[j, j, i, i] + thumbnailSpread)
81 |
82 | # show the row and column indexes:
83 | ax.set(xticks=list(range(self.numOfQueens)), yticks=list(range(self.numOfQueens)))
84 |
85 | ax.axis('image') # scale the plot as square-shaped
86 |
87 | return plt
88 |
89 |
90 | # testing the class:
91 | def main():
92 | # create a problem instance:
93 | nQueens = NQueensProblem(8)
94 |
95 | # a known good solution:
96 | #solution = [5, 0, 4, 1, 7, 2, 6, 3]
97 |
98 | # a solution with 3 violations:
99 | solution = [1, 2, 7, 5, 0, 3, 4, 6]
100 |
101 | print("Number of violations = ", nQueens.getViolationsCount(solution))
102 |
103 | plot = nQueens.plotBoard(solution)
104 | plot.show()
105 |
106 |
107 | if __name__ == "__main__":
108 | main()
109 |
110 |
--------------------------------------------------------------------------------
/Chapter06/01-optimize-eggholder.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy as np
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | import elitism
12 |
13 | # problem constants:
14 | DIMENSIONS = 2 # number of dimensions
15 | BOUND_LOW, BOUND_UP = -512.0, 512.0 # boundaries for all dimensions
16 |
17 | # Genetic Algorithm constants:
18 | POPULATION_SIZE = 300
19 | P_CROSSOVER = 0.9 # probability for crossover
20 | P_MUTATION = 0.1 # (try also 0.5) probability for mutating an individual
21 | MAX_GENERATIONS = 300
22 | HALL_OF_FAME_SIZE = 30
23 | CROWDING_FACTOR = 20.0 # crowding factor for crossover and mutation
24 |
25 | # set the random seed:
26 | RANDOM_SEED = 42
27 | random.seed(RANDOM_SEED)
28 |
29 | toolbox = base.Toolbox()
30 |
31 | # define a single objective, minimizing fitness strategy:
32 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
33 |
34 | # create the Individual class based on list:
35 | creator.create("Individual", list, fitness=creator.FitnessMin)
36 |
37 |
38 | # helper function for creating random real numbers uniformly distributed within a given range [low, up]
39 | # it assumes that the range is the same for every dimension
40 | def randomFloat(low, up):
41 | return [random.uniform(l, u) for l, u in zip([low] * DIMENSIONS, [up] * DIMENSIONS)]
42 |
43 | # create an operator that randomly returns a float in the desired range and dimension:
44 | toolbox.register("attrFloat", randomFloat, BOUND_LOW, BOUND_UP)
45 |
46 | # create the individual operator to fill up an Individual instance:
47 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.attrFloat)
48 |
49 | # create the population operator to generate a list of individuals:
50 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
51 |
52 |
53 | # Eggholder function as the given individual's fitness:
54 | def eggholder(individual):
55 | x = individual[0]
56 | y = individual[1]
57 | f = (-(y + 47.0) * np.sin(np.sqrt(abs(x/2.0 + (y + 47.0)))) - x * np.sin(np.sqrt(abs(x - (y + 47.0)))))
58 | return f, # return a tuple
59 |
60 | toolbox.register("evaluate", eggholder)
61 |
62 | # genetic operators:
63 | toolbox.register("select", tools.selTournament, tournsize=2)
64 | toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR)
65 | toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR, indpb=1.0/DIMENSIONS)
66 |
67 |
68 | # Genetic Algorithm flow:
69 | def main():
70 |
71 | # create initial population (generation 0):
72 | population = toolbox.populationCreator(n=POPULATION_SIZE)
73 |
74 | # prepare the statistics object:
75 | stats = tools.Statistics(lambda ind: ind.fitness.values)
76 | stats.register("min", np.min)
77 | stats.register("avg", np.mean)
78 |
79 | # define the hall-of-fame object:
80 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
81 |
82 | # perform the Genetic Algorithm flow with elitism:
83 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
84 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
85 |
86 | # print info for best solution found:
87 | best = hof.items[0]
88 | print("-- Best Individual = ", best)
89 | print("-- Best Fitness = ", best.fitness.values[0])
90 |
91 | # extract statistics:
92 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
93 |
94 | # plot statistics:
95 | sns.set_style("whitegrid")
96 | plt.plot(minFitnessValues, color='red')
97 | plt.plot(meanFitnessValues, color='green')
98 | plt.xlabel('Generation')
99 | plt.ylabel('Min / Average Fitness')
100 | plt.title('Min and Average fitness over Generations')
101 |
102 | plt.show()
103 |
104 |
105 | if __name__ == "__main__":
106 | main()
107 |
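108 | # Reference values for the run above (commonly cited, approximate): the Eggholder function's
109 | # global minimum within these bounds is about -959.6407 at (512, 404.2319), e.g.
110 | #   print(eggholder([512.0, 404.2319]))   # -> roughly (-959.64,)
111 | # which gives a quick sanity check for the "Best Fitness" value printed by main().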
--------------------------------------------------------------------------------
/Chapter06/02-optimize-himmelblau.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy as np
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | import elitism
12 |
13 | # problem constants:
14 | DIMENSIONS = 2 # number of dimensions
15 | BOUND_LOW, BOUND_UP = -5.0, 5.0 # boundaries for all dimensions
16 |
17 | # Genetic Algorithm constants:
18 | POPULATION_SIZE = 300
19 | P_CROSSOVER = 0.9 # probability for crossover
20 | P_MUTATION = 0.5   # probability for mutating an individual (try also 0.1)
21 | MAX_GENERATIONS = 300
22 | HALL_OF_FAME_SIZE = 30
23 | CROWDING_FACTOR = 20.0 # crowding factor for crossover and mutation
24 |
25 | # set the random seed:
26 | RANDOM_SEED = 42 # try also 17, 13,...
27 | random.seed(RANDOM_SEED)
28 |
29 | toolbox = base.Toolbox()
30 |
31 | # define a single objective, minimizing fitness strategy:
32 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
33 |
34 | # create the Individual class based on list:
35 | creator.create("Individual", list, fitness=creator.FitnessMin)
36 |
37 |
38 | # helper function for creating random float numbers uniformly distributed within a given range [low, up]
39 | # it assumes that the range is the same for every dimension
40 | def randomFloat(low, up):
41 | return [random.uniform(a, b) for a, b in zip([low] * DIMENSIONS, [up] * DIMENSIONS)]
42 |
43 | # create an operator that randomly returns a float in the desired range and dimension:
44 | toolbox.register("attr_float", randomFloat, BOUND_LOW, BOUND_UP)
45 |
46 | # create the individual operator to fill up an Individual instance:
47 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.attr_float)
48 |
49 | # create the population operator to generate a list of individuals:
50 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
51 |
52 |
53 | # Himmelblau function as the given individual's fitness:
54 | def himmelblau(individual):
55 | x = individual[0]
56 | y = individual[1]
57 | f = (x ** 2 + y - 11) ** 2 + (x + y ** 2 - 7) ** 2
58 | return f, # return a tuple
59 |
60 | toolbox.register("evaluate", himmelblau)
61 |
62 | # genetic operators:
63 | toolbox.register("select", tools.selTournament, tournsize=2)
64 | toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR)
65 | toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR, indpb=1.0/DIMENSIONS)
66 |
67 |
68 | # Genetic Algorithm flow:
69 | def main():
70 |
71 | # create initial population (generation 0):
72 | population = toolbox.populationCreator(n=POPULATION_SIZE)
73 |
74 | # prepare the statistics object:
75 | stats = tools.Statistics(lambda ind: ind.fitness.values)
76 | stats.register("min", np.min)
77 | stats.register("avg", np.mean)
78 |
79 | # define the hall-of-fame object:
80 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
81 |
82 | # perform the Genetic Algorithm flow with elitism:
83 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
84 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
85 |
86 | # print info for best solution found:
87 | best = hof.items[0]
88 | print("-- Best Individual = ", best)
89 | print("-- Best Fitness = ", best.fitness.values[0])
90 |
91 | print("- Best solutions are:")
92 | for i in range(HALL_OF_FAME_SIZE):
93 | print(i, ": ", hof.items[i].fitness.values[0], " -> ", hof.items[i])
94 |
95 | # plot solution locations on x-y plane:
96 | plt.figure(1)
97 | globalMinima = [[3.0, 2.0], [-2.805118, 3.131312], [-3.779310, -3.283186], [3.584458, -1.848126]]
98 | plt.scatter(*zip(*globalMinima), marker='X', color='red', zorder=1)
99 | plt.scatter(*zip(*population), marker='.', color='blue', zorder=0)
100 |
101 | # extract statistics:
102 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
103 |
104 | # plot statistics:
105 | plt.figure(2)
106 | sns.set_style("whitegrid")
107 | plt.plot(minFitnessValues, color='red')
108 | plt.plot(meanFitnessValues, color='green')
109 | plt.xlabel('Generation')
110 | plt.ylabel('Min / Average Fitness')
111 | plt.title('Min and Average fitness over Generations')
112 |
113 | plt.show()
114 |
115 |
116 | if __name__ == "__main__":
117 | main()
118 |
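119 | # Note: Himmelblau's function evaluates to (essentially) 0 at each of the four points listed
120 | # in globalMinima inside main(), e.g. himmelblau([3.0, 2.0]) returns (0.0,), so the reported
121 | # best fitness is expected to approach 0 and the population to cluster around one of the minima.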
--------------------------------------------------------------------------------
/Chapter06/03-optimize-himmelblau-sharing.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy as np
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | import math
12 | import elitism
13 |
14 | # problem constants:
15 | DIMENSIONS = 2 # number of dimensions
16 | BOUND_LOW, BOUND_UP = -5.0, 5.0 # boundaries for all dimensions
17 |
18 | # Genetic Algorithm constants:
19 | POPULATION_SIZE = 300
20 | P_CROSSOVER = 0.9 # probability for crossover
21 | P_MUTATION = 0.5 # probability for mutating an individual
22 | MAX_GENERATIONS = 300
23 | HALL_OF_FAME_SIZE = 30
24 | CROWDING_FACTOR = 20.0 # crowding factor for crossover and mutation
25 |
26 | # sharing constants:
27 | DISTANCE_THRESHOLD = 0.1
28 | SHARING_EXTENT = 5.0
29 |
30 | # set the random seed:
31 | RANDOM_SEED = 42
32 | random.seed(RANDOM_SEED)
33 |
34 | toolbox = base.Toolbox()
35 |
36 | # define a single objective, maximizing fitness strategy:
37 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
38 |
39 | # create the Individual class based on list:
40 | creator.create("Individual", list, fitness=creator.FitnessMax)
41 |
42 |
43 | # helper function for creating random float numbers uniformly distributed within a given range [low, up]
44 | # it assumes that the range is the same for every dimension
45 | def randomFloat(low, up):
46 | return [random.uniform(a, b) for a, b in zip([low] * DIMENSIONS, [up] * DIMENSIONS)]
47 |
48 | # create an operator that randomly returns a float in the desired range and dimension:
49 | toolbox.register("attr_float", randomFloat, BOUND_LOW, BOUND_UP)
50 |
51 | # create the individual operator to fill up an Individual instance:
52 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.attr_float)
53 |
54 | # create the population operator to generate a list of individuals:
55 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
56 |
57 |
58 | # 'Inverted' Himmelblau function as the given individual's fitness:
59 | def himmelblauInverted(individual):
60 | x = individual[0]
61 | y = individual[1]
62 | f = (x ** 2 + y - 11) ** 2 + (x + y ** 2 - 7) ** 2
63 | return 2000.0 - f, # return a tuple
64 |
65 | toolbox.register("evaluate", himmelblauInverted)
66 |
67 | # wraps the tools.selTournament() with fitness sharing
68 | # same signature as tools.selTournament()
69 | def selTournamentWithSharing(individuals, k, tournsize, fit_attr="fitness"):
70 |
71 | # get orig fitnesses:
72 | origFitnesses = [ind.fitness.values[0] for ind in individuals]
73 |
74 | # apply sharing to each individual:
75 | for i in range(len(individuals)):
76 | sharingSum = 1
77 |
78 | # iterate over all other individuals
79 | for j in range(len(individuals)):
80 | if i != j:
81 |                 # calculate the Euclidean distance between individuals:
82 | distance = math.sqrt(
83 | ((individuals[i][0] - individuals[j][0]) ** 2) + ((individuals[i][1] - individuals[j][1]) ** 2))
84 |
85 | if distance < DISTANCE_THRESHOLD:
86 | sharingSum += (1 - distance / (SHARING_EXTENT * DISTANCE_THRESHOLD))
87 |
88 | # reduce fitness accordingly:
89 | individuals[i].fitness.values = origFitnesses[i] / sharingSum,
90 |
91 | # apply original tools.selTournament() using modified fitness:
92 | selected = tools.selTournament(individuals, k, tournsize, fit_attr)
93 |
94 | # retrieve original fitness:
95 | for i, ind in enumerate(individuals):
96 | ind.fitness.values = origFitnesses[i],
97 |
98 | return selected
99 |
100 |
101 | # genetic operators:
102 | toolbox.register("select", selTournamentWithSharing, tournsize=2)
103 | toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR)
104 | toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR, indpb=1.0/DIMENSIONS)
105 |
106 |
107 | # Genetic Algorithm flow:
108 | def main():
109 |
110 | # create initial population (generation 0):
111 | population = toolbox.populationCreator(n=POPULATION_SIZE)
112 |
113 | # prepare the statistics object:
114 | stats = tools.Statistics(lambda ind: ind.fitness.values)
115 | stats.register("max", np.max)
116 | stats.register("avg", np.mean)
117 |
118 | # define the hall-of-fame object:
119 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
120 |
121 | # perform the Genetic Algorithm flow with elitism:
122 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
123 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
124 |
125 | # print info for best solution found:
126 | best = hof.items[0]
127 | print("-- Best Individual = ", best)
128 | print("-- Best Fitness = ", best.fitness.values[0])
129 |
130 | print("- Best solutions are:")
131 | for i in range(HALL_OF_FAME_SIZE):
132 | print(i, ": ", hof.items[i].fitness.values[0], " -> ", hof.items[i])
133 |
134 | # plot solution locations on x-y plane:
135 | plt.figure(1)
136 | globalMaxima = [[3.0, 2.0], [-2.805118, 3.131312], [-3.779310, -3.283186], [3.584458, -1.848126]]
137 | plt.scatter(*zip(*globalMaxima), marker='x', color='red', zorder=1)
138 |     plt.scatter(*zip(*population), marker='.', color='blue', zorder=0)
139 |
140 | # plot best solutions locations on x-y plane:
141 | plt.figure(2)
142 | plt.scatter(*zip(*globalMaxima), marker='x', color='red', zorder=1)
143 | plt.scatter(*zip(*hof.items), marker='.', color='blue', zorder=0)
144 |
145 | # extract statistics:
146 | maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
147 |
148 | # plot statistics:
149 | plt.figure(3)
150 | sns.set_style("whitegrid")
151 | plt.plot(maxFitnessValues, color='red')
152 | plt.plot(meanFitnessValues, color='green')
153 | plt.xlabel('Generation')
154 | plt.ylabel('Max / Average Fitness')
155 | plt.title('Max and Average fitness over Generations')
156 |
157 | plt.show()
158 |
159 |
160 | if __name__ == "__main__":
161 | main()
162 |
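163 | # Worked example of the sharing scheme above (illustrative numbers): with DISTANCE_THRESHOLD = 0.1
164 | # and SHARING_EXTENT = 5.0, a single neighbour at distance 0.05 adds 1 - 0.05/(5.0 * 0.1) = 0.9
165 | # to sharingSum, so that individual's fitness is divided by 1 + 0.9 = 1.9 during selection.
166 | # Crowded individuals are therefore handicapped, pushing the population to spread over several
167 | # of the four (equally good) maxima instead of converging on a single one.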
--------------------------------------------------------------------------------
/Chapter06/04-optimize-simionescu.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy as np
7 | import math
8 |
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import elitism
13 |
14 | # problem constants:
15 | DIMENSIONS = 2 # number of dimensions
16 | BOUND_LOW, BOUND_UP = -1.25, 1.25 # boundaries for all dimensions
17 |
18 | # Genetic Algorithm constants:
19 | POPULATION_SIZE = 300
20 | P_CROSSOVER = 0.9 # probability for crossover
21 | P_MUTATION = 0.5   # probability for mutating an individual (try also 0.1)
22 | MAX_GENERATIONS = 300
23 | HALL_OF_FAME_SIZE = 30
24 | CROWDING_FACTOR = 20.0 # crowding factor for crossover and mutation
25 | PENALTY_VALUE = 10.0 # fixed penalty for violating a constraint
26 |
27 | # set the random seed:
28 | RANDOM_SEED = 42
29 | random.seed(RANDOM_SEED)
30 |
31 | toolbox = base.Toolbox()
32 |
33 | # define a single objective, minimizing fitness strategy:
34 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
35 |
36 | # create the Individual class based on list:
37 | creator.create("Individual", list, fitness=creator.FitnessMin)
38 |
39 |
40 | # helper function for creating random real numbers uniformly distributed within a given range [low, up]
41 | # it assumes that the range is the same for every dimension
42 | def randomFloat(low, up):
43 | return [random.uniform(l, u) for l, u in zip([low] * DIMENSIONS, [up] * DIMENSIONS)]
44 |
45 | # create an operator that randomly returns a float in the desired range and dimension:
46 | toolbox.register("attrFloat", randomFloat, BOUND_LOW, BOUND_UP)
47 |
48 | # create the individual operator to fill up an Individual instance:
49 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.attrFloat)
50 |
51 | # create the population operator to generate a list of individuals:
52 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
53 |
54 |
55 | # Simionescu's function as the given individual's fitness:
56 | def simionescu(individual):
57 | x = individual[0]
58 | y = individual[1]
59 | f = 0.1 * x * y
60 | return f, # return a tuple
61 |
62 | toolbox.register("evaluate", simionescu)
63 |
64 | # define the valid input domain using the constraints:
65 | def feasible(individual):
66 | """Feasibility function for the individual.
67 | Returns True if feasible, False otherwise.
68 | """
69 | x = individual[0]
70 | y = individual[1]
71 | return x**2 + y**2 <= (1 + 0.2 * math.cos(8.0 * math.atan2(x, y)))**2
72 |
73 | # decorate the fitness function with the delta penalty function:
74 | toolbox.decorate("evaluate", tools.DeltaPenalty(feasible, PENALTY_VALUE))
75 |
76 | # genetic operators:
77 | toolbox.register("select", tools.selTournament, tournsize=2)
78 | toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR)
79 | toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR, indpb=1.0/DIMENSIONS)
80 |
81 |
82 | # Genetic Algorithm flow:
83 | def main():
84 |
85 | # create initial population (generation 0):
86 | population = toolbox.populationCreator(n=POPULATION_SIZE)
87 |
88 | # prepare the statistics object:
89 | stats = tools.Statistics(lambda ind: ind.fitness.values)
90 | stats.register("min", np.min)
91 | stats.register("avg", np.mean)
92 |
93 | # define the hall-of-fame object:
94 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
95 |
96 | # perform the Genetic Algorithm flow with elitism:
97 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
98 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
99 |
100 | # print info for best solution found:
101 | best = hof.items[0]
102 | print("-- Best Individual = ", best)
103 | print("-- Best Fitness = ", best.fitness.values[0])
104 |
105 | # extract statistics:
106 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
107 |
108 | # plot statistics:
109 | sns.set_style("whitegrid")
110 | plt.plot(minFitnessValues, color='red')
111 | plt.plot(meanFitnessValues, color='green')
112 | plt.xlabel('Generation')
113 | plt.ylabel('Min / Average Fitness')
114 | plt.title('Min and Average fitness over Generations')
115 |
116 | plt.show()
117 |
118 |
119 | if __name__ == "__main__":
120 | main()
121 |
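122 | # Note: tools.DeltaPenalty assigns the fixed fitness PENALTY_VALUE (10.0) to any individual for
123 | # which feasible() returns False, so infeasible points can never out-compete feasible ones,
124 | # whose raw fitness 0.1 * x * y stays within roughly +/-0.16 over the search box. The commonly
125 | # cited constrained minimum of Simionescu's function is about -0.0726, reached near
126 | # (0.848, -0.848) and its mirror image (-0.848, 0.848).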
--------------------------------------------------------------------------------
/Chapter06/05-optimize-simionescu-second.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy as np
7 | import math
8 |
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import elitism
13 |
14 | # problem constants:
15 | DIMENSIONS = 2 # number of dimensions
16 | BOUND_LOW, BOUND_UP = -1.25, 1.25 # boundaries for all dimensions
17 |
18 | # Genetic Algorithm constants:
19 | POPULATION_SIZE = 300
20 | P_CROSSOVER = 0.9 # probability for crossover
21 | P_MUTATION = 0.5   # probability for mutating an individual (try also 0.1)
22 | MAX_GENERATIONS = 300
23 | HALL_OF_FAME_SIZE = 30
24 | CROWDING_FACTOR = 20.0 # crowding factor for crossover and mutation
25 | PENALTY_VALUE = 10.0 # fixed penalty for violating a constraint
26 | DISTANCE_THRESHOLD = 0.1
27 |
28 | # set the random seed:
29 | RANDOM_SEED = 42
30 | random.seed(RANDOM_SEED)
31 |
32 | toolbox = base.Toolbox()
33 |
34 | # define a single objective, minimizing fitness strategy:
35 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
36 |
37 | # create the Individual class based on list:
38 | creator.create("Individual", list, fitness=creator.FitnessMin)
39 |
40 |
41 | # helper function for creating random real numbers uniformly distributed within a given range [low, up]
42 | # it assumes that the range is the same for every dimension
43 | def randomFloat(low, up):
44 | return [random.uniform(l, u) for l, u in zip([low] * DIMENSIONS, [up] * DIMENSIONS)]
45 |
46 | # create an operator that randomly returns a float in the desired range and dimension:
47 | toolbox.register("attrFloat", randomFloat, BOUND_LOW, BOUND_UP)
48 |
49 | # create the individual operator to fill up an Individual instance:
50 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.attrFloat)
51 |
52 | # create the population operator to generate a list of individuals:
53 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
54 |
55 |
56 | # Simionescu's function as the given individual's fitness:
57 | def simionescu(individual):
58 | x = individual[0]
59 | y = individual[1]
60 | f = 0.1 * x * y
61 | return f, # return a tuple
62 |
63 | toolbox.register("evaluate", simionescu)
64 |
65 | # define the valid input domain using the constraints:
66 | def feasible(individual):
67 | """Feasibility function for the individual.
68 | Returns True if feasible, False otherwise.
69 | """
70 | x = individual[0]
71 | y = individual[1]
72 |
73 | # original constraint:
74 | if x**2 + y**2 > (1 + 0.2 * math.cos(8.0 * math.atan2(x, y)))**2:
75 | return False
76 |
77 | # previously found solution as an additional constraint:
78 | elif (x - 0.848)**2 + (y + 0.848)**2 < DISTANCE_THRESHOLD**2:
79 | return False
80 |
81 | else:
82 | return True
83 |
84 | # decorate the fitness function with the delta penalty function:
85 | toolbox.decorate("evaluate", tools.DeltaPenalty(feasible, PENALTY_VALUE))
86 |
87 | # genetic operators:
88 | toolbox.register("select", tools.selTournament, tournsize=2)
89 | toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR)
90 | toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=CROWDING_FACTOR, indpb=1.0/DIMENSIONS)
91 |
92 |
93 | # Genetic Algorithm flow:
94 | def main():
95 |
96 | # create initial population (generation 0):
97 | population = toolbox.populationCreator(n=POPULATION_SIZE)
98 |
99 | # prepare the statistics object:
100 | stats = tools.Statistics(lambda ind: ind.fitness.values)
101 | stats.register("min", np.min)
102 | stats.register("avg", np.mean)
103 |
104 | # define the hall-of-fame object:
105 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
106 |
107 | # perform the Genetic Algorithm flow with elitism:
108 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
109 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
110 |
111 | # print info for best solution found:
112 | best = hof.items[0]
113 | print("-- Best Individual = ", best)
114 | print("-- Best Fitness = ", best.fitness.values[0])
115 |
116 | # extract statistics:
117 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
118 |
119 | # plot statistics:
120 | sns.set_style("whitegrid")
121 | plt.plot(minFitnessValues, color='red')
122 | plt.plot(meanFitnessValues, color='green')
123 | plt.xlabel('Generation')
124 | plt.ylabel('Min / Average Fitness')
125 | plt.title('Min and Average fitness over Generations')
126 |
127 | plt.show()
128 |
129 |
130 | if __name__ == "__main__":
131 | main()
132 |
--------------------------------------------------------------------------------
/Chapter06/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
/Chapter07/01-solve-friedman.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | import friedman
12 | import elitism
13 |
14 | NUM_OF_FEATURES = 15
15 | NUM_OF_SAMPLES = 60
16 |
17 | # Genetic Algorithm constants:
18 | POPULATION_SIZE = 30
19 | P_CROSSOVER = 0.9 # probability for crossover
20 | P_MUTATION = 0.2 # probability for mutating an individual
21 | MAX_GENERATIONS = 30
22 | HALL_OF_FAME_SIZE = 5
23 |
24 | # set the random seed:
25 | RANDOM_SEED = 42
26 | random.seed(RANDOM_SEED)
27 |
28 | # create the Friedman-1 test class:
29 | friedman = friedman.Friedman1Test(NUM_OF_FEATURES, NUM_OF_SAMPLES, RANDOM_SEED)
30 |
31 | toolbox = base.Toolbox()
32 |
33 | # define a single objective, minimizing fitness strategy:
34 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
35 |
36 | # create the Individual class based on list:
37 | creator.create("Individual", list, fitness=creator.FitnessMin)
38 |
39 | # create an operator that randomly returns 0 or 1:
40 | toolbox.register("zeroOrOne", random.randint, 0, 1)
41 |
42 | # create the individual operator to fill up an Individual instance:
43 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, len(friedman))
44 |
45 | # create the population operator to generate a list of individuals:
46 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
47 |
48 |
49 | # fitness calculation
50 | def friedmanTestScore(individual):
51 | return friedman.getMSE(individual), # return a tuple
52 |
53 |
54 | toolbox.register("evaluate", friedmanTestScore)
55 |
56 | # genetic operators for binary list:
57 | toolbox.register("select", tools.selTournament, tournsize=2)
58 | toolbox.register("mate", tools.cxTwoPoint)
59 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/len(friedman))
60 |
61 |
62 | # Genetic Algorithm flow:
63 | def main():
64 |
65 | # create initial population (generation 0):
66 | population = toolbox.populationCreator(n=POPULATION_SIZE)
67 |
68 | # prepare the statistics object:
69 | stats = tools.Statistics(lambda ind: ind.fitness.values)
70 | stats.register("min", numpy.min)
71 | stats.register("avg", numpy.mean)
72 |
73 | # define the hall-of-fame object:
74 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
75 |
76 | # perform the Genetic Algorithm flow with hof feature added:
77 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
78 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
79 |
80 | # print best solution found:
81 | best = hof.items[0]
82 | print("-- Best Ever Individual = ", best)
83 | print("-- Best Ever Fitness = ", best.fitness.values[0])
84 |
85 | # extract statistics:
86 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
87 |
88 | # plot statistics:
89 | sns.set_style("whitegrid")
90 | plt.plot(minFitnessValues, color='red')
91 | plt.plot(meanFitnessValues, color='green')
92 | plt.xlabel('Generation')
93 | plt.ylabel('Min / Average Fitness')
94 | plt.title('Min and Average fitness over Generations')
95 | plt.show()
96 |
97 |
98 | if __name__ == "__main__":
99 | main()
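100 |
101 | # Note: sklearn's make_friedman1 builds the regression target from the first five features only,
102 | # so a good individual is expected to keep (most of) features 0-4 and drop the ten noise features;
103 | # the mask [1]*5 + [0]*10 is therefore a useful reference point when reading the GA's best individual.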
--------------------------------------------------------------------------------
/Chapter07/02-solve-zoo.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | import zoo
12 | import elitism
13 |
14 | # Genetic Algorithm constants:
15 | POPULATION_SIZE = 50
16 | P_CROSSOVER = 0.9 # probability for crossover
17 | P_MUTATION = 0.2 # probability for mutating an individual
18 | MAX_GENERATIONS = 50
19 | HALL_OF_FAME_SIZE = 5
20 |
21 | FEATURE_PENALTY_FACTOR = 0.001
22 |
23 | # set the random seed:
24 | RANDOM_SEED = 42
25 | random.seed(RANDOM_SEED)
26 |
27 | # create the Zoo test class:
28 | zoo = zoo.Zoo(RANDOM_SEED)
29 |
30 | toolbox = base.Toolbox()
31 |
32 | # define a single objective, maximizing fitness strategy:
33 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
34 |
35 | # create the Individual class based on list:
36 | creator.create("Individual", list, fitness=creator.FitnessMax)
37 |
38 | # create an operator that randomly returns 0 or 1:
39 | toolbox.register("zeroOrOne", random.randint, 0, 1)
40 |
41 | # create the individual operator to fill up an Individual instance:
42 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, len(zoo))
43 |
44 | # create the population operator to generate a list of individuals:
45 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
46 |
47 |
48 | # fitness calculation
49 | def zooClassificationAccuracy(individual):
50 | numFeaturesUsed = sum(individual)
51 | if numFeaturesUsed == 0:
52 | return 0.0,
53 | else:
54 | accuracy = zoo.getMeanAccuracy(individual)
55 | return accuracy - FEATURE_PENALTY_FACTOR * numFeaturesUsed, # return a tuple
56 |
57 |
58 | toolbox.register("evaluate", zooClassificationAccuracy)
59 |
60 | # genetic operators:
61 |
62 | # Tournament selection with tournament size of 2:
63 | toolbox.register("select", tools.selTournament, tournsize=2)
64 |
65 | # Two-point crossover:
66 | toolbox.register("mate", tools.cxTwoPoint)
67 |
68 | # Flip-bit mutation:
69 | # indpb: Independent probability for each attribute to be flipped
70 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/len(zoo))
71 |
72 |
73 | # Genetic Algorithm flow:
74 | def main():
75 |
76 | # create initial population (generation 0):
77 | population = toolbox.populationCreator(n=POPULATION_SIZE)
78 |
79 | # prepare the statistics object:
80 | stats = tools.Statistics(lambda ind: ind.fitness.values)
81 | stats.register("max", numpy.max)
82 | stats.register("avg", numpy.mean)
83 |
84 | # define the hall-of-fame object:
85 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
86 |
87 | # perform the Genetic Algorithm flow with hof feature added:
88 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
89 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
90 |
91 | # print best solution found:
92 | print("- Best solutions are:")
93 | for i in range(HALL_OF_FAME_SIZE):
94 | print(i, ": ", hof.items[i], ", fitness = ", hof.items[i].fitness.values[0],
95 | ", accuracy = ", zoo.getMeanAccuracy(hof.items[i]), ", features = ", sum(hof.items[i]))
96 |
97 | # extract statistics:
98 | maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
99 |
100 | # plot statistics:
101 | sns.set_style("whitegrid")
102 | plt.plot(maxFitnessValues, color='red')
103 | plt.plot(meanFitnessValues, color='green')
104 | plt.xlabel('Generation')
105 | plt.ylabel('Max / Average Fitness')
106 | plt.title('Max and Average fitness over Generations')
107 | plt.show()
108 |
109 |
110 | if __name__ == "__main__":
111 | main()
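112 |
113 | # Worked example of the fitness above (illustrative numbers): an individual reaching a mean k-fold
114 | # accuracy of 0.91 while using 6 of the 16 features gets a fitness of 0.91 - 0.001 * 6 = 0.904,
115 | # so between two equally accurate feature subsets the smaller one wins the comparison.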
--------------------------------------------------------------------------------
/Chapter07/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
/Chapter07/friedman.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from sklearn import model_selection
4 | from sklearn import datasets
5 |
6 | from sklearn.ensemble import GradientBoostingRegressor
7 | from sklearn.metrics import mean_squared_error
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | class Friedman1Test:
12 | """This class encapsulates the Friedman1 regression test for feature selection
13 | """
14 |
15 | VALIDATION_SIZE = 0.20
16 | NOISE = 1.0
17 |
18 | def __init__(self, numFeatures, numSamples, randomSeed):
19 | """
20 | :param numFeatures: total number of features to be used (at least 5)
21 | :param numSamples: number of samples in dataset
22 | :param randomSeed: random seed value used for reproducible results
23 | """
24 |
25 | self.numFeatures = numFeatures
26 | self.numSamples = numSamples
27 | self.randomSeed = randomSeed
28 |
29 | # generate test data:
30 | self.X, self.y = datasets.make_friedman1(n_samples=self.numSamples, n_features=self.numFeatures,
31 | noise=self.NOISE, random_state=self.randomSeed)
32 |
33 | # divide the data to a training set and a validation set:
34 | self.X_train, self.X_validation, self.y_train, self.y_validation = \
35 | model_selection.train_test_split(self.X, self.y, test_size=self.VALIDATION_SIZE, random_state=self.randomSeed)
36 |
37 | self.regressor = GradientBoostingRegressor(random_state=self.randomSeed)
38 |
39 | def __len__(self):
40 | """
41 | :return: the total number of features
42 | """
43 | return self.numFeatures
44 |
45 |
46 | def getMSE(self, zeroOneList):
47 | """
48 | returns the mean squared error of the regressor, calculated for the validation set, after training
49 | using the features selected by the zeroOneList
50 |         :param zeroOneList: a list of binary values corresponding to the features in the dataset. A value of '1'
51 | represents selecting the corresponding feature, while a value of '0' means that the feature is dropped.
52 | :return: the mean squared error of the regressor when using the features selected by the zeroOneList
53 | """
54 |
55 | # drop the columns of the training and validation sets that correspond to the
56 | # unselected features:
57 | zeroIndices = [i for i, n in enumerate(zeroOneList) if n == 0]
58 | currentX_train = np.delete(self.X_train, zeroIndices, 1)
59 | currentX_validation = np.delete(self.X_validation, zeroIndices, 1)
60 |
61 |         # train the regression model using the training set:
62 | self.regressor.fit(currentX_train, self.y_train)
63 |
64 | # calculate the regressor's output for the validation set:
65 | prediction = self.regressor.predict(currentX_validation)
66 |
67 |         # return the mean squared error of the prediction vs the actual data:
68 | return mean_squared_error(self.y_validation, prediction)
69 |
70 |
71 | # testing the class:
72 | def main():
73 | # create a test instance:
74 | test = Friedman1Test(numFeatures=15, numSamples=60, randomSeed=42)
75 |
76 | scores = []
77 | # calculate MSE for 'n' first features:
78 | for n in range(1, len(test) + 1):
79 | nFirstFeatures = [1] * n + [0] * (len(test) - n)
80 | score = test.getMSE(nFirstFeatures)
81 | print("%d first features: score = %f" % (n, score))
82 | scores.append(score)
83 |
84 | # plot graph:
85 | sns.set_style("whitegrid")
86 | plt.plot([i + 1 for i in range(len(test))], scores, color='red')
87 | plt.xticks(np.arange(1, len(test) + 1, 1.0))
88 | plt.xlabel('n First Features')
89 | plt.ylabel('MSE')
90 | plt.title('MSE over Features Selected')
91 | plt.show()
92 |
93 |
94 | if __name__ == "__main__":
95 | main()
96 |
--------------------------------------------------------------------------------
/Chapter07/zoo.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | from pandas import read_csv
4 |
5 | from sklearn import model_selection
6 | from sklearn.tree import DecisionTreeClassifier
7 |
8 |
9 | class Zoo:
10 |     """This class encapsulates the Zoo classification test for feature selection
11 | """
12 |
13 | DATASET_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/zoo/zoo.data'
14 | NUM_FOLDS = 5
15 |
16 | def __init__(self, randomSeed):
17 | """
18 | :param randomSeed: random seed value used for reproducible results
19 | """
20 | self.randomSeed = randomSeed
21 |
22 | # read the dataset, skipping the first columns (animal name):
23 | self.data = read_csv(self.DATASET_URL, header=None, usecols=range(1, 18))
24 |
25 | # separate to input features and resulting category (last column):
26 | self.X = self.data.iloc[:, 0:16]
27 | self.y = self.data.iloc[:, 16]
28 |
29 | # split the data, creating a group of training/validation sets to be used in the k-fold validation process:
30 | self.kfold = model_selection.KFold(n_splits=self.NUM_FOLDS, random_state=self.randomSeed)
31 |
32 | self.classifier = DecisionTreeClassifier(random_state=self.randomSeed)
33 |
34 | def __len__(self):
35 | """
36 | :return: the total number of features used in this classification problem
37 | """
38 | return self.X.shape[1]
39 |
40 | def getMeanAccuracy(self, zeroOneList):
41 | """
42 |         returns the mean accuracy measure of the classifier, calculated using the k-fold validation process,
43 | using the features selected by the zeroOneList
44 |         :param zeroOneList: a list of binary values corresponding to the features in the dataset. A value of '1'
45 | represents selecting the corresponding feature, while a value of '0' means that the feature is dropped.
46 |         :return: the mean accuracy measure of the classifier when using the features selected by the zeroOneList
47 | """
48 |
49 | # drop the dataset columns that correspond to the unselected features:
50 | zeroIndices = [i for i, n in enumerate(zeroOneList) if n == 0]
51 | currentX = self.X.drop(self.X.columns[zeroIndices], axis=1)
52 |
53 | # perform k-fold validation and determine the accuracy measure of the classifier:
54 | cv_results = model_selection.cross_val_score(self.classifier, currentX, self.y, cv=self.kfold, scoring='accuracy')
55 |
56 | # return mean accuracy:
57 | return cv_results.mean()
58 |
59 |
60 | # testing the class:
61 | def main():
62 | # create a problem instance:
63 | zoo = Zoo(randomSeed=42)
64 |
65 | allOnes = [1] * len(zoo)
66 | print("-- All features selected: ", allOnes, ", accuracy = ", zoo.getMeanAccuracy(allOnes))
67 |
68 |
69 | if __name__ == "__main__":
70 | main()
71 |
--------------------------------------------------------------------------------
/Chapter08/01-hyperparameter-tuning-grid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import time
3 | import random
4 |
5 | from sklearn import model_selection
6 | from sklearn.ensemble import AdaBoostClassifier
7 | from sklearn.model_selection import GridSearchCV
8 |
9 | from pandas import read_csv
10 | from evolutionary_search import EvolutionaryAlgorithmSearchCV
11 |
12 |
13 | class HyperparameterTuningGrid:
14 |
15 | NUM_FOLDS = 5
16 |
17 | def __init__(self, randomSeed):
18 |
19 | self.randomSeed = randomSeed
20 | self.initWineDataset()
21 | self.initClassifier()
22 | self.initKfold()
23 | self.initGridParams()
24 |
25 | def initWineDataset(self):
26 | url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
27 |
28 | self.data = read_csv(url, header=None, usecols=range(0, 14))
29 | self.X = self.data.iloc[:, 1:14]
30 | self.y = self.data.iloc[:, 0]
31 |
32 | def initClassifier(self):
33 | self.classifier = AdaBoostClassifier(random_state=self.randomSeed)
34 |
35 | def initKfold(self):
36 | self.kfold = model_selection.KFold(n_splits=self.NUM_FOLDS,
37 | random_state=self.randomSeed)
38 |
39 | def initGridParams(self):
40 | self.gridParams = {
41 | 'n_estimators': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
42 | 'learning_rate': np.logspace(-2, 0, num=10, base=10),
43 | 'algorithm': ['SAMME', 'SAMME.R'],
44 | }
45 |
46 | def getDefaultAccuracy(self):
47 | cv_results = model_selection.cross_val_score(self.classifier,
48 | self.X,
49 | self.y,
50 | cv=self.kfold,
51 | scoring='accuracy')
52 | return cv_results.mean()
53 |
54 | def gridTest(self):
55 | print("performing grid search...")
56 |
57 | gridSearch = GridSearchCV(estimator=self.classifier,
58 | param_grid=self.gridParams,
59 | cv=self.kfold,
60 | scoring='accuracy',
61 |                                   iid=False,
62 | n_jobs=4)
63 |
64 | gridSearch.fit(self.X, self.y)
65 | print("best parameters: ", gridSearch.best_params_)
66 | print("best score: ", gridSearch.best_score_)
67 |
68 | def geneticGridTest(self):
69 | print("performing Genetic grid search...")
70 |
71 | gridSearch = EvolutionaryAlgorithmSearchCV(estimator=self.classifier,
72 | params=self.gridParams,
73 | cv=self.kfold,
74 | scoring='accuracy',
75 | verbose=True,
76 |                                                    iid=False,
77 | n_jobs=4,
78 | population_size=20,
79 | gene_mutation_prob=0.30,
80 | tournament_size=2,
81 | generations_number=5)
82 | gridSearch.fit(self.X, self.y)
83 |
84 |
85 | def main():
86 | RANDOM_SEED = 42
87 | random.seed(RANDOM_SEED)
88 |
89 | # create a problem instance:
90 | test = HyperparameterTuningGrid(RANDOM_SEED)
91 |
92 | print("Default Classifier Hyperparameter values:")
93 | print(test.classifier.get_params())
94 | print("score with default values = ", test.getDefaultAccuracy())
95 |
96 | print()
97 | start = time.time()
98 | test.gridTest()
99 | end = time.time()
100 | print("Time Elapsed = ", end - start)
101 |
102 | print()
103 | start = time.time()
104 | test.geneticGridTest()
105 | end = time.time()
106 | print("Time Elapsed = ", end - start)
107 |
108 |
109 | if __name__ == "__main__":
110 | main()
111 |
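112 | # Note on the search space above: gridParams spans 10 n_estimators values x 10 learning_rate
113 | # values x 2 algorithms = 200 combinations, each scored with 5-fold cross-validation, i.e.
114 | # 1000 model fits for the exhaustive grid search. The evolutionary search evaluates only a
115 | # subset of these combinations, which is where its runtime advantage comes from.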
--------------------------------------------------------------------------------
/Chapter08/02-hyperparameter-tuning-genetic.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | import hyperparameter_tuning_genetic_test
12 | import elitism
13 |
14 | # boundaries for ADABOOST parameters:
15 | # "n_estimators": 1..100
16 | # "learning_rate": 0.01..1.00
17 | # "algorithm": 0, 1
18 | # [n_estimators, learning_rate, algorithm]:
19 | BOUNDS_LOW = [ 1, 0.01, 0]
20 | BOUNDS_HIGH = [100, 1.00, 1]
21 |
22 | NUM_OF_PARAMS = len(BOUNDS_HIGH)
23 |
24 | # Genetic Algorithm constants:
25 | POPULATION_SIZE = 20
26 | P_CROSSOVER = 0.9 # probability for crossover
27 | P_MUTATION = 0.5 # probability for mutating an individual
28 | MAX_GENERATIONS = 5
29 | HALL_OF_FAME_SIZE = 5
30 | CROWDING_FACTOR = 20.0 # crowding factor for crossover and mutation
31 |
32 | # set the random seed:
33 | RANDOM_SEED = 42
34 | random.seed(RANDOM_SEED)
35 |
36 | # create the classifier accuracy test class:
37 | test = hyperparameter_tuning_genetic_test.HyperparameterTuningGenetic(RANDOM_SEED)
38 |
39 | toolbox = base.Toolbox()
40 |
41 | # define a single objective, maximizing fitness strategy:
42 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
43 |
44 | # create the Individual class based on list:
45 | creator.create("Individual", list, fitness=creator.FitnessMax)
46 |
47 | # define the hyperparameter attributes individually:
48 | for i in range(NUM_OF_PARAMS):
49 | # "hyperparameter_0", "hyperparameter_1", ...
50 | toolbox.register("hyperparameter_" + str(i),
51 | random.uniform,
52 | BOUNDS_LOW[i],
53 | BOUNDS_HIGH[i])
54 |
55 | # create a tuple containing an attribute generator for each param searched:
56 | hyperparameters = ()
57 | for i in range(NUM_OF_PARAMS):
58 | hyperparameters = hyperparameters + \
59 | (toolbox.__getattribute__("hyperparameter_" + str(i)),)
60 |
61 | # create the individual operator to fill up an Individual instance:
62 | toolbox.register("individualCreator",
63 | tools.initCycle,
64 | creator.Individual,
65 | hyperparameters,
66 | n=1)
67 |
68 | # create the population operator to generate a list of individuals:
69 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
70 |
71 | # fitness calculation
72 | def classificationAccuracy(individual):
73 | return test.getAccuracy(individual),
74 |
75 | toolbox.register("evaluate", classificationAccuracy)
76 |
77 | # genetic operators:
78 |
79 | # Tournament selection with tournament size of 2:
80 | toolbox.register("select", tools.selTournament, tournsize=2)
81 | toolbox.register("mate",
82 | tools.cxSimulatedBinaryBounded,
83 | low=BOUNDS_LOW,
84 | up=BOUNDS_HIGH,
85 | eta=CROWDING_FACTOR)
86 |
87 | toolbox.register("mutate",
88 | tools.mutPolynomialBounded,
89 | low=BOUNDS_LOW,
90 | up=BOUNDS_HIGH,
91 | eta=CROWDING_FACTOR,
92 | indpb=1.0 / NUM_OF_PARAMS)
93 |
94 |
95 | # Genetic Algorithm flow:
96 | def main():
97 |
98 | # create initial population (generation 0):
99 | population = toolbox.populationCreator(n=POPULATION_SIZE)
100 |
101 | # prepare the statistics object:
102 | stats = tools.Statistics(lambda ind: ind.fitness.values)
103 | stats.register("max", numpy.max)
104 | stats.register("avg", numpy.mean)
105 |
106 | # define the hall-of-fame object:
107 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
108 |
109 | # perform the Genetic Algorithm flow with hof feature added:
110 | population, logbook = elitism.eaSimpleWithElitism(population,
111 | toolbox,
112 | cxpb=P_CROSSOVER,
113 | mutpb=P_MUTATION,
114 | ngen=MAX_GENERATIONS,
115 | stats=stats,
116 | halloffame=hof,
117 | verbose=True)
118 |
119 | # print best solution found:
120 | print("- Best solution is: ")
121 | print("params = ", test.formatParams(hof.items[0]))
122 | print("Accuracy = %1.5f" % hof.items[0].fitness.values[0])
123 |
124 | # extract statistics:
125 | maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
126 |
127 | # plot statistics:
128 | sns.set_style("whitegrid")
129 | plt.plot(maxFitnessValues, color='red')
130 | plt.plot(meanFitnessValues, color='green')
131 | plt.xlabel('Generation')
132 | plt.ylabel('Max / Average Fitness')
133 | plt.title('Max and Average fitness over Generations')
134 | plt.show()
135 |
136 |
137 | if __name__ == "__main__":
138 | main()
--------------------------------------------------------------------------------
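A note on the individual creation above: the loop registers one uniform-float generator per hyperparameter, and tools.initCycle with n=1 then calls each generator once to build a single mixed-range chromosome. A minimal, self-contained sketch of that mechanism (the printed values are examples only and not part of the chapter code):

import random
from deap import tools

random.seed(42)

BOUNDS_LOW = [1, 0.01, 0]      # same bounds as above: [n_estimators, learning_rate, algorithm]
BOUNDS_HIGH = [100, 1.00, 1]

# one uniform generator per hyperparameter, each frozen to its own bounds via default arguments:
generators = tuple(
    (lambda low=low, high=high: random.uniform(low, high))
    for low, high in zip(BOUNDS_LOW, BOUNDS_HIGH)
)

# initCycle with n=1 calls each generator exactly once, yielding one chromosome:
chromosome = tools.initCycle(list, generators, n=1)
print(chromosome)   # e.g. [64.3, 0.03, 0.28] -> one float per hyperparameter, each within its bounds
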
/Chapter08/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
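As the docstring notes, the hall-of-fame members act as elites. The size bookkeeping that keeps the population constant can be sketched as follows (a toy illustration with the constants used elsewhere in this chapter, not part of the module):

POPULATION_SIZE = 20
HALL_OF_FAME_SIZE = 5

selected = POPULATION_SIZE - HALL_OF_FAME_SIZE   # individuals passed to crossover and mutation
next_generation = selected + HALL_OF_FAME_SIZE   # elites appended back untouched
assert next_generation == POPULATION_SIZE        # population size stays constant every generation
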
/Chapter08/hyperparameter_tuning_genetic_test.py:
--------------------------------------------------------------------------------
1 | from sklearn import model_selection
2 | from sklearn.ensemble import AdaBoostClassifier
3 |
4 | from pandas import read_csv
5 |
6 |
7 | class HyperparameterTuningGenetic:
8 |
9 | NUM_FOLDS = 5
10 |
11 | def __init__(self, randomSeed):
12 |
13 | self.randomSeed = randomSeed
14 | self.initWineDataset()
15 | self.kfold = model_selection.KFold(n_splits=self.NUM_FOLDS, shuffle=True, random_state=self.randomSeed)
16 |
17 | def initWineDataset(self):
18 | url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
19 |
20 | self.data = read_csv(url, header=None, usecols=range(0, 14))
21 | self.X = self.data.iloc[:, 1:14]
22 | self.y = self.data.iloc[:, 0]
23 |
24 | # ADABoost [n_estimators, learning_rate, algorithm]:
25 | # "n_estimators": integer
26 | # "learning_rate": float
27 | # "algorithm": {'SAMME', 'SAMME.R'}
28 | def convertParams(self, params):
29 | n_estimators = round(params[0]) # round to nearest integer
30 | learning_rate = params[1] # no conversion needed
31 | algorithm = ['SAMME', 'SAMME.R'][round(params[2])] # round to 0 or 1, then use as index
32 | return n_estimators, learning_rate, algorithm
33 |
34 | def getAccuracy(self, params):
35 | n_estimators, learning_rate, algorithm = self.convertParams(params)
36 | self.classifier = AdaBoostClassifier(random_state=self.randomSeed,
37 | n_estimators=n_estimators,
38 | learning_rate=learning_rate,
39 | algorithm=algorithm
40 | )
41 |
42 | cv_results = model_selection.cross_val_score(self.classifier,
43 | self.X,
44 | self.y,
45 | cv=self.kfold,
46 | scoring='accuracy')
47 | return cv_results.mean()
48 |
49 | def formatParams(self, params):
50 | return "'n_estimators'=%3d, 'learning_rate'=%1.3f, 'algorithm'=%s" % (self.convertParams(params))
51 |
--------------------------------------------------------------------------------
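convertParams above decodes a three-float chromosome into concrete AdaBoostClassifier arguments by rounding. A short illustration (the chromosome values are made up; constructing the class downloads the UCI wine dataset):

from hyperparameter_tuning_genetic_test import HyperparameterTuningGenetic

test = HyperparameterTuningGenetic(randomSeed=42)   # note: fetches the wine data on construction
chromosome = [41.7, 0.362, 0.81]                    # [n_estimators, learning_rate, algorithm]
print(test.convertParams(chromosome))               # -> (42, 0.362, 'SAMME.R')
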
/Chapter09/01-optimize-mlp-layers.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import mlp_layers_test
9 | import elitism
10 |
11 | # boundaries for layer size parameters:
12 | # [hidden_layer_1_size, hidden_layer_2_size, hidden_layer_3_size, hidden_layer_4_size]
13 | BOUNDS_LOW = [ 5, -5, -10, -20]
14 | BOUNDS_HIGH = [15, 10, 10, 10]
15 |
16 | NUM_OF_PARAMS = len(BOUNDS_HIGH)
17 |
18 | # Genetic Algorithm constants:
19 | POPULATION_SIZE = 20
20 | P_CROSSOVER = 0.9 # probability for crossover
21 | P_MUTATION = 0.5 # probability for mutating an individual
22 | MAX_GENERATIONS = 10
23 | HALL_OF_FAME_SIZE = 3
24 | CROWDING_FACTOR = 10.0 # crowding factor for crossover and mutation
25 |
26 | # set the random seed:
27 | RANDOM_SEED = 42
28 | random.seed(RANDOM_SEED)
29 |
30 | # create the classifier accuracy test class:
31 | test = mlp_layers_test.MlpLayersTest(RANDOM_SEED)
32 |
33 | toolbox = base.Toolbox()
34 |
35 | # define a single objective, maximizing fitness strategy:
36 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
37 |
38 | # create the Individual class based on list:
39 | creator.create("Individual", list, fitness=creator.FitnessMax)
40 |
41 | # define the layer_size_attributes individually:
42 | for i in range(NUM_OF_PARAMS):
43 | # "layer_size_attribute_0", "layer_size_attribute_1", ...
44 | toolbox.register("layer_size_attribute_" + str(i),
45 | random.uniform,
46 | BOUNDS_LOW[i],
47 | BOUNDS_HIGH[i])
48 |
49 | # create a tuple containing a layer_size_attribute generator for each hidden layer:
50 | layer_size_attributes = ()
51 | for i in range(NUM_OF_PARAMS):
52 | layer_size_attributes = layer_size_attributes + \
53 | (toolbox.__getattribute__("layer_size_attribute_" + str(i)),)
54 |
55 | # create the individual operator to fill up an Individual instance:
56 | toolbox.register("individualCreator",
57 | tools.initCycle,
58 | creator.Individual,
59 | layer_size_attributes,
60 | n=1)
61 |
62 | # create the population operator to generate a list of individuals:
63 | toolbox.register("populationCreator",
64 | tools.initRepeat,
65 | list,
66 | toolbox.individualCreator)
67 |
68 |
69 | # fitness calculation
70 | def classificationAccuracy(individual):
71 | return test.getAccuracy(individual),
72 |
73 |
74 | toolbox.register("evaluate", classificationAccuracy)
75 |
76 |
77 |
78 | # genetic operators:
79 | toolbox.register("select", tools.selTournament, tournsize=2)
80 |
81 | toolbox.register("mate",
82 | tools.cxSimulatedBinaryBounded,
83 | low=BOUNDS_LOW,
84 | up=BOUNDS_HIGH,
85 | eta=CROWDING_FACTOR)
86 |
87 | toolbox.register("mutate",
88 | tools.mutPolynomialBounded,
89 | low=BOUNDS_LOW,
90 | up=BOUNDS_HIGH,
91 | eta=CROWDING_FACTOR,
92 | indpb=1.0/NUM_OF_PARAMS)
93 |
94 |
95 | # Genetic Algorithm flow:
96 | def main():
97 |
98 | # create initial population (generation 0):
99 | population = toolbox.populationCreator(n=POPULATION_SIZE)
100 |
101 | # prepare the statistics object:
102 | stats = tools.Statistics(lambda ind: ind.fitness.values)
103 | stats.register("max", numpy.max)
104 | stats.register("avg", numpy.mean)
105 |
106 | # define the hall-of-fame object:
107 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
108 |
109 | # perform the Genetic Algorithm flow with hof feature added:
110 | population, logbook = elitism.eaSimpleWithElitism(population,
111 | toolbox,
112 | cxpb=P_CROSSOVER,
113 | mutpb=P_MUTATION,
114 | ngen=MAX_GENERATIONS,
115 | stats=stats,
116 | halloffame=hof,
117 | verbose=True)
118 |
119 | # print best solution found:
120 | print("- Best solution is: ",
121 | test.formatParams(hof.items[0]),
122 | ", accuracy = ",
123 | hof.items[0].fitness.values[0])
124 |
125 |
126 |
127 | if __name__ == "__main__":
128 | main()
--------------------------------------------------------------------------------
/Chapter09/02-optimize-mlp-hyperparameters.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import mlp_hyperparameters_test
9 | import elitism
10 |
11 | # boundaries for all parameters:
12 | # 'hidden_layer_sizes': first four values
13 | # 'activation': ['tanh', 'relu', 'logistic'] -> 0, 1, 2
14 | # 'solver': ['sgd', 'adam', 'lbfgs'] -> 0, 1, 2
15 | # 'alpha': float in the range of [0.0001, 2.0],
16 | # 'learning_rate': ['constant', 'invscaling', 'adaptive'] -> 0, 1, 2
17 | BOUNDS_LOW = [ 5, -5, -10, -20, 0, 0, 0.0001, 0 ]
18 | BOUNDS_HIGH = [15, 10, 10, 10, 2.999, 2.999, 2.0, 2.999]
19 |
20 | NUM_OF_PARAMS = len(BOUNDS_HIGH)
21 |
22 | # Genetic Algorithm constants:
23 | POPULATION_SIZE = 20
24 | P_CROSSOVER = 0.9 # probability for crossover
25 | P_MUTATION = 0.5 # probability for mutating an individual
26 | MAX_GENERATIONS = 5
27 | HALL_OF_FAME_SIZE = 3
28 | CROWDING_FACTOR = 10.0 # crowding factor for crossover and mutation
29 |
30 | # set the random seed:
31 | RANDOM_SEED = 42
32 | random.seed(RANDOM_SEED)
33 |
34 | # create the classifier accuracy test class:
35 | test = mlp_hyperparameters_test.MlpHyperparametersTest(RANDOM_SEED)
36 |
37 | toolbox = base.Toolbox()
38 |
39 | # define a single objective, maximizing fitness strategy:
40 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
41 |
42 | # create the Individual class based on list:
43 | creator.create("Individual", list, fitness=creator.FitnessMax)
44 |
45 | # define the hyperparameter attributes individually:
46 | for i in range(NUM_OF_PARAMS):
47 | # "attribute_0", "attribute_1", ...
48 | toolbox.register("attribute_" + str(i),
49 | random.uniform,
50 | BOUNDS_LOW[i],
51 | BOUNDS_HIGH[i])
52 |
53 | # create a tuple containing an attribute generator for each param searched:
54 | attributes = ()
55 | for i in range(NUM_OF_PARAMS):
56 | attributes = attributes + (toolbox.__getattribute__("attribute_" + str(i)),)
57 |
58 | # create the individual operator to fill up an Individual instance:
59 | toolbox.register("individualCreator",
60 | tools.initCycle,
61 | creator.Individual,
62 | attributes,
63 | n=1)
64 |
65 | # create the population operator to generate a list of individuals:
66 | toolbox.register("populationCreator",
67 | tools.initRepeat,
68 | list,
69 | toolbox.individualCreator)
70 |
71 |
72 | # fitness calculation
73 | def classificationAccuracy(individual):
74 | return test.getAccuracy(individual),
75 |
76 |
77 | toolbox.register("evaluate", classificationAccuracy)
78 |
79 |
80 |
81 | # genetic operators:
82 | toolbox.register("select", tools.selTournament, tournsize=2)
83 |
84 | toolbox.register("mate",
85 | tools.cxSimulatedBinaryBounded,
86 | low=BOUNDS_LOW,
87 | up=BOUNDS_HIGH,
88 | eta=CROWDING_FACTOR)
89 |
90 | toolbox.register("mutate",
91 | tools.mutPolynomialBounded,
92 | low=BOUNDS_LOW,
93 | up=BOUNDS_HIGH,
94 | eta=CROWDING_FACTOR,
95 | indpb=1.0/NUM_OF_PARAMS)
96 |
97 |
98 | # Genetic Algorithm flow:
99 | def main():
100 |
101 | # create initial population (generation 0):
102 | population = toolbox.populationCreator(n=POPULATION_SIZE)
103 |
104 | # prepare the statistics object:
105 | stats = tools.Statistics(lambda ind: ind.fitness.values)
106 | stats.register("max", numpy.max)
107 | stats.register("avg", numpy.mean)
108 |
109 | # define the hall-of-fame object:
110 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
111 |
112 | # perform the Genetic Algorithm flow with hof feature added:
113 | population, logbook = elitism.eaSimpleWithElitism(population,
114 | toolbox,
115 | cxpb=P_CROSSOVER,
116 | mutpb=P_MUTATION,
117 | ngen=MAX_GENERATIONS,
118 | stats=stats,
119 | halloffame=hof,
120 | verbose=True)
121 |
122 | # print best solution found:
123 | print("- Best solution is: \n",
124 | test.formatParams(hof.items[0]),
125 | "\n => accuracy = ",
126 | hof.items[0].fitness.values[0])
127 |
128 |
129 |
130 | if __name__ == "__main__":
131 | main()
--------------------------------------------------------------------------------
/Chapter09/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
/Chapter09/mlp_hyperparameters_test.py:
--------------------------------------------------------------------------------
1 | from sklearn import model_selection
2 | from sklearn import datasets
3 | from sklearn.neural_network import MLPClassifier
4 |
5 | from sklearn.exceptions import ConvergenceWarning
6 | from sklearn.utils.testing import ignore_warnings
7 |
8 | from math import floor
9 |
10 | class MlpHyperparametersTest:
11 |
12 | NUM_FOLDS = 5
13 |
14 | def __init__(self, randomSeed):
15 |
16 | self.randomSeed = randomSeed
17 | self.initDataset()
18 | self.kfold = model_selection.KFold(n_splits=self.NUM_FOLDS, shuffle=True, random_state=self.randomSeed)
19 |
20 | def initDataset(self):
21 | self.data = datasets.load_iris()
22 |
23 | self.X = self.data['data']
24 | self.y = self.data['target']
25 |
26 |
27 | # params contains floats representing the following:
28 | # 'hidden_layer_sizes': up to 4 positive integers
29 | # 'activation': {'tanh', 'relu', 'logistic'},
30 | # 'solver': {'sgd', 'adam', 'lbfgs'},
31 | # 'alpha': float,
32 | # 'learning_rate': {'constant', 'invscaling', 'adaptive'}
33 | def convertParams(self, params):
34 |
35 | # transform the layer sizes from float (possibly negative) values into hiddenLayerSizes tuple:
36 | if round(params[1]) <= 0:
37 | hiddenLayerSizes = round(params[0]),
38 | elif round(params[2]) <= 0:
39 | hiddenLayerSizes = (round(params[0]), round(params[1]))
40 | elif round(params[3]) <= 0:
41 | hiddenLayerSizes = (round(params[0]), round(params[1]), round(params[2]))
42 | else:
43 | hiddenLayerSizes = (round(params[0]), round(params[1]), round(params[2]), round(params[3]))
44 |
45 | activation = ['tanh', 'relu', 'logistic'][floor(params[4])]
46 | solver = ['sgd', 'adam', 'lbfgs'][floor(params[5])]
47 | alpha = params[6]
48 | learning_rate = ['constant', 'invscaling', 'adaptive'][floor(params[7])]
49 |
50 | return hiddenLayerSizes, activation, solver, alpha, learning_rate
51 |
52 | @ignore_warnings(category=ConvergenceWarning)
53 | def getAccuracy(self, params):
54 | hiddenLayerSizes, activation, solver, alpha, learning_rate = self.convertParams(params)
55 |
56 | self.classifier = MLPClassifier(random_state=self.randomSeed,
57 | hidden_layer_sizes=hiddenLayerSizes,
58 | activation=activation,
59 | solver=solver,
60 | alpha=alpha,
61 | learning_rate=learning_rate)
62 |
63 | cv_results = model_selection.cross_val_score(self.classifier,
64 | self.X,
65 | self.y,
66 | cv=self.kfold,
67 | scoring='accuracy')
68 |
69 | return cv_results.mean()
70 |
71 | def formatParams(self, params):
72 | hiddenLayerSizes, activation, solver, alpha, learning_rate = self.convertParams(params)
73 | return "'hidden_layer_sizes'={}\n " \
74 | "'activation'='{}'\n " \
75 | "'solver'='{}'\n " \
76 | "'alpha'={}\n " \
77 | "'learning_rate'='{}'"\
78 | .format(hiddenLayerSizes, activation, solver, alpha, learning_rate)
79 |
--------------------------------------------------------------------------------
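convertParams above rounds the first four floats into a hidden-layer tuple and uses floor() to index the categorical choices. An illustrative decoding (chromosome values are made up, within the bounds used in 02-optimize-mlp-hyperparameters.py):

from mlp_hyperparameters_test import MlpHyperparametersTest

test = MlpHyperparametersTest(randomSeed=42)
chromosome = [9.8, 4.2, -3.0, -7.5, 1.4, 0.2, 0.05, 2.7]
print(test.convertParams(chromosome))
# -> ((10, 4), 'relu', 'sgd', 0.05, 'adaptive')
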
/Chapter09/mlp_layers_test.py:
--------------------------------------------------------------------------------
1 | from sklearn import model_selection
2 | from sklearn import datasets
3 | from sklearn.neural_network import MLPClassifier
4 |
5 | from sklearn.exceptions import ConvergenceWarning
6 | from sklearn.utils.testing import ignore_warnings
7 |
8 | class MlpLayersTest:
9 |
10 | NUM_FOLDS = 5
11 |
12 | def __init__(self, randomSeed):
13 |
14 | self.randomSeed = randomSeed
15 | self.initDataset()
16 | self.kfold = model_selection.KFold(n_splits=self.NUM_FOLDS, shuffle=True, random_state=self.randomSeed)
17 |
18 | def initDataset(self):
19 | self.data = datasets.load_iris()
20 |
21 | self.X = self.data['data']
22 | self.y = self.data['target']
23 |
24 |
25 | # params contains: [layer_1_size, layer_2_size, layer_3_size, layer_4_size]
26 | def convertParams(self, params):
27 |
28 | # transform the layer sizes from float (possibly negative) values into hiddenLayerSizes tuple:
29 | if round(params[1]) <= 0:
30 | hiddenLayerSizes = round(params[0]),
31 | elif round(params[2]) <= 0:
32 | hiddenLayerSizes = (round(params[0]), round(params[1]))
33 | elif round(params[3]) <= 0:
34 | hiddenLayerSizes = (round(params[0]), round(params[1]), round(params[2]))
35 | else:
36 | hiddenLayerSizes = (round(params[0]), round(params[1]), round(params[2]), round(params[3]))
37 |
38 | return hiddenLayerSizes
39 |
40 | @ignore_warnings(category=ConvergenceWarning)
41 | def getAccuracy(self, params):
42 | hiddenLayerSizes = self.convertParams(params)
43 |
44 | self.classifier = MLPClassifier(random_state=self.randomSeed,
45 | hidden_layer_sizes=hiddenLayerSizes)
46 |
47 | cv_results = model_selection.cross_val_score(self.classifier,
48 | self.X,
49 | self.y,
50 | cv=self.kfold,
51 | scoring='accuracy')
52 |
53 | return cv_results.mean()
54 |
55 | def formatParams(self, params):
56 | return "'hidden_layer_sizes'={}".format(self.convertParams(params))
57 |
--------------------------------------------------------------------------------
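The negative-value convention in convertParams above is what lets the chromosome control network depth as well as layer sizes: the first non-positive value terminates the tuple. Two illustrative decodings (values are made up, within the bounds of 01-optimize-mlp-layers.py):

from mlp_layers_test import MlpLayersTest

test = MlpLayersTest(randomSeed=42)
print(test.convertParams([12.3, -2.0, 5.0, 5.0]))   # -> (12,)        one hidden layer
print(test.convertParams([12.3, 7.8, 5.1, -9.0]))   # -> (12, 8, 5)   three hidden layers
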
/Chapter10/01-solve-mountain-car.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import mountain_car
9 | import elitism
10 |
11 | # Genetic Algorithm constants:
12 | POPULATION_SIZE = 100
13 | P_CROSSOVER = 0.9 # probability for crossover
14 | P_MUTATION = 0.5 # probability for mutating an individual
15 | MAX_GENERATIONS = 80
16 | HALL_OF_FAME_SIZE = 20
17 |
18 | # set the random seed:
19 | RANDOM_SEED = 42
20 | random.seed(RANDOM_SEED)
21 |
22 | # create the MountainCar task class:
23 | car = mountain_car.MountainCar(RANDOM_SEED)
24 |
25 | toolbox = base.Toolbox()
26 |
27 | # define a single objective, minimizing fitness strategy:
28 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
29 |
30 | # create the Individual class based on list:
31 | creator.create("Individual", list, fitness=creator.FitnessMin)
32 |
33 | # create an operator that randomly returns 0, 1 or 2:
34 | toolbox.register("zeroOneOrTwo", random.randint, 0, 2)
35 |
36 | # create the individual operator to fill up an Individual instance:
37 | toolbox.register("individualCreator",
38 | tools.initRepeat,
39 | creator.Individual,
40 | toolbox.zeroOneOrTwo,
41 | len(car))
42 |
43 | # create the population operator to generate a list of individuals:
44 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
45 |
46 |
47 | # fitness calculation
48 | def getCarScore(individual):
49 | return car.getScore(individual), # return a tuple
50 |
51 | toolbox.register("evaluate", getCarScore)
52 |
53 | # genetic operators for a list of integers in the range [0, 2]:
54 | toolbox.register("select", tools.selTournament, tournsize=2)
55 | toolbox.register("mate", tools.cxTwoPoint)
56 | toolbox.register("mutate", tools.mutUniformInt, low=0, up=2, indpb=1.0/len(car))
57 |
58 |
59 | # Genetic Algorithm flow:
60 | def main():
61 |
62 | # create initial population (generation 0):
63 | population = toolbox.populationCreator(n=POPULATION_SIZE)
64 |
65 | # prepare the statistics object:
66 | stats = tools.Statistics(lambda ind: ind.fitness.values)
67 | stats.register("min", numpy.min)
68 | stats.register("avg", numpy.mean)
69 |
70 | # define the hall-of-fame object:
71 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
72 |
73 | # perform the Genetic Algorithm flow with hof feature added:
74 | population, logbook = elitism.eaSimpleWithElitism(population,
75 | toolbox,
76 | cxpb=P_CROSSOVER,
77 | mutpb=P_MUTATION,
78 | ngen=MAX_GENERATIONS,
79 | stats=stats,
80 | halloffame=hof,
81 | verbose=True)
82 |
83 | # print best solution:
84 | best = hof.items[0]
85 | print()
86 | print("Best Solution = ", best)
87 | print("Best Fitness = ", best.fitness.values[0])
88 |
89 | # save best solution for a replay:
90 | car.saveActions(best)
91 |
92 | if __name__ == "__main__":
93 | main()
--------------------------------------------------------------------------------
/Chapter10/02-solve-cart-pole.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 |
8 | import cart_pole
9 | import elitism
10 |
11 |
12 | # Genetic Algorithm constants:
13 | POPULATION_SIZE = 20
14 | P_CROSSOVER = 0.9 # probability for crossover
15 | P_MUTATION = 0.5 # probability for mutating an individual
16 | MAX_GENERATIONS = 10
17 | HALL_OF_FAME_SIZE = 3
18 | CROWDING_FACTOR = 10.0 # crowding factor for crossover and mutation
19 |
20 | # set the random seed:
21 | RANDOM_SEED = 42
22 | random.seed(RANDOM_SEED)
23 |
24 | # create the cart pole task class:
25 | cartPole = cart_pole.CartPole(RANDOM_SEED)
26 | NUM_OF_PARAMS = len(cartPole)
27 | # boundaries for the network parameters:
28 |
29 | # all weight and bias values are bound between -1 and 1:
30 | BOUNDS_LOW, BOUNDS_HIGH = -1.0, 1.0 # boundaries for all dimensions
31 |
32 | toolbox = base.Toolbox()
33 |
34 | # define a single objective, maximizing fitness strategy:
35 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
36 |
37 | # create the Individual class based on list:
38 | creator.create("Individual", list, fitness=creator.FitnessMax)
39 |
40 | # helper function for creating random real numbers uniformly distributed within a given range [low, up]
41 | # it assumes that the range is the same for every dimension
42 | def randomFloat(low, up):
43 | return [random.uniform(l, u) for l, u in zip([low] * NUM_OF_PARAMS, [up] * NUM_OF_PARAMS)]
44 |
45 | # create an operator that randomly returns a float in the desired range:
46 | toolbox.register("attrFloat", randomFloat, BOUNDS_LOW, BOUNDS_HIGH)
47 |
48 | # create an operator that fills up an Individual instance:
49 | toolbox.register("individualCreator",
50 | tools.initIterate,
51 | creator.Individual,
52 | toolbox.attrFloat)
53 |
54 | # create an operator that generates a list of individuals:
55 | toolbox.register("populationCreator",
56 | tools.initRepeat,
57 | list,
58 | toolbox.individualCreator)
59 |
60 |
61 | # fitness calculation using the CartPole class:
62 | def score(individual):
63 | return cartPole.getScore(individual),
64 |
65 |
66 | toolbox.register("evaluate", score)
67 |
68 |
69 | # genetic operators:
70 | toolbox.register("select", tools.selTournament, tournsize=2)
71 |
72 | toolbox.register("mate",
73 | tools.cxSimulatedBinaryBounded,
74 | low=BOUNDS_LOW,
75 | up=BOUNDS_HIGH,
76 | eta=CROWDING_FACTOR)
77 |
78 | toolbox.register("mutate",
79 | tools.mutPolynomialBounded,
80 | low=BOUNDS_LOW,
81 | up=BOUNDS_HIGH,
82 | eta=CROWDING_FACTOR,
83 | indpb=1.0/NUM_OF_PARAMS)
84 |
85 |
86 | # Genetic Algorithm flow:
87 | def main():
88 |
89 | # create initial population (generation 0):
90 | population = toolbox.populationCreator(n=POPULATION_SIZE)
91 |
92 | # prepare the statistics object:
93 | stats = tools.Statistics(lambda ind: ind.fitness.values)
94 | stats.register("max", numpy.max)
95 | stats.register("avg", numpy.mean)
96 |
97 | # define the hall-of-fame object:
98 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
99 |
100 | # perform the Genetic Algorithm flow with hof feature added:
101 | population, logbook = elitism.eaSimpleWithElitism(population,
102 | toolbox,
103 | cxpb=P_CROSSOVER,
104 | mutpb=P_MUTATION,
105 | ngen=MAX_GENERATIONS,
106 | stats=stats,
107 | halloffame=hof,
108 | verbose=True)
109 |
110 | # print best solution found:
111 | best = hof.items[0]
112 | print()
113 | print("Best Solution = ", best)
114 | print("Best Score = ", best.fitness.values[0])
115 | print()
116 |
117 | # save best solution for a replay:
118 | cartPole.saveParams(best)
119 |
120 | # find average score of 100 episodes using the best solution found:
121 | print("Running 100 episodes using the best solution...")
122 | scores = []
123 | for test in range(100):
124 | scores.append(cart_pole.CartPole().getScore(best))
125 | print("scores = ", scores)
126 | print("Avg. score = ", sum(scores) / len(scores))
127 |
128 |
129 | if __name__ == "__main__":
130 | main()
--------------------------------------------------------------------------------
/Chapter10/cart_pole.py:
--------------------------------------------------------------------------------
1 | import gym
2 | import time
3 |
4 | import numpy as np
5 | import pickle
6 |
7 | from sklearn.neural_network import MLPRegressor
8 |
9 | from sklearn.exceptions import ConvergenceWarning
10 | from sklearn.utils.testing import ignore_warnings
11 |
12 | INPUTS = 4
13 | HIDDEN_LAYER = 4
14 | OUTPUTS = 1
15 |
16 | class CartPole:
17 |
18 | def __init__(self, randomSeed=None):
19 |
20 | self.env = gym.make('CartPole-v1')
21 |
22 | if randomSeed is not None:
23 | self.env.seed(randomSeed)
24 |
25 | def __len__(self):
26 | return INPUTS * HIDDEN_LAYER + HIDDEN_LAYER * OUTPUTS + HIDDEN_LAYER + OUTPUTS
27 |
28 | @ignore_warnings(category=ConvergenceWarning)
29 | def initMlp(self, netParams):
30 | """
31 | initializes a MultiLayer Perceptron (MLP) Regressor with the desired network architecture (layers)
32 | and network parameters (weights and biases).
33 | :param netParams: a list of floats representing the network parameters (weights and biases) of the MLP
34 | :return: initialized MLP Regressor
35 | """
36 |
37 | # create the initial MLP:
38 | mlp = MLPRegressor(hidden_layer_sizes=(HIDDEN_LAYER,), max_iter=1)
39 |
40 | # this will initialize the input and output layers, and the node weights and biases;
41 | # we are not otherwise interested in training the MLP here, hence the setting max_iter=1 above
42 | mlp.fit(np.random.uniform(low=-1, high=1, size=INPUTS).reshape(1, -1), np.ones(OUTPUTS))
43 |
44 | # weights are represented as a list of 2 ndarrays:
45 | # - hidden layer weights: INPUTS x HIDDEN_LAYER
46 | # - output layer weights: HIDDEN_LAYER x OUTPUTS
47 | numWeights = INPUTS * HIDDEN_LAYER + HIDDEN_LAYER * OUTPUTS
48 | weights = np.array(netParams[:numWeights])
49 | mlp.coefs_ = [
50 | weights[0:INPUTS * HIDDEN_LAYER].reshape((INPUTS, HIDDEN_LAYER)),
51 | weights[INPUTS * HIDDEN_LAYER:].reshape((HIDDEN_LAYER, OUTPUTS))
52 | ]
53 |
54 | # biases are represented as a list of 2 ndarrays:
55 | # - hidden layer biases: HIDDEN_LAYER x 1
56 | # - output layer biases: OUTPUTS x 1
57 | biases = np.array(netParams[numWeights:])
58 | mlp.intercepts_ = [biases[:HIDDEN_LAYER], biases[HIDDEN_LAYER:]]
59 |
60 | return mlp
61 |
62 | def getScore(self, netParams):
63 | """
64 | calculates the score of a given solution, represented by the list of float-valued network parameters,
65 | by creating a corresponding MLP Regressor, initiating an episode of the Cart-Pole environment and
66 | running it with the MLP controlling the actions, while using the observations as inputs.
67 | Higher score is better.
68 | :param netParams: a list of floats representing the network parameters (weights and biases) of the MLP
69 | :return: the calculated score value
70 | """
71 |
72 | mlp = self.initMlp(netParams)
73 |
74 |
75 |
76 | actionCounter = 0
77 | totalReward = 0
78 | observation = self.env.reset()
79 | action = int(mlp.predict(observation.reshape(1, -1)) > 0)
80 |
81 | while True:
82 | actionCounter += 1
83 | observation, reward, done, info = self.env.step(action)
84 | totalReward += reward
85 |
86 | if done:
87 | break
88 | else:
89 | action = int(mlp.predict(observation.reshape(1, -1)) > 0)
90 | #print(action)
91 |
92 | return totalReward
93 |
94 | def saveParams(self, netParams):
95 | """
96 | serializes and saves a list of network parameters using pickle
97 | :param netParams: a list of floats representing the network parameters (weights and biases) of the MLP
98 | """
99 | savedParams = []
100 | for param in netParams:
101 | savedParams.append(param)
102 |
103 | pickle.dump(savedParams, open("cart-pole-data.pickle", "wb"))
104 |
105 | def replayWithSavedParams(self):
106 | """
107 | deserializes a saved list of network parameters and uses it to replay an episode
108 | """
109 | savedParams = pickle.load(open("cart-pole-data.pickle", "rb"))
110 | self.replay(savedParams)
111 |
112 | def replay(self, netParams):
113 | """
114 | renders the environment and uses the given network parameters to replay an episode, to visualize a given solution
115 | :param netParams: a list of floats representing the network parameters (weights and biases) of the MLP
116 | """
117 | mlp = self.initMlp(netParams)
118 |
119 | self.env.render()
120 |
121 | actionCounter = 0
122 | totalReward = 0
123 | observation = self.env.reset()
124 | action = int(mlp.predict(observation.reshape(1, -1)) > 0)
125 |
126 | while True:
127 | actionCounter += 1
128 | self.env.render()
129 | observation, reward, done, info = self.env.step(action)
130 | totalReward += reward
131 |
132 | print(actionCounter, ": --------------------------")
133 | print("action = ", action)
134 | print("observation = ", observation)
135 | print("reward = ", reward)
136 | print("totalReward = ", totalReward)
137 | print("done = ", done)
138 | print()
139 |
140 | if done:
141 | break
142 | else:
143 | time.sleep(0.03)
144 | action = int(mlp.predict(observation.reshape(1, -1)) > 0)
145 |
146 | self.env.close()
147 |
148 |
149 | def main():
150 | cart = CartPole()
151 | cart.replayWithSavedParams()
152 |
153 | exit()
154 |
155 |
156 | if __name__ == '__main__':
157 | main()
--------------------------------------------------------------------------------
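initMlp above expects the chromosome to be laid out as hidden-layer weights, then output-layer weights, then hidden biases, then the output bias. A quick arithmetic check of the resulting length, mirroring __len__:

INPUTS, HIDDEN_LAYER, OUTPUTS = 4, 4, 1   # same constants as in cart_pole.py

num_weights = INPUTS * HIDDEN_LAYER + HIDDEN_LAYER * OUTPUTS   # 16 + 4 = 20
num_biases = HIDDEN_LAYER + OUTPUTS                            # 4 + 1 = 5
print(num_weights + num_biases)                                # 25, matching len(CartPole())
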
/Chapter10/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
/Chapter10/mountain_car.py:
--------------------------------------------------------------------------------
1 | import gym
2 | import time
3 | import pickle
4 |
5 | MAX_STEPS = 200
6 | FLAG_LOCATION = 0.5
7 |
8 | class MountainCar:
9 |
10 | def __init__(self, randomSeed):
11 |
12 | self.env = gym.make('MountainCar-v0')
13 | self.env.seed(randomSeed)
14 |
15 | def __len__(self):
16 | return MAX_STEPS
17 |
18 | def getScore(self, actions):
19 | """
20 | calculates the score of a given solution, represented by the list of actions, for the Mountain-Car environment,
21 | by initiating an episode of the Mountain-Car environment and running it with the provided actions.
22 | Lower score is better.
23 | :param actions: a list of actions (values 0, 1, or 2) to be fed into the Mountain Car environment
24 | :return: the calculated score value
25 | """
26 |
27 | # start a new episode:
28 | self.env.reset()
29 |
30 | actionCounter = 0
31 |
32 | # feed the actions to the environment:
33 | for action in actions:
34 | actionCounter += 1
35 |
36 | # provide an action and get feedback:
37 | observation, reward, done, info = self.env.step(action)
38 |
39 | # episode over - either the car hit the flag, or 200 actions processed:
40 | if done:
41 | break
42 |
43 | # evaluate the results to produce the score:
44 | if actionCounter < MAX_STEPS:
45 | # the car hit the flag:
46 | # start from a score of 0
47 | # reward further for a smaller amount of steps
48 | score = 0 - (MAX_STEPS - actionCounter)/MAX_STEPS
49 | else:
50 | # the car did not hit the flag:
51 | # reward according to distance from flag
52 | score = abs(observation[0] - FLAG_LOCATION) # we want to minimize that
53 |
54 | return score
55 |
56 | def saveActions(self, actions):
57 | """
58 | serializes and saves a list of actions using pickle
59 | :param actions: a list of actions (values 0, 1, or 2) to be fed into the Mountain Car environment
60 | """
61 | savedActions = []
62 | for action in actions:
63 | savedActions.append(action)
64 |
65 | pickle.dump(savedActions, open("mountain-car-data.pickle", "wb"))
66 |
67 | def replaySavedActions(self):
68 | """
69 | deserializes a saved list of actions and replays it
70 | """
71 | savedActions = pickle.load(open("mountain-car-data.pickle", "rb"))
72 | self.replay(savedActions)
73 |
74 | def replay(self, actions):
75 | """
76 | renders the environment and replays a list of actions into it, to visualize a given solution
77 | :param actions: a list of actions (values 0, 1, or 2) to be fed into the Mountain Car environment
78 | """
79 | # start a new episode:
80 | observation = self.env.reset()
81 |
82 | # start rendering:
83 | self.env.render()
84 |
85 | actionCounter = 0
86 |
87 | # replay the given actions by feeding them into the environment:
88 | for action in actions:
89 |
90 | actionCounter += 1
91 | self.env.render()
92 | observation, reward, done, info = self.env.step(action)
93 | print(actionCounter, ": --------------------------")
94 | print("action = ", action)
95 | print("observation = ", observation)
96 | print("distance from flag = ", abs(observation[0] - 0.5))
97 | print()
98 |
99 | if done:
100 | break
101 | else:
102 | time.sleep(0.02)
103 |
104 | self.env.close()
105 |
106 |
107 | def main():
108 |
109 | RANDOM_SEED = 42
110 | car = MountainCar(RANDOM_SEED)
111 | car.replaySavedActions()
112 |
113 | if __name__ == '__main__':
114 | main()
--------------------------------------------------------------------------------
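The scoring rule in getScore above mixes two cases: a negative score that rewards reaching the flag in fewer steps, and a positive score equal to the remaining distance otherwise (lower is always better). Two worked examples with made-up numbers:

MAX_STEPS = 200
FLAG_LOCATION = 0.5

# case 1: the car reaches the flag after 150 of the 200 allowed steps:
action_counter = 150
print(0 - (MAX_STEPS - action_counter) / MAX_STEPS)   # -0.25 (faster solutions score lower)

# case 2: the car never reaches the flag and ends at position 0.1:
final_position = 0.1
print(abs(final_position - FLAG_LOCATION))            # 0.4 (the distance left to the flag)
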
/Chapter11/01-reconstruct-with-polygons.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import numpy
7 | import os
8 |
9 | import image_test
10 | import elitism_callback
11 |
12 | import matplotlib.pyplot as plt
13 | import seaborn as sns
14 |
15 | # problem related constants
16 | POLYGON_SIZE = 3
17 | NUM_OF_POLYGONS = 100
18 |
19 | # calculate total number of params in chromosome:
20 | # For each polygon we have:
21 | # two coordinates per vertex, 3 color values, one alpha value
22 | NUM_OF_PARAMS = NUM_OF_POLYGONS * (POLYGON_SIZE * 2 + 4)
23 |
24 | # Genetic Algorithm constants:
25 | POPULATION_SIZE = 200
26 | P_CROSSOVER = 0.9 # probability for crossover
27 | P_MUTATION = 0.5 # probability for mutating an individual
28 | MAX_GENERATIONS = 5000
29 | HALL_OF_FAME_SIZE = 20
30 | CROWDING_FACTOR = 10.0 # crowding factor for crossover and mutation
31 |
32 | # set the random seed:
33 | RANDOM_SEED = 42
34 | random.seed(RANDOM_SEED)
35 |
36 | # create the image test class instance:
37 | imageTest = image_test.ImageTest("images/Mona_Lisa_head.png", POLYGON_SIZE)
38 |
43 |
44 | # all parameter values are bound between 0 and 1, later to be expanded:
45 | BOUNDS_LOW, BOUNDS_HIGH = 0.0, 1.0 # boundaries for all dimensions
46 |
47 | toolbox = base.Toolbox()
48 |
49 | # define a single objective, minimizing fitness strategy:
50 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
51 |
52 | # create the Individual class based on list:
53 | creator.create("Individual", list, fitness=creator.FitnessMin)
54 |
55 | # helper function for creating random real numbers uniformly distributed within a given range [low, up]
56 | # it assumes that the range is the same for every dimension
57 | def randomFloat(low, up):
58 | return [random.uniform(l, u) for l, u in zip([low] * NUM_OF_PARAMS, [up] * NUM_OF_PARAMS)]
59 |
60 | # create an operator that randomly returns a float in the desired range:
61 | toolbox.register("attrFloat", randomFloat, BOUNDS_LOW, BOUNDS_HIGH)
62 |
63 | # create an operator that fills up an Individual instance:
64 | toolbox.register("individualCreator",
65 | tools.initIterate,
66 | creator.Individual,
67 | toolbox.attrFloat)
68 |
69 | # create an operator that generates a list of individuals:
70 | toolbox.register("populationCreator",
71 | tools.initRepeat,
72 | list,
73 | toolbox.individualCreator)
74 |
75 |
76 | # fitness calculation using MSE as difference metric:
77 | def getDiff(individual):
78 | return imageTest.getDifference(individual, "MSE"),
79 | #return imageTest.getDifference(individual, "SSIM"),
80 |
81 | toolbox.register("evaluate", getDiff)
82 |
83 |
84 | # genetic operators:
85 | toolbox.register("select", tools.selTournament, tournsize=2)
86 |
87 | toolbox.register("mate",
88 | tools.cxSimulatedBinaryBounded,
89 | low=BOUNDS_LOW,
90 | up=BOUNDS_HIGH,
91 | eta=CROWDING_FACTOR)
92 |
93 | toolbox.register("mutate",
94 | tools.mutPolynomialBounded,
95 | low=BOUNDS_LOW,
96 | up=BOUNDS_HIGH,
97 | eta=CROWDING_FACTOR,
98 | indpb=1.0/NUM_OF_PARAMS)
99 |
100 |
101 | # save the best current drawing every 100 generations (used as a callback):
102 | def saveImage(gen, polygonData):
103 |
104 | # only every 100 generations:
105 | if gen % 100 == 0:
106 |
107 | # create the folder if it does not exist:
108 | folder = "images/results/run-{}-{}".format(POLYGON_SIZE, NUM_OF_POLYGONS)
109 | if not os.path.exists(folder):
110 | os.makedirs(folder)
111 |
112 | # save the image in the folder:
113 | imageTest.saveImage(polygonData,
114 | "{}/after-{}-gen.png".format(folder, gen),
115 | "After {} Generations".format(gen))
116 |
117 | # Genetic Algorithm flow:
118 | def main():
119 |
120 | # create initial population (generation 0):
121 | population = toolbox.populationCreator(n=POPULATION_SIZE)
122 |
123 | # prepare the statistics object:
124 | stats = tools.Statistics(lambda ind: ind.fitness.values)
125 | stats.register("min", numpy.min)
126 | stats.register("avg", numpy.mean)
127 |
128 | # define the hall-of-fame object:
129 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
130 |
131 |
132 | # perform the Genetic Algorithm flow with elitism and 'saveImage' callback:
133 | population, logbook = elitism_callback.eaSimpleWithElitismAndCallback(population,
134 | toolbox,
135 | cxpb=P_CROSSOVER,
136 | mutpb=P_MUTATION,
137 | ngen=MAX_GENERATIONS,
138 | callback=saveImage,
139 | stats=stats,
140 | halloffame=hof,
141 | verbose=True)
142 |
143 | # print best solution found:
144 | best = hof.items[0]
145 | print()
146 | print("Best Solution = ", best)
147 | print("Best Score = ", best.fitness.values[0])
148 | print()
149 |
150 | # draw best image next to reference image:
151 | imageTest.plotImages(imageTest.polygonDataToImage(best))
152 |
153 | # extract statistics:
154 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
155 |
156 | # plot statistics:
157 | sns.set_style("whitegrid")
158 | plt.figure("Stats:")
159 | plt.plot(minFitnessValues, color='red')
160 | plt.plot(meanFitnessValues, color='green')
161 | plt.xlabel('Generation')
162 | plt.ylabel('Min / Average Fitness')
163 | plt.title('Min and Average fitness over Generations')
164 |
165 | # show both plots:
166 | plt.show()
167 |
168 | if __name__ == "__main__":
169 | main()
170 |
--------------------------------------------------------------------------------
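For reference, the chromosome length defined above follows directly from the per-polygon encoding; spelled out as simple arithmetic:

POLYGON_SIZE = 3          # triangles: 3 vertices
NUM_OF_POLYGONS = 100

params_per_polygon = POLYGON_SIZE * 2 + 4      # 6 vertex coordinates + RGB + alpha = 10
print(NUM_OF_POLYGONS * params_per_polygon)    # 1000 floats, each bounded to [0.0, 1.0]
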
/Chapter11/elitism_callback.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitismAndCallback(population, toolbox, cxpb, mutpb, ngen, callback=None, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with two additions:
7 | 1. halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | 2. a callback argument was added. It represents an external function that will be called after
11 | each iteration, passing the current generation number and the current best individual as arguments
12 | """
13 | logbook = tools.Logbook()
14 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
15 |
16 | # Evaluate the individuals with an invalid fitness
17 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
18 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
19 | for ind, fit in zip(invalid_ind, fitnesses):
20 | ind.fitness.values = fit
21 |
22 | if halloffame is None:
23 | raise ValueError("halloffame parameter must not be empty!")
24 |
25 | halloffame.update(population)
26 | hof_size = len(halloffame.items) if halloffame.items else 0
27 |
28 | record = stats.compile(population) if stats else {}
29 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
30 | if verbose:
31 | print(logbook.stream)
32 |
33 | # Begin the generational process
34 | for gen in range(1, ngen + 1):
35 |
36 | # Select the next generation individuals
37 | offspring = toolbox.select(population, len(population) - hof_size)
38 |
39 | # Vary the pool of individuals
40 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
41 |
42 | # Evaluate the individuals with an invalid fitness
43 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
44 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
45 | for ind, fit in zip(invalid_ind, fitnesses):
46 | ind.fitness.values = fit
47 |
48 | # add the best back to population:
49 | offspring.extend(halloffame.items)
50 |
51 | # Update the hall of fame with the generated individuals
52 | halloffame.update(offspring)
53 |
54 | # Replace the current population by the offspring
55 | population[:] = offspring
56 |
57 | # Append the current generation statistics to the logbook
58 | record = stats.compile(population) if stats else {}
59 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
60 | if verbose:
61 | print(logbook.stream)
62 |
63 | if callback:
64 | callback(gen, halloffame.items[0])
65 |
66 | return population, logbook
67 |
68 |
--------------------------------------------------------------------------------
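The callback is invoked once per generation with the generation number and the current best individual (halloffame.items[0]). A minimal illustrative callback matching that contract (printProgress is a made-up name, not part of the chapter code):

def printProgress(gen, best):
    # 'best' is a DEAP individual, so its fitness values are available:
    print("generation", gen, "best fitness:", best.fitness.values[0])

# it would then be passed as:
# eaSimpleWithElitismAndCallback(..., callback=printProgress, ...)
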
/Chapter11/image_test.py:
--------------------------------------------------------------------------------
1 | from PIL import Image, ImageDraw
2 | import numpy as np
3 | from skimage.metrics import structural_similarity
4 | import cv2
5 | import matplotlib.pyplot as plt
6 |
9 |
10 | class ImageTest:
11 |
12 | def __init__(self, imagePath, polygonSize):
13 | """
14 | Initializes an instance of the class
15 | :param imagePath: the path of the file containing the reference image
16 | :param polygonSize: the number of vertices on the polygons used to recreate the image
17 | """
18 | self.refImage = Image.open(imagePath)
19 | self.polygonSize = polygonSize
20 |
21 | self.width, self.height = self.refImage.size
22 | self.numPixels = self.width * self.height
23 | self.refImageCv2 = self.toCv2(self.refImage)
24 |
25 | def polygonDataToImage(self, polygonData):
26 | """
27 | accepts polygon data and creates an image containing these polygons.
28 | :param polygonData: a list of polygon parameters. Each item in the list
29 | represents the vertices locations, color and transparency of the corresponding polygon
30 | :return: the image containing the polygons (Pillow format)
31 | """
32 |
33 | # start with a new image:
34 | image = Image.new('RGB', (self.width, self.height))
35 | draw = ImageDraw.Draw(image, 'RGBA')
36 |
37 | # divide the polygonData to chunks, each containing the data for a single polygon:
38 | chunkSize = self.polygonSize * 2 + 4 # (x,y) per vertex + (RGBA)
39 | polygons = self.list2Chunks(polygonData, chunkSize)
40 |
41 | # iterate over all polygons and draw each of them into the image:
42 | for poly in polygons:
43 | index = 0
44 |
45 | # extract the vertices of the current polygon:
46 | vertices = []
47 | for vertex in range(self.polygonSize):
48 | vertices.append((int(poly[index] * self.width), int(poly[index + 1] * self.height)))
49 | index += 2
50 |
51 | # extract the RGB and alpha values of the current polygon:
52 | red = int(poly[index] * 255)
53 | green = int(poly[index + 1] * 255)
54 | blue = int(poly[index + 2] * 255)
55 | alpha = int(poly[index + 3] * 255)
56 |
57 | # draw the polygon into the image:
58 | draw.polygon(vertices, (red, green, blue, alpha))
59 |
60 | # cleanup:
61 | del draw
62 |
63 | return image
64 |
65 | def getDifference(self, polygonData, method="MSE"):
66 | """
67 | accepts polygon data, creates an image containing these polygons, and calculates the difference
68 | between this image and the reference image using one of two methods.
69 | :param polygonData: a list of polygon parameters. Each item in the list
70 | represents the vertices locations, color and transparency of the corresponding polygon
71 | :param method: base method of calculating the difference ("MSE" or "SSIM").
72 | larger return value always means larger difference
73 | :return: the calculated difference between the image containing the polygons and the reference image
74 | """
75 |
76 | # create the image containing the polygons:
77 | image = self.polygonDataToImage(polygonData)
78 |
79 | if method == "MSE":
80 | return self.getMse(image)
81 | else:
82 | return 1.0 - self.getSsim(image)
83 |
84 | def plotImages(self, image, header=None):
85 | """
86 | creates a 'side-by-side' plot of the given image next to the reference image
87 | :param image: image to be drawn next to reference image (Pillow format)
88 | :param header: text used as a header for the plot
89 | """
90 |
91 | fig = plt.figure("Image Comparison:")
92 | if header:
93 | plt.suptitle(header)
94 |
95 | # plot the reference image on the left:
96 | ax = fig.add_subplot(1, 2, 1)
97 | plt.imshow(self.refImage)
98 | self.ticksOff(plt)
99 |
100 | # plot the given image on the right:
101 | fig.add_subplot(1, 2, 2)
102 | plt.imshow(image)
103 | self.ticksOff(plt)
104 |
105 | return plt
106 |
107 | def saveImage(self, polygonData, imageFilePath, header=None):
108 | """
109 | accepts polygon data, creates an image containing these polygons,
110 | creates a 'side-by-side' plot of this image next to the reference image,
111 | and saves the plot to a file
112 | :param polygonData: a list of polygon parameters. Each item in the list
113 | represents the vertices locations, color and transparency of the corresponding polygon
114 | :param imageFilePath: path of file to be used to save the plot to
115 | :param header: text used as a header for the plot
116 | """
117 |
118 | # create an image from the polygon data:
119 | image = self.polygonDataToImage(polygonData)
120 |
121 | # plot the image side-by-side with the reference image:
122 | self.plotImages(image, header)
123 |
124 | # save the plot to file:
125 | plt.savefig(imageFilePath)
126 |
127 | # utility methods:
128 |
129 | def toCv2(self, pil_image):
130 | """converts the given Pillow image to CV2 format"""
131 | return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
132 |
133 | def getMse(self, image):
134 | """calculates MSE of difference between the given image and the reference image"""
135 | return np.sum((self.toCv2(image).astype("float") - self.refImageCv2.astype("float")) ** 2)/float(self.numPixels)
136 |
137 | def getSsim(self, image):
138 | """calculates mean structural similarity index between the given image and the reference image"""
139 | return structural_similarity(self.toCv2(image), self.refImageCv2, multichannel=True)
140 |
141 | def list2Chunks(self, list, chunkSize):
142 | """divides a given list to fixed size chunks, returns a generator iterator"""
143 | for chunk in range(0, len(list), chunkSize):
144 | yield(list[chunk:chunk + chunkSize])
145 |
146 | def ticksOff(self, plot):
147 | """turns off ticks on both axes"""
148 | plot.tick_params(
149 | axis='both',
150 | which='both',
151 | bottom=False,
152 | left=False,
153 | top=False,
154 | right=False,
155 | labelbottom=False,
156 | labelleft=False,
157 | )
158 |
--------------------------------------------------------------------------------
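Each polygon occupies polygonSize * 2 + 4 consecutive floats in the chromosome; polygonDataToImage above scales the first six by the image dimensions and the last four by 255. A small stand-alone decoding example (image size and chunk values are made up):

width, height = 200, 300                                      # hypothetical image size
chunk = [0.1, 0.2, 0.5, 0.9, 0.8, 0.4, 1.0, 0.0, 0.0, 0.5]    # one triangle: 3 x (x, y) + RGBA

# vertex coordinates are scaled by the image dimensions:
vertices = [(int(chunk[i] * width), int(chunk[i + 1] * height)) for i in range(0, 6, 2)]
# color and alpha values are scaled to the 0..255 range:
red, green, blue, alpha = (int(v * 255) for v in chunk[6:])

print(vertices)                  # [(20, 60), (100, 270), (160, 120)]
print(red, green, blue, alpha)   # 255 0 0 127
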
/Chapter11/images/Mona_Lisa_head.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Hands-On-Genetic-Algorithms-with-Python/cc5a1b8711a3e6cc6d3835335f2d09b8822e97f5/Chapter11/images/Mona_Lisa_head.png
--------------------------------------------------------------------------------
/Chapter12/01-gp-even-parity.py:
--------------------------------------------------------------------------------
1 | import random
2 | import operator
3 |
4 | import numpy as np
5 |
6 | from deap import base
7 | from deap import creator
8 | from deap import tools
9 | from deap import gp
10 |
11 | import itertools
12 | import matplotlib.pyplot as plt
13 | import networkx as nx
14 |
15 | import elitism
16 |
17 | # problem constants:
18 | NUM_INPUTS = 3
19 | NUM_COMBINATIONS = 2 ** NUM_INPUTS
20 |
21 | # Genetic Algorithm constants:
22 | POPULATION_SIZE = 60
23 | P_CROSSOVER = 0.9
24 | P_MUTATION = 0.5
25 | MAX_GENERATIONS = 20
26 | HALL_OF_FAME_SIZE = 10
27 |
28 | # Genetic Programming specific constants:
29 | MIN_TREE_HEIGHT = 3
30 | MAX_TREE_HEIGHT = 5
31 | LIMIT_TREE_HEIGHT = 17
32 | MUT_MIN_TREE_HEIGHT = 0
33 | MUT_MAX_TREE_HEIGHT = 2
34 |
35 |
36 | # set the random seed:
37 | RANDOM_SEED = 42
38 | random.seed(RANDOM_SEED)
39 |
40 | toolbox = base.Toolbox()
41 |
42 | # calculate the truth table of even parity check:
43 | parityIn = list(itertools.product([0, 1], repeat=NUM_INPUTS))
44 | parityOut = []
45 | for row in parityIn:
46 | parityOut.append(sum(row) % 2)
47 |
48 | # create the primitive set:
49 | primitiveSet = gp.PrimitiveSet("main", NUM_INPUTS, "in_")
50 | primitiveSet.addPrimitive(operator.and_, 2)
51 | primitiveSet.addPrimitive(operator.or_, 2)
52 | primitiveSet.addPrimitive(operator.xor, 2)
53 | primitiveSet.addPrimitive(operator.not_, 1)
54 |
55 | # add terminal values:
56 | primitiveSet.addTerminal(1)
57 | primitiveSet.addTerminal(0)
58 |
59 | # define a single objective, minimizing fitness strategy:
60 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
61 |
62 | # create the Individual class based on the primitive tree:
63 | creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)
64 |
65 | # create a helper function for creating random trees using the primitive set:
66 | toolbox.register("expr", gp.genFull, pset=primitiveSet, min_=MIN_TREE_HEIGHT, max_=MAX_TREE_HEIGHT)
67 |
68 | # create the individual operator to fill up an Individual instance:
69 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.expr)
70 |
71 | # create the population operator to generate a list of individuals:
72 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
73 |
74 | # create an operator to compile the primitive tree into python code:
75 | toolbox.register("compile", gp.compile, pset=primitiveSet)
76 |
77 | # calculate the difference between the results of the
78 | # generated function and the expected parity results:
79 | def parityError(individual):
80 | func = toolbox.compile(expr=individual)
81 | return sum(func(*pIn) != pOut for pIn, pOut in zip(parityIn, parityOut))
82 |
83 | # fitness measure:
84 | def getCost(individual):
85 | return parityError(individual), # return a tuple
86 |
87 | toolbox.register("evaluate", getCost)
88 |
89 | # genetic operators:
90 | toolbox.register("select", tools.selTournament, tournsize=2)
91 | toolbox.register("mate", gp.cxOnePoint)
92 | toolbox.register("expr_mut", gp.genGrow, min_=MUT_MIN_TREE_HEIGHT, max_=MUT_MAX_TREE_HEIGHT)
93 | toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=primitiveSet)
94 |
95 | # bloat control:
96 | toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=LIMIT_TREE_HEIGHT))
97 | toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=LIMIT_TREE_HEIGHT))
98 |
99 | # Genetic Algorithm flow:
100 | def main():
101 | # create initial population (generation 0):
102 | population = toolbox.populationCreator(n=POPULATION_SIZE)
103 |
104 | # prepare the statistics object:
105 | stats = tools.Statistics(lambda ind: ind.fitness.values)
106 | stats.register("min", np.min)
107 | stats.register("avg", np.mean)
108 |
109 | # define the hall-of-fame object:
110 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
111 |
112 | # perform the Genetic Algorithm flow with elitism:
113 | population, logbook = elitism.eaSimpleWithElitism(population,
114 | toolbox,
115 | cxpb=P_CROSSOVER,
116 | mutpb=P_MUTATION,
117 | ngen=MAX_GENERATIONS,
118 | stats=stats,
119 | halloffame=hof,
120 | verbose=True)
121 |
122 | # print info for best solution found:
123 | best = hof.items[0]
124 | print("-- Best Individual = ", best)
125 | print("-- length={}, height={}".format(len(best), best.height))
126 | print("-- Best Fitness = ", best.fitness.values[0])
127 |
128 |
129 | # plot best tree:
130 | nodes, edges, labels = gp.graph(best)
131 | g = nx.Graph()
132 | g.add_nodes_from(nodes)
133 | g.add_edges_from(edges)
134 | pos = nx.spring_layout(g)
135 |
136 | nx.draw_networkx_nodes(g, pos, node_color='cyan')
137 | nx.draw_networkx_nodes(g, pos, nodelist=[0], node_color='red', node_size=400)
138 |
139 | nx.draw_networkx_edges(g, pos)
140 | nx.draw_networkx_labels(g, pos, **{"labels": labels, "font_size": 8})
141 |
142 |
143 | plt.show()
144 |
145 |
146 | if __name__ == "__main__":
147 | main()
--------------------------------------------------------------------------------
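A note on the fitness logic above: each GP tree is compiled into a callable and scored by how many rows of the 3-input parity truth table it gets wrong, so a perfect individual has fitness 0. The standalone sketch below reproduces that scoring with a hand-written candidate lambda as a hypothetical stand-in for toolbox.compile(expr=individual):

    import itertools

    NUM_INPUTS = 3
    parityIn = list(itertools.product([0, 1], repeat=NUM_INPUTS))
    parityOut = [sum(row) % 2 for row in parityIn]

    # hypothetical stand-in for a compiled GP tree: XOR of the three inputs
    candidate = lambda a, b, c: a ^ b ^ c

    # count truth-table mismatches, exactly as parityError() does
    errors = sum(candidate(*pIn) != pOut for pIn, pOut in zip(parityIn, parityOut))
    print(errors)  # 0 - this candidate reproduces the parity column perfectly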
/Chapter12/02-gp-even-parity-reduced.py:
--------------------------------------------------------------------------------
1 | import random
2 | import operator
3 |
4 | import numpy as np
5 |
6 | from deap import base
7 | from deap import creator
8 | from deap import tools
9 | from deap import gp
10 |
11 | import itertools
12 | import matplotlib.pyplot as plt
13 | import networkx as nx
14 |
15 | import elitism
16 |
17 | # problem constants:
18 | NUM_INPUTS = 3
19 | NUM_COMBINATIONS = 2 ** NUM_INPUTS
20 |
21 | # Genetic Algorithm constants:
22 | POPULATION_SIZE = 60
23 | P_CROSSOVER = 0.9
24 | P_MUTATION = 0.5
25 | MAX_GENERATIONS = 20
26 | HALL_OF_FAME_SIZE = 10
27 |
28 | # Genetic Programming specific constants:
29 | MIN_TREE_HEIGHT = 3
30 | MAX_TREE_HEIGHT = 5
31 | LIMIT_TREE_HEIGHT = 17
32 | MUT_MIN_TREE_HEIGHT = 0
33 | MUT_MAX_TREE_HEIGHT = 2
34 |
35 |
36 | # set the random seed:
37 | RANDOM_SEED = 42
38 | random.seed(RANDOM_SEED)
39 |
40 | toolbox = base.Toolbox()
41 |
42 | # calculate the truth table of even parity check:
43 | parityIn = list(itertools.product([0, 1], repeat=NUM_INPUTS))
44 | parityOut = []
45 | for row in parityIn:
46 | parityOut.append(sum(row) % 2)
47 |
48 | # create the primitive set:
49 | primitiveSet = gp.PrimitiveSet("main", NUM_INPUTS, "in_")
50 | primitiveSet.addPrimitive(operator.and_, 2)
51 | primitiveSet.addPrimitive(operator.or_, 2)
52 | primitiveSet.addPrimitive(operator.xor, 2)
53 | primitiveSet.addPrimitive(operator.not_, 1)
54 |
55 | # add terminal values:
56 | primitiveSet.addTerminal(1)
57 | primitiveSet.addTerminal(0)
58 |
59 | # define a single objective, minimizing fitness strategy:
60 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
61 |
62 | # create the Individual class based on the primitive tree:
63 | creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)
64 |
65 | # create a helper function for creating random trees using the primitive set:
66 | toolbox.register("expr", gp.genFull, pset=primitiveSet, min_=MIN_TREE_HEIGHT, max_=MAX_TREE_HEIGHT)
67 |
68 | # create the individual operator to fill up an Individual instance:
69 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.expr)
70 |
71 | # create the population operator to generate a list of individuals:
72 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
73 |
74 | # create an operator to compile the primitive tree into python code:
75 | toolbox.register("compile", gp.compile, pset=primitiveSet)
76 |
77 | # calculate the difference between the results of the
78 | # generated function and the expected parity results:
79 | def parityError(individual):
80 | func = toolbox.compile(expr=individual)
81 | return sum(func(*pIn) != pOut for pIn, pOut in zip(parityIn, parityOut))
82 |
83 | # fitness measure:
84 | def getCost(individual):
85 | return parityError(individual) + individual.height / 100, # return a tuple
86 |
87 | toolbox.register("evaluate", getCost)
88 |
89 | # genetic operators:
90 | toolbox.register("select", tools.selTournament, tournsize=2)
91 | toolbox.register("mate", gp.cxOnePoint)
92 | toolbox.register("expr_mut", gp.genGrow, min_=MUT_MIN_TREE_HEIGHT, max_=MUT_MAX_TREE_HEIGHT)
93 | toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=primitiveSet)
94 |
95 | # bloat control:
96 | toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=LIMIT_TREE_HEIGHT))
97 | toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=LIMIT_TREE_HEIGHT))
98 |
99 | # Genetic Algorithm flow:
100 | def main():
101 | # create initial population (generation 0):
102 | population = toolbox.populationCreator(n=POPULATION_SIZE)
103 |
104 | # prepare the statistics object:
105 | stats = tools.Statistics(lambda ind: ind.fitness.values)
106 | stats.register("min", np.min)
107 | stats.register("avg", np.mean)
108 |
109 | # define the hall-of-fame object:
110 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
111 |
112 | # perform the Genetic Algorithm flow with elitism:
113 | population, logbook = elitism.eaSimpleWithElitism(population,
114 | toolbox,
115 | cxpb=P_CROSSOVER,
116 | mutpb=P_MUTATION,
117 | ngen=MAX_GENERATIONS,
118 | stats=stats,
119 | halloffame=hof,
120 | verbose=True)
121 |
122 | # print info for best solution found:
123 | best = hof.items[0]
124 | print("-- Best Individual = ", best)
125 | print("-- length={}, height={}".format(len(best), best.height))
126 | print("-- Best Fitness = ", best.fitness.values[0])
127 | print("-- Best Parity Error = ", parityError(best))
128 |
129 |
130 | # plot best tree:
131 | nodes, edges, labels = gp.graph(best)
132 | g = nx.Graph()
133 | g.add_nodes_from(nodes)
134 | g.add_edges_from(edges)
135 | pos = nx.spring_layout(g)
136 |
137 | nx.draw_networkx_nodes(g, pos, node_color='cyan')
138 | nx.draw_networkx_nodes(g, pos, nodelist=[0], node_color='red', node_size=400)
139 |
140 | nx.draw_networkx_edges(g, pos)
141 | nx.draw_networkx_labels(g, pos, **{"labels": labels, "font_size": 8})
142 |
143 | plt.show()
144 |
145 |
146 | if __name__ == "__main__":
147 | main()
--------------------------------------------------------------------------------
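The only change from the previous script is in getCost(), which adds individual.height / 100 to the parity error as a simple parsimony pressure. A tiny illustrative calculation (the heights 3 and 9 are made-up numbers) shows how this penalty breaks ties between equally correct trees while staying smaller than a single truth-table error:

    # two hypothetical individuals, both with zero parity error
    cost_shallow = 0 + 3 / 100   # height 3 -> cost 0.03
    cost_deep    = 0 + 9 / 100   # height 9 -> cost 0.09
    assert cost_shallow < cost_deep   # the shallower tree wins the tie
    assert cost_deep < 1              # the penalty never outweighs one real parity error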
/Chapter12/03-pso-himmelblau.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from deap import base
4 | from deap import creator
5 | from deap import tools
6 |
7 | # constants:
8 | DIMENSIONS = 2
9 | POPULATION_SIZE = 20
10 | MAX_GENERATIONS = 500
11 | MIN_START_POSITION, MAX_START_POSITION = -5, 5
12 | MIN_SPEED, MAX_SPEED = -3, 3
13 | MAX_LOCAL_UPDATE_FACTOR = MAX_GLOBAL_UPDATE_FACTOR = 2.0
14 |
15 | # set the random seed:
16 | RANDOM_SEED = 42
17 | np.random.seed(RANDOM_SEED)
18 |
19 | toolbox = base.Toolbox()
20 |
21 | # define a single objective, minimizing fitness strategy:
22 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
23 |
24 | # define the particle class based on ndarray:
25 | creator.create("Particle", np.ndarray, fitness=creator.FitnessMin, speed=None, best=None)
26 |
27 | # create and initialize a new particle:
28 | def createParticle():
29 | particle = creator.Particle(np.random.uniform(MIN_START_POSITION,
30 | MAX_START_POSITION,
31 | DIMENSIONS))
32 | particle.speed = np.random.uniform(MIN_SPEED, MAX_SPEED, DIMENSIONS)
33 | return particle
34 |
35 | # create the 'particleCreator' operator to fill up a particle instance:
36 | toolbox.register("particleCreator", createParticle)
37 |
38 |
39 | # create the 'population' operator to generate a list of particles:
40 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.particleCreator)
41 |
42 |
43 | def updateParticle(particle, best):
44 |
45 | # create random factors:
46 | localUpdateFactor = np.random.uniform(0, MAX_LOCAL_UPDATE_FACTOR, particle.size)
47 | globalUpdateFactor = np.random.uniform(0, MAX_GLOBAL_UPDATE_FACTOR, particle.size)
48 |
49 | # calculate local and global speed updates:
50 | localSpeedUpdate = localUpdateFactor * (particle.best - particle)
51 | globalSpeedUpdate = globalUpdateFactor * (best - particle)
52 |
53 |     # calculate the updated speed:
54 | particle.speed = particle.speed + (localSpeedUpdate + globalSpeedUpdate)
55 |
56 | # enforce limits on the updated speed:
57 | particle.speed = np.clip(particle.speed, MIN_SPEED, MAX_SPEED)
58 |
59 | # replace particle position with old-position + speed:
60 | particle[:] = particle + particle.speed
61 |
62 |
63 | toolbox.register("update", updateParticle)
64 |
65 |
66 | # Himmelblau function:
67 | def himmelblau(particle):
68 | x = particle[0]
69 | y = particle[1]
70 | f = (x ** 2 + y - 11) ** 2 + (x + y ** 2 - 7) ** 2
71 | return f, # return a tuple
72 |
73 |
74 | toolbox.register("evaluate", himmelblau)
75 |
76 |
77 | def main():
78 |     # create the population of particles:
79 | population = toolbox.populationCreator(n=POPULATION_SIZE)
80 |
81 | # prepare the statistics object:
82 | stats = tools.Statistics(lambda ind: ind.fitness.values)
83 | stats.register("min", np.min)
84 | stats.register("avg", np.mean)
85 |
86 | logbook = tools.Logbook()
87 | logbook.header = ["gen", "evals"] + stats.fields
88 |
89 | best = None
90 |
91 | for generation in range(MAX_GENERATIONS):
92 |
93 |         # evaluate all particles in the population:
94 | for particle in population:
95 |
96 | # find the fitness of the particle:
97 | particle.fitness.values = toolbox.evaluate(particle)
98 |
99 | # particle best needs to be updated:
100 | if particle.best is None or particle.best.size == 0 or particle.best.fitness < particle.fitness:
101 | particle.best = creator.Particle(particle)
102 | particle.best.fitness.values = particle.fitness.values
103 |
104 | # global best needs to be updated:
105 | if best is None or best.size == 0 or best.fitness < particle.fitness:
106 | best = creator.Particle(particle)
107 | best.fitness.values = particle.fitness.values
108 |
109 | # update each particle's speed and position:
110 | for particle in population:
111 | toolbox.update(particle, best)
112 |
113 | # record the statistics for the current generation and print it:
114 | logbook.record(gen=generation, evals=len(population), **stats.compile(population))
115 | print(logbook.stream)
116 |
117 | # print info for best solution found:
118 | print("-- Best Particle = ", best)
119 | print("-- Best Fitness = ", best.fitness.values[0])
120 |
121 |
122 | if __name__ == "__main__":
123 | main()
--------------------------------------------------------------------------------
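updateParticle() above applies the classic PSO velocity update: the new speed is the old speed plus randomly weighted pulls toward the particle's personal best and the swarm's global best, clipped to the speed limits. A hand-worked single step with made-up numbers (the factors would normally be drawn uniformly from [0, MAX_LOCAL_UPDATE_FACTOR] and [0, MAX_GLOBAL_UPDATE_FACTOR]):

    import numpy as np

    position      = np.array([1.0, 2.0])
    speed         = np.array([0.5, -0.5])
    personal_best = np.array([0.0, 1.0])
    global_best   = np.array([3.0, 2.0])

    local_factor  = np.array([1.2, 0.4])   # illustrative random draws
    global_factor = np.array([0.9, 1.7])

    speed = speed + local_factor * (personal_best - position) \
                  + global_factor * (global_best - position)
    speed = np.clip(speed, -3, 3)          # MIN_SPEED, MAX_SPEED
    position = position + speed
    print(speed, position)                 # [ 1.1 -0.9] and [ 2.1  1.1]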
/Chapter12/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
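A small arithmetic note on the elitism mechanism above: each generation only len(population) - hof_size individuals are selected and varied, and the hall-of-fame members are re-injected untouched, so the population size stays constant. With the Chapter12 settings:

    POPULATION_SIZE = 60
    HALL_OF_FAME_SIZE = 10

    bred = POPULATION_SIZE - HALL_OF_FAME_SIZE           # 50 offspring go through crossover/mutation
    assert bred + HALL_OF_FAME_SIZE == POPULATION_SIZE   # 50 + 10 = 60, size preserved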
/Feedback-and-Improvements/Chapter04/01-solve-knapsack.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 | from deap import algorithms
5 |
6 | import random
7 | import numpy
8 |
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import knapsack
13 |
14 | # problem constants:
15 | # create the knapsack problem instance to be used:
16 | knapsack = knapsack.Knapsack01Problem()
17 |
18 | # Genetic Algorithm constants:
19 | POPULATION_SIZE = 50
20 | P_CROSSOVER = 0.9 # probability for crossover
21 | P_MUTATION = 0.1 # probability for mutating an individual
22 | MAX_GENERATIONS = 50
23 | HALL_OF_FAME_SIZE = 1
24 |
25 |
26 | # set the random seed:
27 | RANDOM_SEED = 42
28 | random.seed(RANDOM_SEED)
29 |
30 | toolbox = base.Toolbox()
31 |
32 | # create an operator that randomly returns 0 or 1:
33 | toolbox.register("zeroOrOne", random.randint, 0, 1)
34 |
35 | # define a single objective, maximizing fitness strategy:
36 | creator.create("FitnessMax", base.Fitness, weights=(1.0,))
37 |
38 | # create the Individual class based on list:
39 | creator.create("Individual", list, fitness=creator.FitnessMax)
40 |
41 | # create the individual operator to fill up an Individual instance:
42 | toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.zeroOrOne, len(knapsack))
43 |
44 | # create the population operator to generate a list of individuals:
45 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
46 |
47 |
48 | # fitness calculation
49 | def knapsackValue(individual):
50 | return knapsack.getValue(individual), # return a tuple
51 |
52 |
53 | toolbox.register("evaluate", knapsackValue)
54 |
55 | # genetic operators:
56 |
57 | # Tournament selection with tournament size of 3:
58 | toolbox.register("select", tools.selTournament, tournsize=3)
59 |
60 | # Two-point crossover:
61 | toolbox.register("mate", tools.cxTwoPoint)
62 |
63 | # Flip-bit mutation:
64 | # indpb: Independent probability for each attribute to be flipped
65 | toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/len(knapsack))
66 |
67 |
68 | # Genetic Algorithm flow:
69 | def main():
70 |
71 | # create initial population (generation 0):
72 | population = toolbox.populationCreator(n=POPULATION_SIZE)
73 |
74 | # prepare the statistics object:
75 | stats = tools.Statistics(lambda ind: ind.fitness.values)
76 | stats.register("max", numpy.max)
77 | stats.register("avg", numpy.mean)
78 |
79 | # define the hall-of-fame object:
80 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
81 |
82 | # perform the Genetic Algorithm flow with hof feature added:
83 | population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
84 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
85 |
86 | # print best solution found:
87 | best = hof.items[0]
88 | print("-- Best Ever Individual = ", best)
89 | print("-- Best Ever Fitness = ", best.fitness.values[0])
90 |
91 | print("-- Knapsack Items = ")
92 | knapsack.printItems(best)
93 |
94 | # extract statistics:
95 | maxFitnessValues, meanFitnessValues = logbook.select("max", "avg")
96 |
97 | # plot statistics:
98 | sns.set_style("whitegrid")
99 | plt.plot(maxFitnessValues, color='red')
100 | plt.plot(meanFitnessValues, color='green')
101 | plt.xlabel('Generation')
102 | plt.ylabel('Max / Average Fitness')
103 | plt.title('Max and Average fitness over Generations')
104 | plt.show()
105 |
106 |
107 | if __name__ == "__main__":
108 | main()
--------------------------------------------------------------------------------
/Feedback-and-Improvements/Chapter04/02-solve-tsp-first-attempt.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 | from deap import algorithms
5 |
6 | import random
7 | import array
8 |
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | import seaborn as sns
12 |
13 | import tsp
14 |
15 | # set the random seed for repeatable results
16 | RANDOM_SEED = 42
17 | random.seed(RANDOM_SEED)
18 |
19 | # create the desired traveling salesman problem instance:
20 | TSP_NAME = "bayg29" # name of problem
21 | tsp = tsp.TravelingSalesmanProblem(TSP_NAME)
22 |
23 | # Genetic Algorithm constants:
24 | POPULATION_SIZE = 300
25 | MAX_GENERATIONS = 200
26 | HALL_OF_FAME_SIZE = 1
27 | P_CROSSOVER = 0.9 # probability for crossover
28 | P_MUTATION = 0.1 # probability for mutating an individual
29 |
30 | toolbox = base.Toolbox()
31 |
32 | # define a single objective, minimizing fitness strategy:
33 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
34 |
35 | # create the Individual class based on list of integers:
36 | creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
37 |
38 | # create an operator that generates randomly shuffled indices:
39 | toolbox.register("randomOrder", random.sample, range(len(tsp)), len(tsp))
40 |
41 | # create the individual creation operator to fill up an Individual instance with shuffled indices:
42 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
43 |
44 | # create the population creation operator to generate a list of individuals:
45 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
46 |
47 |
48 | # fitness calculation - compute the total distance of the list of cities represented by indices:
49 | def tspDistance(individual):
50 | return tsp.getTotalDistance(individual), # return a tuple
51 |
52 |
53 | toolbox.register("evaluate", tspDistance)
54 |
55 |
56 | # Genetic operators:
57 | toolbox.register("select", tools.selTournament, tournsize=3)
58 | toolbox.register("mate", tools.cxOrdered)
59 | toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0/len(tsp))
60 |
61 |
62 | # Genetic Algorithm flow:
63 | def main():
64 |
65 | # create initial population (generation 0):
66 | population = toolbox.populationCreator(n=POPULATION_SIZE)
67 |
68 | # prepare the statistics object:
69 | stats = tools.Statistics(lambda ind: ind.fitness.values)
70 | stats.register("min", np.min)
71 | stats.register("avg", np.mean)
72 |
73 | # define the hall-of-fame object:
74 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
75 |
76 | # perform the Genetic Algorithm flow with hof feature added:
77 | population, logbook = algorithms.eaSimple(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
78 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
79 |
80 | # print best individual info:
81 | best = hof.items[0]
82 | print("-- Best Ever Individual = ", best)
83 | print("-- Best Ever Fitness = ", best.fitness.values[0])
84 |
85 | # plot best solution:
86 | plt.figure(1)
87 | tsp.plotData(best)
88 |
89 | # plot statistics:
90 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
91 | plt.figure(2)
92 | sns.set_style("whitegrid")
93 | plt.plot(minFitnessValues, color='red')
94 | plt.plot(meanFitnessValues, color='green')
95 | plt.xlabel('Generation')
96 | plt.ylabel('Min / Average Fitness')
97 | plt.title('Min and Average fitness over Generations')
98 |
99 | # show both plots:
100 | plt.show()
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
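The tour encoding above relies on tools.cxOrdered and tools.mutShuffleIndexes producing valid permutations (no repeated or missing city indices). A quick standalone check of that property on plain lists; the same operators are applied to the array-based Individuals registered above:

    import random
    from deap import tools

    random.seed(0)
    tour1 = list(range(8))
    tour2 = random.sample(range(8), 8)

    child1, child2 = tools.cxOrdered(tour1[:], tour2[:])
    mutant, = tools.mutShuffleIndexes(tour1[:], indpb=0.2)

    # every result is still a permutation of the 8 city indices
    assert sorted(child1) == sorted(child2) == sorted(mutant) == list(range(8))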
/Feedback-and-Improvements/Chapter04/03-solve-tsp.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import array
7 |
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import tsp
13 | import elitism
14 |
15 | # set the random seed for repeatable results
16 | RANDOM_SEED = 42
17 | random.seed(RANDOM_SEED)
18 |
19 | # create the desired traveling salesman problem instance:
20 | TSP_NAME = "bayg29" # name of problem
21 | tsp = tsp.TravelingSalesmanProblem(TSP_NAME)
22 |
23 | # Genetic Algorithm constants:
24 | POPULATION_SIZE = 300
25 | MAX_GENERATIONS = 200
26 | HALL_OF_FAME_SIZE = 30
27 | P_CROSSOVER = 0.9 # probability for crossover
28 | P_MUTATION = 0.1 # probability for mutating an individual
29 |
30 | toolbox = base.Toolbox()
31 |
32 | # define a single objective, minimizing fitness strategy:
33 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
34 |
35 | # create the Individual class based on list of integers:
36 | creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
37 |
38 | # create an operator that generates randomly shuffled indices:
39 | toolbox.register("randomOrder", random.sample, range(len(tsp)), len(tsp))
40 |
41 | # create the individual creation operator to fill up an Individual instance with shuffled indices:
42 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
43 |
44 | # create the population creation operator to generate a list of individuals:
45 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
46 |
47 |
48 | # fitness calculation - compute the total distance of the list of cities represented by indices:
49 | def tspDistance(individual):
50 | return tsp.getTotalDistance(individual), # return a tuple
51 |
52 |
53 | toolbox.register("evaluate", tspDistance)
54 |
55 |
56 | # Genetic operators:
57 | toolbox.register("select", tools.selTournament, tournsize=2)
58 | toolbox.register("mate", tools.cxOrdered)
59 | toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0/len(tsp))
60 |
61 |
62 | # Genetic Algorithm flow:
63 | def main():
64 |
65 | # create initial population (generation 0):
66 | population = toolbox.populationCreator(n=POPULATION_SIZE)
67 |
68 | # prepare the statistics object:
69 | stats = tools.Statistics(lambda ind: ind.fitness.values)
70 | stats.register("min", np.min)
71 | stats.register("avg", np.mean)
72 |
73 | # define the hall-of-fame object:
74 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
75 |
76 | # perform the Genetic Algorithm flow with hof feature added:
77 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
78 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
79 |
80 | # print best individual info:
81 | best = hof.items[0]
82 | print("-- Best Ever Individual = ", best)
83 | print("-- Best Ever Fitness = ", best.fitness.values[0])
84 |
85 | # plot best solution:
86 | plt.figure(1)
87 | tsp.plotData(best)
88 |
89 | # plot statistics:
90 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
91 | plt.figure(2)
92 | sns.set_style("whitegrid")
93 | plt.plot(minFitnessValues, color='red')
94 | plt.plot(meanFitnessValues, color='green')
95 | plt.xlabel('Generation')
96 | plt.ylabel('Min / Average Fitness')
97 | plt.title('Min and Average fitness over Generations')
98 |
99 | # show both plots:
100 | plt.show()
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
--------------------------------------------------------------------------------
/Feedback-and-Improvements/Chapter04/04-solve-vrp.py:
--------------------------------------------------------------------------------
1 | from deap import base
2 | from deap import creator
3 | from deap import tools
4 |
5 | import random
6 | import array
7 |
8 | import numpy as np
9 | import matplotlib.pyplot as plt
10 | import seaborn as sns
11 |
12 | import vrp
13 | import elitism
14 |
15 | # set the random seed:
16 | RANDOM_SEED = 42
17 | random.seed(RANDOM_SEED)
18 |
19 | # create the desired vehicle routing problem using a traveling salesman problem instance:
20 | TSP_NAME = "bayg29"
21 | NUM_OF_VEHICLES = 3
22 | DEPOT_LOCATION = 12
23 | vrp = vrp.VehicleRoutingProblem(TSP_NAME, NUM_OF_VEHICLES, DEPOT_LOCATION)
24 |
25 | # Genetic Algorithm constants:
26 | POPULATION_SIZE = 500
27 | P_CROSSOVER = 0.9 # probability for crossover
28 | P_MUTATION = 0.2 # probability for mutating an individual
29 | MAX_GENERATIONS = 1000
30 | HALL_OF_FAME_SIZE = 30
31 |
32 | toolbox = base.Toolbox()
33 |
34 | # define a single objective, minimizing fitness strategy:
35 | creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
36 |
37 | # create the Individual class based on list of integers:
38 | creator.create("Individual", array.array, typecode='i', fitness=creator.FitnessMin)
39 |
40 | # create an operator that generates randomly shuffled indices:
41 | toolbox.register("randomOrder", random.sample, range(len(vrp)), len(vrp))
42 |
43 | # create the individual creation operator to fill up an Individual instance with shuffled indices:
44 | toolbox.register("individualCreator", tools.initIterate, creator.Individual, toolbox.randomOrder)
45 |
46 | # create the population creation operator to generate a list of individuals:
47 | toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)
48 |
49 |
50 | # fitness calculation - compute the max distance that the vehicles covered
51 | # for the given list of cities represented by indices:
52 | def vrpDistance(individual):
53 | return vrp.getMaxDistance(individual), # return a tuple
54 |
55 |
56 | toolbox.register("evaluate", vrpDistance)
57 |
58 | # Genetic operators:
59 | toolbox.register("select", tools.selTournament, tournsize=2)
60 | toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0/len(vrp))
61 | toolbox.register("mate", tools.cxUniformPartialyMatched, indpb=2.0/len(vrp))
62 |
63 |
64 | # Genetic Algorithm flow:
65 | def main():
66 |
67 | # create initial population (generation 0):
68 | population = toolbox.populationCreator(n=POPULATION_SIZE)
69 |
70 | # prepare the statistics object:
71 | stats = tools.Statistics(lambda ind: ind.fitness.values)
72 | stats.register("min", np.min)
73 | stats.register("avg", np.mean)
74 |
75 | # define the hall-of-fame object:
76 | hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
77 |
78 | # perform the Genetic Algorithm flow with hof feature added:
79 | population, logbook = elitism.eaSimpleWithElitism(population, toolbox, cxpb=P_CROSSOVER, mutpb=P_MUTATION,
80 | ngen=MAX_GENERATIONS, stats=stats, halloffame=hof, verbose=True)
81 |
82 | # print best individual info:
83 | best = hof.items[0]
84 | print("-- Best Ever Individual = ", best)
85 | print("-- Best Ever Fitness = ", best.fitness.values[0])
86 |
87 | print("-- Route Breakdown = ", vrp.getRoutes(best))
88 | print("-- total distance = ", vrp.getTotalDistance(best))
89 | print("-- max distance = ", vrp.getMaxDistance(best))
90 |
91 | # plot best solution:
92 | plt.figure(1)
93 | vrp.plotData(best)
94 |
95 | # plot statistics:
96 | minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
97 | plt.figure(2)
98 | sns.set_style("whitegrid")
99 | plt.plot(minFitnessValues, color='red')
100 | plt.plot(meanFitnessValues, color='green')
101 | plt.xlabel('Generation')
102 | plt.ylabel('Min / Average Fitness')
103 | plt.title('Min and Average fitness over Generations')
104 |
105 | # show both plots:
106 | plt.show()
107 |
108 |
109 | if __name__ == "__main__":
110 | main()
111 |
112 |
--------------------------------------------------------------------------------
/Feedback-and-Improvements/Chapter04/elitism.py:
--------------------------------------------------------------------------------
1 | from deap import tools
2 | from deap import algorithms
3 |
4 | def eaSimpleWithElitism(population, toolbox, cxpb, mutpb, ngen, stats=None,
5 | halloffame=None, verbose=__debug__):
6 | """This algorithm is similar to DEAP eaSimple() algorithm, with the modification that
7 | halloffame is used to implement an elitism mechanism. The individuals contained in the
8 | halloffame are directly injected into the next generation and are not subject to the
9 | genetic operators of selection, crossover and mutation.
10 | """
11 | logbook = tools.Logbook()
12 | logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
13 |
14 | # Evaluate the individuals with an invalid fitness
15 | invalid_ind = [ind for ind in population if not ind.fitness.valid]
16 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
17 | for ind, fit in zip(invalid_ind, fitnesses):
18 | ind.fitness.values = fit
19 |
20 | if halloffame is None:
21 | raise ValueError("halloffame parameter must not be empty!")
22 |
23 | halloffame.update(population)
24 | hof_size = len(halloffame.items) if halloffame.items else 0
25 |
26 | record = stats.compile(population) if stats else {}
27 | logbook.record(gen=0, nevals=len(invalid_ind), **record)
28 | if verbose:
29 | print(logbook.stream)
30 |
31 | # Begin the generational process
32 | for gen in range(1, ngen + 1):
33 |
34 | # Select the next generation individuals
35 | offspring = toolbox.select(population, len(population) - hof_size)
36 |
37 | # Vary the pool of individuals
38 | offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
39 |
40 | # Evaluate the individuals with an invalid fitness
41 | invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
42 | fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
43 | for ind, fit in zip(invalid_ind, fitnesses):
44 | ind.fitness.values = fit
45 |
46 | # add the best back to population:
47 | offspring.extend(halloffame.items)
48 |
49 | # Update the hall of fame with the generated individuals
50 | halloffame.update(offspring)
51 |
52 | # Replace the current population by the offspring
53 | population[:] = offspring
54 |
55 | # Append the current generation statistics to the logbook
56 | record = stats.compile(population) if stats else {}
57 | logbook.record(gen=gen, nevals=len(invalid_ind), **record)
58 | if verbose:
59 | print(logbook.stream)
60 |
61 | return population, logbook
62 |
63 |
--------------------------------------------------------------------------------
/Feedback-and-Improvements/Chapter04/knapsack.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class Knapsack01Problem:
4 | """This class encapsulates the Knapsack 0-1 Problem from RosettaCode.org
5 | """
6 |
7 | def __init__(self):
8 |
9 | # initialize instance variables:
10 | self.items = []
11 | self.maxCapacity = 0
12 |
13 | # initialize the data:
14 | self.__initData()
15 |
16 | def __len__(self):
17 | """
18 | :return: the total number of items defined in the problem
19 | """
20 | return len(self.items)
21 |
22 | def __initData(self):
23 | """initializes the RosettaCode.org knapsack 0-1 problem data
24 | """
25 | self.items = [
26 | ("map", 9, 150),
27 | ("compass", 13, 35),
28 | ("water", 153, 200),
29 | ("sandwich", 50, 160),
30 | ("glucose", 15, 60),
31 | ("tin", 68, 45),
32 | ("banana", 27, 60),
33 | ("apple", 39, 40),
34 | ("cheese", 23, 30),
35 | ("beer", 52, 10),
36 | ("suntan cream", 11, 70),
37 | ("camera", 32, 30),
38 | ("t-shirt", 24, 15),
39 | ("trousers", 48, 10),
40 | ("umbrella", 73, 40),
41 | ("waterproof trousers", 42, 70),
42 | ("waterproof overclothes", 43, 75),
43 | ("note-case", 22, 80),
44 | ("sunglasses", 7, 20),
45 | ("towel", 18, 12),
46 | ("socks", 4, 50),
47 | ("book", 30, 10)
48 | ]
49 |
50 | self.maxCapacity = 400
51 |
52 | def getValue(self, zeroOneList):
53 | """
54 | Calculates the value of the selected items in the list, while ignoring items that will cause the accumulating weight to exceed the maximum weight
55 | :param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items. '1' means that item was selected.
56 | :return: the calculated value
57 | """
58 |
59 | totalWeight = totalValue = 0
60 |
61 | for i in range(len(zeroOneList)):
62 | item, weight, value = self.items[i]
63 | if totalWeight + weight <= self.maxCapacity:
64 | totalWeight += zeroOneList[i] * weight
65 | totalValue += zeroOneList[i] * value
66 | return totalValue
67 |
68 | def printItems(self, zeroOneList):
69 | """
70 | Prints the selected items in the list, while ignoring items that will cause the accumulating weight to exceed the maximum weight
71 | :param zeroOneList: a list of 0/1 values corresponding to the list of the problem's items. '1' means that item was selected.
72 | """
73 | totalWeight = totalValue = 0
74 |
75 | for i in range(len(zeroOneList)):
76 | item, weight, value = self.items[i]
77 | if totalWeight + weight <= self.maxCapacity:
78 | if zeroOneList[i] > 0:
79 | totalWeight += weight
80 | totalValue += value
81 | print("- Adding {}: weight = {}, value = {}, accumulated weight = {}, accumulated value = {}".format(item, weight, value, totalWeight, totalValue))
82 | print("- Total weight = {}, Total value = {}".format(totalWeight, totalValue))
83 |
84 |
85 | # testing the class:
86 | def main():
87 | # create a problem instance:
88 | knapsack = Knapsack01Problem()
89 |
90 |     # create a random solution and evaluate it:
91 | randomSolution = np.random.randint(2, size=len(knapsack))
92 | print("Random Solution = ")
93 | print(randomSolution)
94 | knapsack.printItems(randomSolution)
95 |
96 |
97 | if __name__ == "__main__":
98 | main()
--------------------------------------------------------------------------------
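A minimal usage sketch of the class above, selecting just the first two RosettaCode items ('map' and 'compass'); the expected numbers follow directly from the data table in __initData():

    from knapsack import Knapsack01Problem

    knapsack = Knapsack01Problem()

    solution = [1, 1] + [0] * (len(knapsack) - 2)   # take only 'map' and 'compass'
    print(knapsack.getValue(solution))              # 150 + 35 = 185 (weight 9 + 13 = 22 <= 400)
    knapsack.printItems(solution)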
/Feedback-and-Improvements/Chapter04/tsp.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import pickle
3 | import os
4 | import codecs
5 |
6 | import numpy as np
7 |
8 | from urllib.request import urlopen
9 |
10 | import matplotlib.pyplot as plt
11 |
12 |
13 | class TravelingSalesmanProblem:
14 | """This class encapsulates the Traveling Salesman Problem.
15 | City coordinates are read from an online file and distance matrix is calculated.
16 | The data is serialized to disk.
17 | The total distance can be calculated for a path represented by a list of city indices.
18 | A plot can be created for a path represented by a list of city indices.
19 |
20 | :param name: The name of the corresponding TSPLIB problem, e.g. 'burma14' or 'bayg29'.
21 | """
22 |
23 | def __init__(self, name):
24 | """
25 | Creates an instance of a TSP
26 |
27 | :param name: name of the TSP problem
28 | """
29 |
30 | # initialize instance variables:
31 | self.name = name
32 | self.locations = []
33 | self.distances = []
34 | self.tspSize = 0
35 |
36 | # initialize the data:
37 | self.__initData()
38 |
39 | def __len__(self):
40 | """
41 | returns the length of the underlying TSP
42 | :return: the length of the underlying TSP (number of cities)
43 | """
44 | return self.tspSize
45 |
46 | def __initData(self):
47 |         """Reads the serialized data, and if not available, calls __createData() to prepare it
48 | """
49 |
50 | # attempt to read serialized data:
51 | try:
52 | self.locations = pickle.load(open(os.path.join("tsp-data", self.name + "-loc.pickle"), "rb"))
53 | self.distances = pickle.load(open(os.path.join("tsp-data", self.name + "-dist.pickle"), "rb"))
54 | except (OSError, IOError):
55 | pass
56 |
57 |         # serialized data not found - create the data from scratch:
58 | if not self.locations or not self.distances:
59 | self.__createData()
60 |
61 | # set the problem 'size':
62 | self.tspSize = len(self.locations)
63 |
64 | def __createData(self):
65 | """Reads the desired TSP file from the Internet, extracts the city coordinates, calculates the distances
66 | between every two cities and uses them to populate a distance matrix (two-dimensional array).
67 | It then serializes the city locations and the calculated distances to disk using the pickle utility.
68 | """
69 | self.locations = []
70 |
71 | # open whitespace-delimited file from url and read lines from it:
72 | with urlopen("http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/" + self.name + ".tsp") as f:
73 | reader = csv.reader(codecs.iterdecode(f, 'utf-8'), delimiter=" ", skipinitialspace=True)
74 |
75 | # skip lines until one of these lines is found:
76 | for row in reader:
77 | if row[0] in ('DISPLAY_DATA_SECTION', 'NODE_COORD_SECTION'):
78 | break
79 |
80 | # read data lines until 'EOF' found:
81 | for row in reader:
82 | if row[0] != 'EOF':
83 | # remove index at beginning of line:
84 | del row[0]
85 |
86 | # convert x,y coordinates to ndarray:
87 | self.locations.append(np.asarray(row, dtype=np.float32))
88 | else:
89 | break
90 |
91 | # set the problem 'size':
92 | self.tspSize = len(self.locations)
93 |
94 | # print data:
95 | print("length = {}, locations = {}".format(self.tspSize, self.locations))
96 |
97 | # initialize distance matrix by filling it with 0's:
98 | self.distances = [[0] * self.tspSize for _ in range(self.tspSize)]
99 |
100 | # populate the distance matrix with calculated distances:
101 | for i in range(self.tspSize):
102 | for j in range(i + 1, self.tspSize):
103 | # calculate euclidean distance between two ndarrays:
104 | distance = np.linalg.norm(self.locations[j] - self.locations[i])
105 | self.distances[i][j] = distance
106 | self.distances[j][i] = distance
107 | print("{}, {}: location1 = {}, location2 = {} => distance = {}".format(i, j, self.locations[i], self.locations[j], distance))
108 |
109 | # serialize locations and distances:
110 | if not os.path.exists("tsp-data"):
111 | os.makedirs("tsp-data")
112 | pickle.dump(self.locations, open(os.path.join("tsp-data", self.name + "-loc.pickle"), "wb"))
113 | pickle.dump(self.distances, open(os.path.join("tsp-data", self.name + "-dist.pickle"), "wb"))
114 |
115 | def getTotalDistance(self, indices):
116 | """Calculates the total distance of the path described by the given indices of the cities
117 |
118 | :param indices: A list of ordered city indices describing the given path.
119 | :return: total distance of the path described by the given indices
120 | """
121 |         # distance between the last and first city:
122 | distance = self.distances[indices[-1]][indices[0]]
123 |
124 |         # add the distance between each pair of consecutive cities:
125 | for i in range(len(indices) - 1):
126 | distance += self.distances[indices[i]][indices[i + 1]]
127 |
128 | return distance
129 |
130 | def plotData(self, indices):
131 | """plots the path described by the given indices of the cities
132 |
133 | :param indices: A list of ordered city indices describing the given path.
134 | :return: the resulting plot
135 | """
136 |
137 | # plot the dots representing the cities:
138 | plt.scatter(*zip(*self.locations), marker='.', color='red')
139 |
140 | # create a list of the corresponding city locations:
141 | locs = [self.locations[i] for i in indices]
142 | locs.append(locs[0])
143 |
144 |         # plot a line between each pair of consecutive cities:
145 | plt.plot(*zip(*locs), linestyle='-', color='blue')
146 |
147 | return plt
148 |
149 |
150 | # testing the class:
151 | def main():
152 | # create a problem instance:
153 | tsp = TravelingSalesmanProblem("bayg29")
154 |
155 | # generate a random solution and evaluate it:
156 | #randomSolution = random.sample(range(len(tsp)), len(tsp))
157 |
158 | # see http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/bayg29.opt.tour
159 | optimalSolution = [0, 27, 5, 11, 8, 25, 2, 28, 4, 20, 1, 19, 9, 3, 14, 17, 13, 16, 21, 10, 18, 24, 6, 22, 7, 26, 15, 12, 23]
160 |
161 | print("Problem name: " + tsp.name)
162 | print("Optimal solution = ", optimalSolution)
163 | print("Optimal distance = ", tsp.getTotalDistance(optimalSolution))
164 |
165 | # plot the solution:
166 | plot = tsp.plotData(optimalSolution)
167 | plot.show()
168 |
169 |
170 | if __name__ == "__main__":
171 | main()
172 |
--------------------------------------------------------------------------------
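getTotalDistance() treats the tour as a closed loop: it starts with the leg from the last city back to the first and then adds each consecutive leg. A tiny self-contained illustration with a made-up 3-city distance matrix (not TSPLIB data):

    distances = [[0, 2, 9],
                 [2, 0, 6],
                 [9, 6, 0]]
    indices = [0, 1, 2]

    total = distances[indices[-1]][indices[0]]        # closing leg: city 2 -> city 0
    for i in range(len(indices) - 1):
        total += distances[indices[i]][indices[i + 1]]

    print(total)  # 9 + 2 + 6 = 17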
/Feedback-and-Improvements/README.md:
--------------------------------------------------------------------------------
1 | # Feedback and Improvements
2 |
3 | This folder serves as a platform for readers to suggest changes, improvements and variations of the original code.
4 |
5 | These suggestions may be incorporated into future editions of this book.
6 |
7 | Each folder underneath this one starts as a copy of the original corresponding folder, and readers are invited to submit PRs against its files.
8 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Packt
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | # Hands-On-Genetic-Algorithms-with-Python
5 | Hands-On Genetic Algorithms with Python, Published by Packt
6 | ### Download a free PDF
7 |
8 | If you have already purchased a print or Kindle version of this book, you can get a DRM-free PDF version at no cost. Simply click on the link to claim your free PDF.
9 |