├── differential-evolution └── differential-evolution.py ├── particle-swarm └── particle-swarm.py ├── hill-climbing └── hill_climbing.py ├── sine-cosine └── sine-cosine.py ├── tabu-search └── tabu-search.py ├── simulated-annealing └── simulated-annealing.py ├── emperor-penguins-colony └── emperor-penguins-colony.py ├── learning-based └── learning-based.py ├── dragonfly └── dragonfly.py ├── spotted-hyena └── spotted-hyena.py ├── whale └── whale.py ├── gravitational-search └── gravitational-search.py ├── dandelion └── dandelion.py ├── genetic └── genetic.py ├── firefly └── firefly.py ├── cuckoo └── cuckoo.py ├── war-strategy └── war-strategy.py ├── memetic └── memetic.py ├── moth-flame └── moth-flame.py ├── artificial-bee-colony └── artificial-bee-colony.py ├── grey-wolf └── grey-wolf.py ├── rat-swarm └── rat-swarm.py ├── crow-search └── crow-search.py ├── flower-pollination └── flower-pollination.py ├── imperialist-competitive └── imperialist-competitive.py ├── ant-colony └── ant-colony.py ├── harmony-search └── harmony-search.py ├── red-deer └── red-deer.py ├── bat └── bat.py ├── culture └── culture.py ├── pelican └── pelican.py ├── lion-optimization └── lion-optimization.py └── harris-hawks └── harris-hawks.py /differential-evolution/differential-evolution.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.optimize import differential_evolution 3 | 4 | # Define the objective function to be optimized 5 | def sphere(x): 6 | return np.sum(x**2) 7 | 8 | # Define the bounds of the search space 9 | bounds = [(-5, 5), (-5, 5)] 10 | 11 | # Run the DE algorithm to optimize the objective function 12 | # (scipy's differential_evolution performs the whole search itself and returns an OptimizeResult directly; there is no separate optimizer object with a .run() method) 13 | result = differential_evolution(sphere, bounds) 14 | 15 | # Print the results 16 | print("Best cost: ", result.fun) 17 | print("Best position: ", result.x) -------------------------------------------------------------------------------- /particle-swarm/particle-swarm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pyswarms as ps 3 | 4 | # Define the objective function to be optimized 5 | def sphere(x): 6 | return np.sum(x**2, axis=1) # pyswarms passes the whole swarm at once, so return one cost per particle 7 | 8 | # Define the bounds of the search space 9 | bounds = (np.array([-5, -5]), np.array([5, 5])) 10 | 11 | # Set up the PSO optimizer (the options dict with c1, c2, and w is required by GlobalBestPSO) 12 | optimizer = ps.single.GlobalBestPSO(n_particles=10, dimensions=2, options={'c1': 0.5, 'c2': 0.3, 'w': 0.9}, bounds=bounds) 13 | 14 | # Run the PSO algorithm to optimize the objective function 15 | best_cost, best_pos = optimizer.optimize(sphere, iters=100) 16 | 17 | # Print the results 18 | print("Best cost: ", best_cost) 19 | print("Best position: ", best_pos) -------------------------------------------------------------------------------- /hill-climbing/hill_climbing.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | def hill_climbing(f, n): 4 | # f is the objective (cost) function to be minimized 5 | # n is the number of dimensions of the problem 6 | x = [random.uniform(-10, 10) for i in range(n)] # initialize random solution 7 | best = x 8 | count = 0 9 | while True: 10 | count += 1 11 | neighbor = [x[i] + random.uniform(-1, 1) for i in range(n)] # generate a random neighbor 12 | if f(neighbor) < f(best): # if the neighbor is better (lower cost), accept it 13 | best = neighbor 14 | x = neighbor; count = 0 # reset the stall counter on improvement 15 | if count > 1000: # stop after 1000 iterations without improvement 16 | break 17 | return best 18 | 19 | # Example fitness function 20 |
def sphere(x): 21 | return sum(i**2 for i in x) 22 | 23 | print(hill_climbing(sphere, 3)) 24 | 25 | -------------------------------------------------------------------------------- /sine-cosine/sine-cosine.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def sine_cosine_algorithm(fitness_func, lb, ub, pop_size=50, max_iter=100): 4 | # initialize the population 5 | pop = np.random.uniform(lb, ub, (pop_size, len(lb))) 6 | best = pop[np.argmin([fitness_func(x) for x in pop])] 7 | 8 | for i in range(max_iter): 9 | # generate random numbers between -1 and 1 using the sine function 10 | rand_sin = np.sin(np.random.uniform(-np.pi/2, np.pi/2, (pop_size, len(lb)))) 11 | # generate random numbers between -1 and 1 using the cosine function 12 | rand_cos = np.cos(np.random.uniform(0, 2*np.pi, (pop_size, len(lb)))) 13 | # update the positions of the population 14 | pop = pop + rand_sin * (best - pop) + rand_cos * (pop - pop.mean(axis=0)) 15 | # apply bounds to the positions 16 | pop = np.clip(pop, lb, ub) 17 | # evaluate the fitness of the population 18 | fitness = [fitness_func(x) for x in pop] 19 | # update the global best solution 20 | if min(fitness) < fitness_func(best): 21 | best = pop[np.argmin(fitness)] 22 | return best -------------------------------------------------------------------------------- /tabu-search/tabu-search.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | def tabu_search(f, n, tabulen, maxiter): 4 | # f is the objective (cost) function to be minimized 5 | # n is the number of dimensions of the problem 6 | # tabulen is the length of the tabu list 7 | # maxiter is the maximum number of iterations 8 | x = [random.uniform(-10, 10) for i in range(n)] # initialize random solution 9 | best = x 10 | tabu = [x] * tabulen # initialize tabu list 11 | count = 0 12 | while count < maxiter: 13 | count += 1 14 | candidate_list = [] 15 | for i in range(n): 16 | for j in [-1, 1]: 17 | candidate = x.copy() 18 | candidate[i] += j 19 | if candidate not in tabu: 20 | candidate_list.append(candidate) 21 | if len(candidate_list) == 0: 22 | break 23 | candidate_list.sort(key=f) # best (lowest-cost) non-tabu candidate first 24 | x = candidate_list[0] 25 | if f(x) < f(best): 26 | best = x 27 | tabu.pop(0) 28 | tabu.append(x) 29 | return best 30 | 31 | # Example fitness function 32 | def sphere(x): 33 | return sum(i**2 for i in x) 34 | 35 | print(tabu_search(sphere, 3, 10, 100)) -------------------------------------------------------------------------------- /simulated-annealing/simulated-annealing.py: -------------------------------------------------------------------------------- 1 | import math 2 | import random 3 | 4 | def simulated_annealing(f, n, T, cool_rate): 5 | # f is the objective (cost) function to be minimized 6 | # n is the number of dimensions of the problem 7 | # T is the initial temperature 8 | # cool_rate is the cooling rate 9 | x = [random.uniform(-10, 10) for i in range(n)] # initialize random solution 10 | best = x 11 | count = 0 12 | while T > 1e-8: # stop when temperature is close to zero 13 | count += 1 14 | neighbor = [x[i] + random.uniform(-1, 1) for i in range(n)] # generate a random neighbor 15 | delta = f(neighbor) - f(x) 16 | if delta < 0: # if the neighbor is better (lower cost), accept it 17 | x = neighbor 18 | if f(neighbor) < f(best): 19 | best = neighbor 20 | else: 21 | prob = math.exp(-delta / T) 22 | if random.random() < prob: # accept the worse neighbor with a probability 23 | x = neighbor 24 | T *= cool_rate # reduce the
temperature 25 | return best 26 | 27 | # Example fitness function 28 | def sphere(x): 29 | return sum(i**2 for i in x) 30 | 31 | print(simulated_annealing(sphere, 3, 10, 0.99)) -------------------------------------------------------------------------------- /emperor-penguins-colony/emperor-penguins-colony.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | # Define the problem to be optimized 4 | def objective_function(attributes): 5 | # Evaluate the fitness of a penguin's attributes 6 | # and return a scalar value representing its quality 7 | return ... 8 | 9 | # Define the parameters of the algorithm 10 | population_size = ... 11 | mutation_probability = ... 12 | num_iterations = ... 13 | 14 | # Initialize the population of penguins with random attributes 15 | population = [] 16 | for i in range(population_size): 17 | attributes = ... 18 | population.append(attributes) 19 | 20 | # Main loop of the algorithm 21 | for iteration in range(num_iterations): 22 | # Evaluate the fitness of each penguin in the population 23 | fitness_values = [objective_function(attributes) for attributes in population] 24 | 25 | # Select the best penguins to form a new generation 26 | best_penguins = ... 27 | 28 | # Adapt the penguins' attributes using random mutations and information sharing 29 | new_population = [] 30 | for i in range(population_size): 31 | parent = random.choice(best_penguins) 32 | child_attributes = ... 33 | new_population.append(child_attributes) 34 | 35 | # Update the population with the new generation 36 | population = new_population -------------------------------------------------------------------------------- /learning-based/learning-based.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define the fitness function 4 | def fitness(x): 5 | return np.sum(np.square(x)) 6 | 7 | # Define the TLBO algorithm 8 | def tlbo(fitness, bounds, n, m, max_iter): 9 | # Initialize the population 10 | pop = np.random.uniform(bounds[0], bounds[1], size=(n, m)) 11 | # Evaluate the fitness of each individual 12 | fit = np.apply_along_axis(fitness, 1, pop) 13 | # Main loop 14 | for t in range(max_iter): 15 | # Determine the best individuals (teachers) 16 | idx = np.argsort(fit)[:n//2] 17 | teachers = pop[idx, :] 18 | # Teaching and learning process 19 | for i in range(n): 20 | # Select a teacher randomly 21 | j = np.random.choice(n//2) 22 | # Select a student randomly 23 | k = np.random.choice(n) 24 | while k == i: 25 | k = np.random.choice(n) 26 | # Teaching and learning process 27 | r = np.random.uniform(-1, 1, size=m) 28 | x_new = pop[i, :] + r * (teachers[j, :] - pop[i, :]) + r * (pop[k, :] - pop[i, :]) 29 | # Evaluate the fitness of the new individual 30 | fit_new = fitness(x_new) 31 | # Update the student if the fitness improves 32 | if fit_new < fit[i]: 33 | pop[i, :] = x_new 34 | fit[i] = fit_new 35 | # Return the best individual and its fitness 36 | idx = np.argmin(fit) 37 | return pop[idx, :], fit[idx] -------------------------------------------------------------------------------- /dragonfly/dragonfly.py: -------------------------------------------------------------------------------- 1 | import pyswarms as ps 2 | from pyswarms.utils.functions import single_obj as fx 3 | from pyswarms.backend.topology import Pyramid 4 | from pyswarms.backend.operators import compute_pbest 5 | 6 | # Define the fitness function to be optimized 7 | def sphere(x): 8 | return fx.sphere(x) 9 
| 10 | # Set the search space and the number of dimensions 11 | bounds = ([-5, -5, -5], [5, 5, 5]) 12 | dimensions = 3 13 | 14 | # Initialize the swarm and the topology 15 | options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9} 16 | swarm = ps.discrete.binary.BinaryPSO(n_particles=20, dimensions=dimensions, options=options, bounds=bounds) 17 | topology = Pyramid(static=False) 18 | 19 | # Initialize the dragonfly algorithm 20 | dragonfly = ps.single.DragonflySwarmOptimizer(n_particles=20, dimensions=dimensions, options=options, bounds=bounds, topology=topology) 21 | 22 | # Optimize the function using both algorithms and compare the results 23 | sphere_min = fx.sphere([0]*dimensions) 24 | 25 | for i in range(100): 26 | # Run one iteration of the PSO algorithm 27 | cost, pos = swarm.optimize(sphere, iters=1) 28 | 29 | # Compute the personal best for each particle 30 | swarm.pbest_pos, swarm.pbest_cost = compute_pbest(swarm) 31 | 32 | # Run one iteration of the dragonfly algorithm 33 | dragonfly.optimize(sphere) 34 | 35 | # Compare the results 36 | print('Iteration:', i, 'PSO Best:', swarm.best_cost, 'DA Best:', dragonfly.best_cost) 37 | if swarm.best_cost <= sphere_min and dragonfly.best_cost <= sphere_min: 38 | break -------------------------------------------------------------------------------- /spotted-hyena/spotted-hyena.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def SHO(obj_func, lb, ub, num_hyenas=50, max_iter=100): 4 | 5 | # Initialization 6 | dim = len(lb) 7 | hyenas = np.random.uniform(lb, ub, (num_hyenas, dim)) 8 | fitness = np.zeros(num_hyenas) 9 | 10 | # Main loop 11 | for t in range(max_iter): 12 | # Evaluate fitness 13 | for i in range(num_hyenas): 14 | fitness[i] = obj_func(hyenas[i, :]) 15 | 16 | # Find the best hyena 17 | best_hyena_idx = np.argmin(fitness) 18 | best_hyena = hyenas[best_hyena_idx, :] 19 | 20 | # Update hyenas 21 | for i in range(num_hyenas): 22 | if i == best_hyena_idx: 23 | continue 24 | 25 | # Calculate distance 26 | dist = np.linalg.norm(hyenas[i, :] - best_hyena) 27 | 28 | # Update position 29 | if fitness[i] < fitness[best_hyena_idx]: 30 | a = 2.0 - 2.0 * t / max_iter 31 | b = np.random.uniform(0, 1) 32 | hyenas[i, :] += a * np.exp(-b * dist) * (hyenas[i, :] - best_hyena) 33 | else: 34 | a = 2.0 * t / max_iter 35 | b = np.random.uniform(0, 1) 36 | hyenas[i, :] += a * np.exp(-b * dist) * (hyenas[i, :] - best_hyena) 37 | 38 | # Enforce bounds 39 | hyenas[i, :] = np.maximum(hyenas[i, :], lb) 40 | hyenas[i, :] = np.minimum(hyenas[i, :], ub) 41 | 42 | # Find the best solution 43 | best_hyena_idx = np.argmin(fitness) 44 | best_hyena = hyenas[best_hyena_idx, :] 45 | best_fitness = fitness[best_hyena_idx] 46 | 47 | return (best_hyena, best_fitness) -------------------------------------------------------------------------------- /whale/whale.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | 4 | def whale_optimization_algorithm(cost_func, dim, n, iterations, lb, ub): 5 | # Initialize positions and velocities of search agents 6 | positions = np.zeros((n, dim)) 7 | for i in range(n): 8 | positions[i, :] = np.random.uniform(0, 1, dim) * (ub - lb) + lb 9 | 10 | # Initialize convergence curve 11 | convergence_curve = np.zeros(iterations) 12 | 13 | # Main loop 14 | for t in range(iterations): 15 | # Update a 16 | a = 2 - t * ((2) / iterations) 17 | 18 | for i in range(n): 19 | # Update the position of the current whale 20 | r1 = 
np.random.uniform(0, 1, dim) 21 | r2 = np.random.uniform(0, 1, dim) 22 | A = 2 * a * r1 - a 23 | C = 2 * r2 24 | b = 1 25 | l = (a - 1) * np.random.uniform(0, 1) + 1 26 | 27 | p = np.random.uniform(0, 1, dim) 28 | d = np.abs(C * positions[np.random.randint(0, n), :] - positions[i, :]) 29 | new_position = np.clip(p * positions[i, :] + b * d * np.exp(l * A), lb, ub) 30 | 31 | # Evaluate the new position 32 | new_cost = cost_func(new_position) 33 | 34 | # Update the position if it is better 35 | if new_cost < cost_func(positions[i, :]): 36 | positions[i, :] = new_position 37 | 38 | # Update convergence curve 39 | convergence_curve[t] = cost_func(positions.min(axis=0)) 40 | 41 | # Return the best solution and convergence curve 42 | best_solution = positions.min(axis=0) 43 | return best_solution, convergence_curve -------------------------------------------------------------------------------- /gravitational-search/gravitational-search.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def gsa(f, lb, ub, dim, n_iter, n_pop, G0=100, alpha=20, G_dec=0.99): 4 | # f: objective function 5 | # lb: lower bounds 6 | # ub: upper bounds 7 | # dim: dimensions 8 | # n_iter: number of iterations 9 | # n_pop: population size 10 | # G0: initial gravitational constant 11 | # alpha: power index 12 | # G_dec: gravitational constant decay rate 13 | 14 | # initialization 15 | X = np.random.uniform(lb, ub, size=(n_pop, dim)) 16 | G = G0 17 | best_fitness = np.inf 18 | best_solution = None 19 | fitness_history = [] 20 | 21 | for i in range(n_iter): 22 | # calculate fitness 23 | fitness = np.array([f(x) for x in X]) 24 | # update best solution 25 | best_idx = np.argmin(fitness) 26 | if fitness[best_idx] < best_fitness: 27 | best_fitness = fitness[best_idx] 28 | best_solution = X[best_idx] 29 | # update fitness history 30 | fitness_history.append(best_fitness) 31 | # calculate mass 32 | M = fitness / np.sum(fitness) 33 | # calculate acceleration 34 | a = np.zeros_like(X) 35 | for j in range(n_pop): 36 | for k in range(n_pop): 37 | if j == k: 38 | continue 39 | r = np.linalg.norm(X[j] - X[k]) 40 | a[j] += (X[k] - X[j]) * M[k] / r ** alpha 41 | # calculate new position 42 | X = X + np.random.uniform(size=X.shape) * a / (G + 1e-10) 43 | # apply boundary conditions 44 | X = np.clip(X, lb, ub) 45 | # decay gravitational constant 46 | G *= G_dec 47 | 48 | return best_solution, best_fitness, fitness_history -------------------------------------------------------------------------------- /dandelion/dandelion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def dandelion_optimizer(objective_function, lb, ub, dimension, max_iter): 4 | # Initialize the position of dandelions 5 | positions = np.random.uniform(low=lb, high=ub, size=(dimension, 10)) 6 | fitness = np.zeros(10) 7 | best_fitness = float('inf') 8 | best_position = np.zeros(dimension) 9 | 10 | for i in range(10): 11 | fitness[i] = objective_function(positions[:, i]) 12 | 13 | if fitness[i] < best_fitness: 14 | best_fitness = fitness[i] 15 | best_position = positions[:, i] 16 | 17 | for iter in range(max_iter): 18 | for i in range(10): 19 | # Mutation operator 20 | mutant = positions[:, i] + np.random.normal(scale=0.1, size=dimension) 21 | 22 | # Boundary handling 23 | mutant = np.maximum(mutant, lb) 24 | mutant = np.minimum(mutant, ub) 25 | 26 | # Evaluate the fitness of mutant 27 | mutant_fitness = objective_function(mutant) 28 | 29 | # 
Competition operator 30 | if mutant_fitness < fitness[i]: 31 | positions[:, i] = mutant 32 | fitness[i] = mutant_fitness 33 | 34 | if fitness[i] < best_fitness: 35 | best_fitness = fitness[i] 36 | best_position = positions[:, i] 37 | 38 | # Dispersion operator 39 | for i in range(10): 40 | for j in range(dimension): 41 | positions[j, i] = best_position[j] + np.random.normal(scale=0.1) 42 | 43 | # Boundary handling 44 | positions[j, i] = np.maximum(positions[j, i], lb[j]) 45 | positions[j, i] = np.minimum(positions[j, i], ub[j]) 46 | 47 | return best_position, best_fitness -------------------------------------------------------------------------------- /genetic/genetic.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define the fitness function 4 | def fitness_func(x): 5 | return np.sum(x) 6 | 7 | # Define the genetic algorithm 8 | def genetic_algorithm(pop_size, gene_length, mutation_rate, generations): 9 | # Initialize the population 10 | population = np.random.randint(0, 2, size=(pop_size, gene_length)) 11 | 12 | # Iterate over the generations 13 | for i in range(generations): 14 | # Evaluate the fitness of each individual in the population 15 | fitness = np.apply_along_axis(fitness_func, 1, population) 16 | 17 | # Select the parents for the next generation 18 | parents = population[np.argsort(-fitness)][:2] 19 | 20 | # Perform crossover to create the offspring 21 | offspring = np.empty((pop_size, gene_length)) 22 | for j in range(pop_size): 23 | parent1 = parents[j % 2] 24 | parent2 = parents[(j + 1) % 2] 25 | mask = np.random.randint(0, 2, size=gene_length, dtype=bool) 26 | offspring[j][mask] = parent1[mask] 27 | offspring[j][~mask] = parent2[~mask] 28 | 29 | # Perform mutation on the offspring 30 | mask = np.random.random((pop_size, gene_length)) < mutation_rate 31 | offspring[mask] = 1 - offspring[mask] 32 | 33 | # Update the population with the new generation 34 | population = offspring 35 | 36 | # Return the best individual 37 | fitness = np.apply_along_axis(fitness_func, 1, population) 38 | best_individual = population[np.argmax(fitness)] 39 | 40 | return best_individual, np.max(fitness) 41 | 42 | # Example usage 43 | best_individual, max_fitness = genetic_algorithm(pop_size=10, gene_length=5, mutation_rate=0.1, generations=100) 44 | print("Best individual: ", best_individual) 45 | print("Max fitness: ", max_fitness) -------------------------------------------------------------------------------- /firefly/firefly.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.spatial.distance import euclidean 3 | 4 | def firefly_algorithm(fitness_function, dim, n, alpha=0.2, beta0=1.0, gamma=1.0, theta=1.0, max_iter=100): 5 | """ 6 | Implements the Firefly Algorithm for optimization problems 7 | """ 8 | 9 | # initialize the firefly population 10 | population = np.random.uniform(size=(n, dim)) 11 | 12 | # evaluate the fitness of the initial population 13 | fitness = np.apply_along_axis(fitness_function, 1, population) 14 | 15 | # find the index of the best firefly 16 | best_index = np.argmin(fitness) 17 | 18 | # initialize the best firefly and its fitness 19 | best_firefly = population[best_index] 20 | best_fitness = fitness[best_index] 21 | 22 | # run the algorithm for the specified number of iterations 23 | for i in range(max_iter): 24 | 25 | # update the light intensity of each firefly 26 | for j in range(n): 27 | for k in range(n): 28 | if fitness[k] < 
fitness[j]: 29 | r = euclidean(population[j], population[k]) 30 | beta = beta0 * np.exp(-gamma * r ** 2) 31 | population[j] += alpha * beta * (population[k] - population[j]) + \ 32 | theta * np.random.normal(size=dim) 33 | population[j] = np.clip(population[j], 0, 1) 34 | 35 | # evaluate the fitness of the updated population 36 | fitness = np.apply_along_axis(fitness_function, 1, population) 37 | 38 | # find the index of the best firefly 39 | best_index = np.argmin(fitness) 40 | 41 | # update the best firefly and its fitness 42 | if fitness[best_index] < best_fitness: 43 | best_firefly = population[best_index] 44 | best_fitness = fitness[best_index] 45 | 46 | return best_firefly -------------------------------------------------------------------------------- /cuckoo/cuckoo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def levy_flight(beta): 4 | sigma = (np.math.gamma(1 + beta) * np.sin(np.pi * beta / 2) / (np.math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta) 5 | u = np.random.normal(0, sigma) 6 | v = np.random.normal(0, sigma) 7 | step = u / np.power(np.abs(v), 1 / beta) 8 | return step 9 | 10 | def generate_cuckoos(n, dim): 11 | cuckoos = np.random.uniform(low=-10, high=10, size=(n, dim)) 12 | return cuckoos 13 | 14 | def get_best_nests(nests, fitness): 15 | sorted_fitness = np.argsort(fitness) 16 | best_nests = nests[sorted_fitness][:int(len(nests) * 0.25)] 17 | return best_nests 18 | 19 | def empty_nests(nests, alpha): 20 | n, dim = nests.shape 21 | beta = 3 / 2 22 | sigma = (np.power(np.math.gamma(1 + beta), 2) * np.power(np.sin(np.pi * beta / 2), 2) / (np.power(np.math.gamma((1 + beta) / 2), 2) * beta * np.power(2, (beta - 1)))) ** (1 / (2 * beta)) 23 | u = np.random.normal(0, sigma, size=(n, dim)) 24 | v = np.random.normal(0, 1, size=(n, dim)) 25 | step = u / np.power(np.abs(v), 1 / beta) 26 | step_size = alpha * step * (nests - nests.mean(axis=0)) 27 | new_nests = nests + step_size 28 | return new_nests 29 | 30 | def cuckoo_optimization(n, dim, num_iter): 31 | nests = generate_cuckoos(n, dim) 32 | fitness = np.random.uniform(low=0, high=1, size=n) 33 | best_nests = get_best_nests(nests, fitness) 34 | best_fitness = fitness.min() 35 | for i in range(num_iter): 36 | new_nests = empty_nests(nests, alpha=0.01) 37 | new_fitness = np.random.uniform(low=0, high=1, size=n) 38 | new_best_nests = get_best_nests(new_nests, new_fitness) 39 | for j in range(len(new_best_nests)): 40 | if new_fitness[j] < best_fitness: 41 | idx = np.random.randint(len(best_nests)) 42 | best_nests[idx] = new_best_nests[j] 43 | best_fitness = new_fitness[j] 44 | nests = new_nests 45 | fitness = new_fitness 46 | return best_nests, best_fitness -------------------------------------------------------------------------------- /war-strategy/war-strategy.py: -------------------------------------------------------------------------------- 1 | import random 2 | import copy 3 | 4 | def initialize_population(population_size, num_resources, num_targets): 5 | population = [] 6 | for i in range(population_size): 7 | army = { 8 | "resources": [random.random() for j in range(num_resources)], 9 | "targets": [random.random() for j in range(num_targets)] 10 | } 11 | population.append(army) 12 | return population 13 | 14 | def evaluate_fitness(army, target): 15 | # calculate the fitness of an army based on its ability to achieve its target 16 | # return a fitness value 17 | pass 18 | 19 | def allocate_resources(army, resources): 20 | # 
allocate resources to an army based on its targets 21 | # return the updated army 22 | pass 23 | 24 | def mutate(army, mutation_rate): 25 | # mutate an army with a given mutation rate 26 | # return the mutated army 27 | pass 28 | 29 | def crossover(army1, army2): 30 | # perform crossover between two armies 31 | # return the offspring 32 | pass 33 | 34 | def select_best_armies(population, num_best): 35 | # select the best armies for reproduction 36 | # return the selected armies 37 | pass 38 | 39 | def war_strategy_optimization(population_size, num_resources, num_targets, num_generations): 40 | population = initialize_population(population_size, num_resources, num_targets) 41 | for i in range(num_generations): 42 | for army in population: 43 | fitness = evaluate_fitness(army, army["targets"]) 44 | army = allocate_resources(army, army["resources"]) 45 | army["fitness"] = fitness 46 | 47 | selected_armies = select_best_armies(population, population_size // 2) 48 | offspring = [] 49 | for i in range(population_size): 50 | parent1 = random.choice(selected_armies) 51 | parent2 = random.choice(selected_armies) 52 | child = crossover(parent1, parent2) 53 | child = mutate(child, mutation_rate) 54 | offspring.append(child) 55 | 56 | population = offspring 57 | best_army = max(population, key=lambda x: x["fitness"]) 58 | return best_army -------------------------------------------------------------------------------- /memetic/memetic.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define the fitness function 4 | def fitness_func(x): 5 | return np.sum(x) 6 | 7 | # Define the local search method 8 | def local_search(x): 9 | y = x.copy() 10 | for i in range(len(x)): 11 | y[i] = 1 - y[i] 12 | if fitness_func(y) > fitness_func(x): 13 | x = y.copy() 14 | else: 15 | y[i] = 1 - y[i] 16 | return x 17 | 18 | # Define the memetic algorithm 19 | def memetic_algorithm(pop_size, gene_length, mutation_rate, generations): 20 | # Initialize the population 21 | population = np.random.randint(0, 2, size=(pop_size, gene_length)) 22 | 23 | # Iterate over the generations 24 | for i in range(generations): 25 | # Evaluate the fitness of each individual in the population 26 | fitness = np.apply_along_axis(fitness_func, 1, population) 27 | 28 | # Select the parents for the next generation 29 | parents = population[np.argsort(-fitness)][:2] 30 | 31 | # Perform crossover to create the offspring 32 | offspring = np.empty((pop_size, gene_length)) 33 | for j in range(pop_size): 34 | parent1 = parents[j % 2] 35 | parent2 = parents[(j + 1) % 2] 36 | mask = np.random.randint(0, 2, size=gene_length, dtype=bool) 37 | offspring[j][mask] = parent1[mask] 38 | offspring[j][~mask] = parent2[~mask] 39 | 40 | # Perform mutation on the offspring 41 | mask = np.random.random((pop_size, gene_length)) < mutation_rate 42 | offspring[mask] = 1 - offspring[mask] 43 | 44 | # Apply local search to the offspring 45 | offspring = np.apply_along_axis(local_search, 1, offspring) 46 | 47 | # Update the population with the new generation 48 | population = offspring 49 | 50 | # Return the best individual 51 | fitness = np.apply_along_axis(fitness_func, 1, population) 52 | best_individual = population[np.argmax(fitness)] 53 | 54 | return best_individual, np.max(fitness) 55 | 56 | # Example usage 57 | best_individual, max_fitness = memetic_algorithm(pop_size=10, gene_length=5, mutation_rate=0.1, generations=100) 58 | print("Best individual: ", best_individual) 59 | print("Max fitness: ", 
max_fitness) -------------------------------------------------------------------------------- /moth-flame/moth-flame.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define the objective function to be optimized 4 | def sphere(x): 5 | return np.sum(x**2) 6 | 7 | # Set up the MFO algorithm 8 | num_moths = 50 9 | dim = 10 10 | lb = -5.12 11 | ub = 5.12 12 | pa = 0.25 13 | max_iter = 100 14 | 15 | positions = np.random.uniform(lb, ub, size=(num_moths, dim)) 16 | fitness = np.array([sphere(pos) for pos in positions]) 17 | 18 | # Run the MFO algorithm to optimize the objective function 19 | best_fitness = np.inf 20 | best_position = None 21 | 22 | for i in range(max_iter): 23 | # Update the brightness of each moth 24 | brightness = 1 / (1 + fitness) 25 | 26 | # Calculate the distance to the flame 27 | distances = np.sqrt(np.sum((positions - positions.mean(axis=0))**2, axis=1)) 28 | 29 | # Update the position of each moth 30 | for j in range(num_moths): 31 | # Calculate the attraction to the flame 32 | flame_distance = distances[j] 33 | flame_attraction = pa * np.exp(-flame_distance) * (positions.mean(axis=0) - positions[j]) 34 | 35 | # Calculate the attraction to other moths 36 | moth_indices = np.arange(num_moths) 37 | moth_indices = np.delete(moth_indices, j) 38 | moth_distances = distances[moth_indices] 39 | brightest_moth_index = np.argmax(brightness[moth_indices]) 40 | brightest_moth_distance = moth_distances[brightest_moth_index] 41 | brightest_moth_position = positions[moth_indices[brightest_moth_index]] 42 | moth_attraction = pa * np.exp(-brightest_moth_distance) * (brightest_moth_position - positions[j]) 43 | 44 | # Update the position of the moth 45 | movement = flame_attraction + moth_attraction 46 | positions[j] += movement 47 | 48 | # Clip the position to the search space 49 | positions[j] = np.clip(positions[j], lb, ub) 50 | 51 | # Update the fitness of the moth 52 | fitness[j] = sphere(positions[j]) 53 | 54 | # Update the best solution found so far 55 | if fitness[j] < best_fitness: 56 | best_fitness = fitness[j] 57 | best_position = positions[j] 58 | 59 | # Print the current best solution 60 | print("Iteration", i+1, "Best cost:", best_fitness) 61 | 62 | # Print the final best solution 63 | print("Final best cost:", best_fitness) 64 | print("Final best position:", best_position) -------------------------------------------------------------------------------- /artificial-bee-colony/artificial-bee-colony.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def abc_algorithm(objective_function, lb, ub, colony_size=10, max_iter=100): 4 | dim = len(lb) 5 | best_solution = None 6 | best_fitness = np.inf 7 | 8 | # initialize population 9 | population = np.zeros((colony_size, dim)) 10 | for i in range(colony_size): 11 | population[i] = np.random.uniform(lb, ub) 12 | 13 | # evaluate population fitness 14 | fitness = np.zeros(colony_size) 15 | for i in range(colony_size): 16 | fitness[i] = objective_function(population[i]) 17 | 18 | # main loop 19 | for it in range(max_iter): 20 | # employed bee phase 21 | for i in range(colony_size): 22 | phi = np.random.uniform(low=-1, high=1, size=dim) 23 | new_solution = population[i] + phi * (population[np.random.randint(colony_size)] - population[i]) 24 | new_solution = np.clip(new_solution, lb, ub) 25 | new_fitness = objective_function(new_solution) 26 | if new_fitness < fitness[i]: 27 | population[i] = new_solution 28 
| fitness[i] = new_fitness 29 | 30 | # onlooker bee phase 31 | total_fitness = np.sum(fitness) 32 | probabilities = fitness / total_fitness 33 | for i in range(colony_size): 34 | if np.random.uniform() < probabilities[i]: 35 | phi = np.random.uniform(low=-1, high=1, size=dim) 36 | new_solution = population[i] + phi * (population[np.random.randint(colony_size)] - population[i]) 37 | new_solution = np.clip(new_solution, lb, ub) 38 | new_fitness = objective_function(new_solution) 39 | if new_fitness < fitness[i]: 40 | population[i] = new_solution 41 | fitness[i] = new_fitness 42 | 43 | # scout bee phase 44 | for i in range(colony_size): 45 | if np.random.uniform() < 0.1: 46 | population[i] = np.random.uniform(lb, ub) 47 | fitness[i] = objective_function(population[i]) 48 | 49 | # update best solution 50 | index = np.argmin(fitness) 51 | if fitness[index] < best_fitness: 52 | best_fitness = fitness[index] 53 | best_solution = population[index] 54 | 55 | print("Iteration {}: Best Fitness = {}".format(it, best_fitness)) 56 | 57 | return best_solution, best_fitness -------------------------------------------------------------------------------- /grey-wolf/grey-wolf.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define the objective function 4 | def objective(x): 5 | return np.sum(x**2) 6 | 7 | # Define the Grey Wolf Optimizer function 8 | def grey_wolf_optimizer(obj_func, lb, ub, dim, search_agents=10, max_iter=100): 9 | # Initialize alpha, beta, and delta positions 10 | alpha_pos = np.zeros(dim) 11 | alpha_score = float("inf") 12 | beta_pos = np.zeros(dim) 13 | beta_score = float("inf") 14 | delta_pos = np.zeros(dim) 15 | delta_score = float("inf") 16 | 17 | # Initialize the search agents 18 | positions = np.zeros((search_agents, dim)) 19 | for i in range(search_agents): 20 | positions[i] = np.random.uniform(lb, ub, dim) 21 | 22 | # Main loop 23 | for iteration in range(max_iter): 24 | # Update alpha, beta, and delta positions 25 | for i in range(search_agents): 26 | # Evaluate the objective function for the current position 27 | fitness = obj_func(positions[i]) 28 | 29 | if fitness < alpha_score: 30 | alpha_score = fitness 31 | alpha_pos = positions[i] 32 | elif (fitness > alpha_score and fitness < beta_score): 33 | beta_score = fitness 34 | beta_pos = positions[i] 35 | elif (fitness > alpha_score and fitness > beta_score and fitness < delta_score): 36 | delta_score = fitness 37 | delta_pos = positions[i] 38 | 39 | # Update the search agents 40 | a = 2 - iteration * (2 / max_iter) 41 | for i in range(search_agents): 42 | r1 = np.random.random() 43 | r2 = np.random.random() 44 | A1 = 2 * a * r1 - a 45 | C1 = 2 * r2 46 | D_alpha = np.abs(C1 * alpha_pos - positions[i]) 47 | X1 = alpha_pos - A1 * D_alpha 48 | 49 | r1 = np.random.random() 50 | r2 = np.random.random() 51 | A2 = 2 * a * r1 - a 52 | C2 = 2 * r2 53 | D_beta = np.abs(C2 * beta_pos - positions[i]) 54 | X2 = beta_pos - A2 * D_beta 55 | 56 | r1 = np.random.random() 57 | r2 = np.random.random() 58 | A3 = 2 * a * r1 - a 59 | C3 = 2 * r2 60 | D_delta = np.abs(C3 * delta_pos - positions[i]) 61 | X3 = delta_pos - A3 * D_delta 62 | 63 | positions[i] = (X1 + X2 + X3) / 3 64 | 65 | return alpha_pos, alpha_score -------------------------------------------------------------------------------- /rat-swarm/rat-swarm.py: -------------------------------------------------------------------------------- 1 | import random 2 | import math 3 | 4 | def distance(x1, y1, x2, y2): 5 | return 
math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) 6 | 7 | class RSO: 8 | def __init__(self, num_rats, num_iterations, search_space, objective_function): 9 | self.num_rats = num_rats 10 | self.num_iterations = num_iterations 11 | self.search_space = search_space 12 | self.objective_function = objective_function 13 | self.best_solution = None 14 | self.best_fitness = float('inf') 15 | self.rats = self.initialize_rats() 16 | 17 | def initialize_rats(self): 18 | rats = [] 19 | for i in range(self.num_rats): 20 | x = random.uniform(self.search_space[0], self.search_space[1]) 21 | y = random.uniform(self.search_space[0], self.search_space[1]) 22 | rats.append({'x': x, 'y': y}) 23 | return rats 24 | 25 | def run(self): 26 | for i in range(self.num_iterations): 27 | for rat in self.rats: 28 | rat['fitness'] = self.objective_function(rat['x'], rat['y']) 29 | if rat['fitness'] < self.best_fitness: 30 | self.best_fitness = rat['fitness'] 31 | self.best_solution = rat.copy() 32 | 33 | for rat in self.rats: 34 | other_rats = [r for r in self.rats if r != rat] 35 | random_rat = random.choice(other_rats) 36 | if rat['fitness'] < random_rat['fitness']: 37 | distance_to_random_rat = distance(rat['x'], rat['y'], random_rat['x'], random_rat['y']) 38 | dx = (random_rat['x'] - rat['x']) / distance_to_random_rat 39 | dy = (random_rat['y'] - rat['y']) / distance_to_random_rat 40 | step_size = random.uniform(0, 1) * distance_to_random_rat 41 | new_x = rat['x'] + step_size * dx 42 | new_y = rat['y'] + step_size * dy 43 | if self.search_space[0] <= new_x <= self.search_space[1] and self.search_space[0] <= new_y <= self.search_space[1]: 44 | rat['x'] = new_x 45 | rat['y'] = new_y 46 | 47 | def print_result(self): 48 | print(f"Best solution: {self.best_solution}") 49 | print(f"Best fitness: {self.best_fitness}") 50 | 51 | def objective_function(x, y): 52 | return (x - 1) ** 2 + (y - 2) ** 2 53 | 54 | rso = RSO(num_rats=10, num_iterations=100, search_space=(-10, 10), objective_function=objective_function) 55 | rso.run() 56 | rso.print_result() -------------------------------------------------------------------------------- /crow-search/crow-search.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Define the problem to be optimized 4 | def objective_function(x): 5 | return np.sum(np.square(x)) 6 | 7 | # Define the Crow Search Algorithm function 8 | def crow_search_algorithm(objective_function, lb, ub, max_iter, num_crows): 9 | # Initialize the population of crows randomly in the search space 10 | crows = np.random.uniform(lb, ub, (num_crows, len(lb))) 11 | 12 | # Initialize the best position and fitness 13 | best_position = np.zeros(len(lb)) 14 | best_fitness = np.inf 15 | 16 | # Evaluate the fitness of each crow in the population 17 | fitness = np.zeros(num_crows) 18 | for i in range(num_crows): 19 | fitness[i] = objective_function(crows[i]) 20 | if fitness[i] < best_fitness: 21 | best_fitness = fitness[i] 22 | best_position = crows[i].copy() 23 | 24 | # Iterate until a termination criterion is met 25 | for t in range(max_iter): 26 | # Generate a new population of crows 27 | new_crows = np.zeros((num_crows, len(lb))) 28 | for i in range(num_crows): 29 | # Determine the best crow in the population 30 | best_crow = np.argmin(fitness) 31 | 32 | # Generate a new position for the current crow using the best crow 33 | new_position = np.zeros(len(lb)) 34 | for j in range(len(lb)): 35 | a = np.random.uniform(-1, 1) 36 | b = np.random.uniform(-1, 1) 37 | 
new_position[j] = crows[i][j] + a * (crows[i][j] - crows[best_crow][j]) + b * (best_position[j] - crows[i][j]) 38 | if new_position[j] < lb[j]: 39 | new_position[j] = lb[j] 40 | elif new_position[j] > ub[j]: 41 | new_position[j] = ub[j] 42 | 43 | # Evaluate the fitness of the new position 44 | new_fitness = objective_function(new_position) 45 | 46 | # Update the best position and fitness 47 | if new_fitness < best_fitness: 48 | best_fitness = new_fitness 49 | best_position = new_position.copy() 50 | 51 | # Add the new crow to the new population 52 | new_crows[i] = new_position 53 | 54 | # Replace the worst crows in the current population with the best crows from the new population 55 | for i in range(num_crows): 56 | worst_crow = np.argmax(fitness) 57 | if fitness[i] > fitness[worst_crow]: 58 | crows[worst_crow] = new_crows[i] 59 | fitness[worst_crow] = fitness[i] 60 | 61 | return best_position, best_fitness -------------------------------------------------------------------------------- /flower-pollination/flower-pollination.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def objective_function(x): 4 | """Objective function to be optimized.""" 5 | return np.sum(np.square(x)) 6 | 7 | def levy_flight(beta): 8 | """Generate a random step using the Levy flight distribution.""" 9 | sigma = (np.math.gamma(1 + beta) * np.sin(np.pi * beta / 2) / 10 | (np.math.gamma((1 + beta) / 2) * beta * np.power(2, (beta - 1) / 2))) 11 | u = np.random.normal(0, sigma) 12 | v = np.random.normal(0, 1) 13 | step = u / np.power(np.abs(v), 1 / beta) 14 | return step 15 | 16 | def initialize_flower_population(num_flowers, num_dimensions, domain_bounds): 17 | """Initialize the flower population randomly within the search space.""" 18 | flowers = [] 19 | for i in range(num_flowers): 20 | flower = np.random.uniform(low=domain_bounds[0], high=domain_bounds[1], size=num_dimensions) 21 | flowers.append(flower) 22 | return flowers 23 | 24 | def flower_pollination_algorithm(objective_function, num_flowers, num_iterations, num_dimensions, domain_bounds): 25 | """Optimize the objective function using the Flower Pollination Algorithm.""" 26 | flowers = initialize_flower_population(num_flowers, num_dimensions, domain_bounds) 27 | fbest = np.inf 28 | best_flower = None 29 | for i in range(num_iterations): 30 | # Calculate the fitness of each flower 31 | fitness = np.array([objective_function(flower) for flower in flowers]) 32 | # Find the best flower 33 | index = np.argmin(fitness) 34 | if fitness[index] < fbest: 35 | fbest = fitness[index] 36 | best_flower = flowers[index] 37 | # Generate a new flower population using the flower reproduction and 38 | # pollination mechanisms 39 | new_flowers = [] 40 | for j in range(num_flowers): 41 | # Randomly select a flower to use for reproduction 42 | k = np.random.randint(num_flowers) 43 | # Generate a new flower using the reproduction mechanism 44 | new_flower = flowers[j] + np.random.uniform(low=-1, high=1, size=num_dimensions) * \ 45 | (flowers[j] - flowers[k]) 46 | # Apply the pollination mechanism to the new flower 47 | beta = 3 / 2 # Levy flight parameter 48 | step_size = levy_flight(beta) 49 | new_flower += step_size * (new_flower - best_flower) 50 | # Ensure the new flower is within the search space bounds 51 | new_flower = np.clip(new_flower, domain_bounds[0], domain_bounds[1]) 52 | new_flowers.append(new_flower) 53 | flowers = new_flowers 54 | return fbest, best_flower 
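55 | 
56 | # A minimal usage sketch for flower_pollination_algorithm; the population size,
57 | # iteration count, dimensionality, and bounds below are illustrative assumptions,
58 | # not values taken from the file above. Note that levy_flight relies on
59 | # np.math.gamma, which was removed in NumPy 2.0; math.gamma is a drop-in substitute.
60 | if __name__ == "__main__":
61 | best_val, best_x = flower_pollination_algorithm(objective_function, num_flowers=20,
62 | num_iterations=100, num_dimensions=5, domain_bounds=(-10, 10))
63 | print("Best fitness:", best_val)
64 | print("Best flower:", best_x)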
-------------------------------------------------------------------------------- /imperialist-competitive/imperialist-competitive.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def imperialist_competitive_algorithm(objective_func, bounds, n_countries=10, n_iterations=1000, revolution_rate=0.1, assimilation_rate=0.1): 4 | # Initialize the population of countries with random solutions 5 | n_variables = len(bounds) 6 | countries = np.zeros((n_countries, n_variables)) 7 | for i in range(n_countries): 8 | for j in range(n_variables): 9 | countries[i,j] = np.random.uniform(bounds[j][0], bounds[j][1]) 10 | 11 | for iteration in range(n_iterations): 12 | # Evaluate the fitness of each country 13 | fitness = np.array([objective_func(country) for country in countries]) 14 | imperialist_index = np.argmax(fitness) 15 | 16 | # Find the imperialist 17 | imperialist = countries[imperialist_index] 18 | imperialist_fitness = fitness[imperialist_index] 19 | 20 | # For each colony, calculate its normalized distance from the imperialist 21 | distances = np.zeros(n_countries) 22 | for i in range(n_countries): 23 | distances[i] = np.linalg.norm(countries[i] - imperialist) / np.linalg.norm(bounds) 24 | 25 | # Calculate the probability of revolution for each colony 26 | p = revolution_rate * (1 - distances) 27 | p[imperialist_index] = 0 28 | p = p / np.sum(p) 29 | 30 | # If a colony initiates a revolution, replace the imperialist with the colony and reset the hierarchy 31 | for i in range(n_countries): 32 | if np.random.rand() < p[i]: 33 | countries[imperialist_index] = countries[i] 34 | fitness[imperialist_index] = fitness[i] 35 | break 36 | 37 | # If no revolution occurs, redistribute some of the imperialist's resources to the weaker colonies 38 | if np.random.rand() > assimilation_rate: 39 | for i in range(n_countries): 40 | if i != imperialist_index: 41 | delta = np.random.uniform(0, 1) * (imperialist - countries[i]) 42 | countries[i] += delta 43 | 44 | # Evaluate the fitness of each colony and update the population 45 | for i in range(n_countries): 46 | if i != imperialist_index: 47 | fitness[i] = objective_func(countries[i]) 48 | 49 | # Print the best solution found so far 50 | best_fitness = np.max(fitness) 51 | best_solution = countries[np.argmax(fitness)] 52 | print("Iteration {}: Best fitness = {}".format(iteration, best_fitness)) 53 | 54 | return best_solution, best_fitness -------------------------------------------------------------------------------- /ant-colony/ant-colony.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class AntColony: 4 | def __init__(self, dist, num_ants, num_iterations, decay, alpha=1, beta=2): 5 | self.dist = dist 6 | self.num_ants = num_ants 7 | self.num_iterations = num_iterations 8 | self.decay = decay 9 | self.alpha = alpha 10 | self.beta = beta 11 | self.pheromone = np.ones_like(dist) / len(dist) 12 | self.best_path = None 13 | self.best_dist = np.inf 14 | 15 | def run(self): 16 | for i in range(self.num_iterations): 17 | paths = self.generate_paths() 18 | self.update_pheromone(paths) 19 | best_path, best_dist = self.get_best_path(paths) 20 | if best_dist < self.best_dist: 21 | self.best_path = best_path 22 | self.best_dist = best_dist 23 | self.pheromone = self.pheromone * self.decay 24 | 25 | def generate_paths(self): 26 | paths = [] 27 | for ant in range(self.num_ants): 28 | path = [] 29 | visited = set() 30 | node = 
np.random.choice(len(self.dist)) 31 | path.append(node) 32 | visited.add(node) 33 | while len(visited) < len(self.dist): 34 | probs = self.get_probabilities(node, visited) 35 | node = np.random.choice(len(self.dist), p=probs) 36 | path.append(node) 37 | visited.add(node) 38 | paths.append(path) 39 | return paths 40 | 41 | def get_probabilities(self, node, visited): 42 | pheromone = np.copy(self.pheromone[node]) 43 | pheromone[list(visited)] = 0 44 | if np.sum(pheromone) == 0: 45 | return np.ones_like(pheromone) / len(pheromone) 46 | else: 47 | dist = self.dist[node] 48 | return (pheromone ** self.alpha) * ((1 / dist) ** self.beta) / np.sum((pheromone ** self.alpha) * ((1 / dist) ** self.beta)) 49 | 50 | def update_pheromone(self, paths): 51 | pheromone_delta = np.zeros_like(self.pheromone) 52 | for path in paths: 53 | for i in range(len(path) - 1): 54 | pheromone_delta[path[i], path[i+1]] += 1 / self.dist[path[i], path[i+1]] 55 | self.pheromone = self.pheromone + pheromone_delta 56 | 57 | def get_best_path(self, paths): 58 | best_path = None 59 | best_dist = np.inf 60 | for path in paths: 61 | dist = 0 62 | for i in range(len(path) - 1): 63 | dist += self.dist[path[i], path[i+1]] 64 | if dist < best_dist: 65 | best_path = path 66 | best_dist = dist 67 | return best_path, best_dist -------------------------------------------------------------------------------- /harmony-search/harmony-search.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | 4 | class HarmonySearch: 5 | def __init__(self, obj_function, lb, ub, num_harmony=10, max_iter=1000, bw=0.01, hmcr=0.9, par=0.4): 6 | self.obj_function = obj_function 7 | self.lb = np.array(lb) 8 | self.ub = np.array(ub) 9 | self.num_harmony = num_harmony 10 | self.max_iter = max_iter 11 | self.bw = bw 12 | self.hmcr = hmcr 13 | self.par = par 14 | 15 | def optimize(self): 16 | dim = len(self.lb) 17 | # Initialize harmonies 18 | harmonies = np.zeros((self.num_harmony, dim)) 19 | for i in range(self.num_harmony): 20 | harmonies[i] = self.lb + (self.ub - self.lb) * np.random.rand(dim) 21 | 22 | # Evaluate harmonies 23 | obj_vals = np.array([self.obj_function(h) for h in harmonies]) 24 | 25 | # Sort harmonies by objective value 26 | sorted_idx = np.argsort(obj_vals) 27 | harmonies = harmonies[sorted_idx] 28 | obj_vals = obj_vals[sorted_idx] 29 | 30 | # Search for new harmonies 31 | for t in range(1, self.max_iter+1): 32 | new_harmonies = np.zeros((self.num_harmony, dim)) 33 | for i in range(self.num_harmony): 34 | if np.random.rand() < self.hmcr: # Memory consideration 35 | idx = np.random.choice(self.num_harmony, 1)[0] 36 | new_harmonies[i] = harmonies[idx] 37 | if np.random.rand() < self.par: # Pitch adjustment 38 | for j in range(dim): 39 | if np.random.rand() < 0.5: 40 | new_harmonies[i][j] += np.random.rand() * self.bw 41 | else: 42 | new_harmonies[i][j] -= np.random.rand() * self.bw 43 | else: # Random selection 44 | new_harmonies[i] = self.lb + (self.ub - self.lb) * np.random.rand(dim) 45 | 46 | # Evaluate new harmonies 47 | new_obj_vals = np.array([self.obj_function(h) for h in new_harmonies]) 48 | 49 | # Sort new harmonies by objective value 50 | all_harmonies = np.concatenate((harmonies, new_harmonies), axis=0) 51 | all_obj_vals = np.concatenate((obj_vals, new_obj_vals), axis=0) 52 | sorted_idx = np.argsort(all_obj_vals) 53 | all_harmonies = all_harmonies[sorted_idx] 54 | all_obj_vals = all_obj_vals[sorted_idx] 55 | 56 | # Keep top num_harmony harmonies 57 | harmonies 
= all_harmonies[:self.num_harmony] 58 | obj_vals = all_obj_vals[:self.num_harmony] 59 | 60 | # Return best harmony and its objective value 61 | return harmonies[0], obj_vals[0] -------------------------------------------------------------------------------- /red-deer/red-deer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | 4 | # Define the objective function 5 | def obj_func(x): 6 | return sum(x**2) 7 | 8 | # Define the Red Deer Algorithm 9 | def red_deer_algorithm(obj_func, lb, ub, max_iter, pop_size, num_red_deer, num_stags): 10 | # Initialization 11 | num_dim = len(lb) 12 | positions = np.zeros((pop_size, num_dim)) 13 | fitness = np.zeros(pop_size) 14 | for i in range(pop_size): 15 | positions[i] = lb + (ub - lb) * np.random.rand(num_dim) 16 | fitness[i] = obj_func(positions[i]) 17 | 18 | # Main loop 19 | for t in range(max_iter): 20 | # Sort the positions and fitness values 21 | sorted_indices = np.argsort(fitness) 22 | positions = positions[sorted_indices] 23 | fitness = fitness[sorted_indices] 24 | 25 | # Red deer phase 26 | for i in range(num_red_deer): 27 | # Select a random deer 28 | r = np.random.randint(num_stags, pop_size) 29 | 30 | # Move towards the centroid of the stags 31 | centroid = np.mean(positions[:num_stags], axis=0) 32 | direction = centroid - positions[r] 33 | distance = np.linalg.norm(direction) 34 | if distance > 0: 35 | direction /= distance 36 | step_size = np.random.normal(0, 1) * np.exp(-t / max_iter) 37 | new_position = positions[r] + step_size * direction 38 | 39 | # Clip the position to the search space 40 | new_position = np.clip(new_position, lb, ub) 41 | 42 | # Evaluate the new position 43 | new_fitness = obj_func(new_position) 44 | 45 | # Update the position and fitness if the new position is better 46 | if new_fitness < fitness[r]: 47 | positions[r] = new_position 48 | fitness[r] = new_fitness 49 | 50 | # Stag phase 51 | for i in range(num_stags, pop_size): 52 | # Select two stags 53 | s1, s2 = np.random.choice(num_stags, size=2, replace=False) 54 | 55 | # Compute the new position 56 | new_position = positions[i] + np.random.uniform() * (positions[s1] - positions[s2]) 57 | 58 | # Clip the position to the search space 59 | new_position = np.clip(new_position, lb, ub) 60 | 61 | # Evaluate the new position 62 | new_fitness = obj_func(new_position) 63 | 64 | # Update the position and fitness if the new position is better 65 | if new_fitness < fitness[i]: 66 | positions[i] = new_position 67 | fitness[i] = new_fitness 68 | 69 | # Return the best position and fitness 70 | best_index = np.argmin(fitness) 71 | best_position = positions[best_index] 72 | best_fitness = fitness[best_index] 73 | return best_position, best_fitness -------------------------------------------------------------------------------- /bat/bat.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def init_bats(num_bats, num_dimensions, lb, ub): 4 | """Initialize the bats with random positions and velocities""" 5 | bats = np.zeros((num_bats, num_dimensions + 2)) # create an array to hold the bats 6 | bats[:, :num_dimensions] = np.random.uniform(lb, ub, (num_bats, num_dimensions)) # initialize positions 7 | bats[:, num_dimensions] = np.random.uniform(0, 1, num_bats) # initialize velocities 8 | bats[:, num_dimensions+1] = np.inf # set initial fitness to infinity 9 | return bats 10 | 11 | def simple_bounds(pos, lb, ub): 12 | """Apply simple bounds to the 
bat's position""" 13 | pos[pos < lb] = lb[pos < lb] 14 | pos[pos > ub] = ub[pos > ub] 15 | return pos 16 | 17 | def update_velocity(bat, global_best, A, r, alpha, gamma): 18 | """Update the bat's velocity""" 19 | bat_velocity = bat[-2] 20 | bat_position = bat[:-2] 21 | rand_vect = np.random.uniform(0, 1, len(bat_position)) 22 | velocity = bat_velocity + (bat_position - global_best) * A 23 | velocity += alpha * (rand_vect - 0.5) 24 | velocity *= gamma 25 | return velocity 26 | 27 | def update_position(position, velocity): 28 | """Update the bat's position""" 29 | position += velocity 30 | return position 31 | 32 | def update_pulse_rate(bat, f_min, f_max, r): 33 | """Update the pulse rate of the bat""" 34 | pulse_rate = f_min + (f_max - f_min) * r 35 | return pulse_rate 36 | 37 | def update_loudness(loudness, alpha): 38 | """Update the loudness of the bat""" 39 | loudness *= alpha 40 | return loudness 41 | 42 | def update_fitness(bat, func): 43 | """Update the fitness of the bat""" 44 | fitness = func(bat[:-2]) 45 | if fitness < bat[-1]: 46 | bat[-1] = fitness 47 | return bat 48 | 49 | def bat_algorithm(func, num_bats, num_iterations, num_dimensions, lb, ub, A, f_min, f_max, alpha, gamma): 50 | """Bat algorithm for minimizing a function""" 51 | bats = init_bats(num_bats, num_dimensions, lb, ub) 52 | global_best = np.zeros(num_dimensions) 53 | global_best_fitness = np.inf 54 | for i in range(num_iterations): 55 | for j in range(num_bats): 56 | bat = bats[j] 57 | frequency = f_min + (f_max - f_min) * np.random.uniform(0, 1) 58 | velocity = update_velocity(bat, global_best, A, frequency, alpha, gamma) 59 | position = update_position(bat[:-2], velocity) 60 | position = simple_bounds(position, lb, ub) 61 | bat[:-2] = position 62 | bat[-2] = frequency 63 | bat = update_fitness(bat, func) 64 | if bat[-1] < global_best_fitness: 65 | global_best = bat[:-2] 66 | global_best_fitness = bat[-1] 67 | bats[j] = bat 68 | A *= gamma 69 | alpha *= 0.9 # damping factor for loudness 70 | return global_best, global_best_fitness -------------------------------------------------------------------------------- /culture/culture.py: -------------------------------------------------------------------------------- 1 | import random 2 | import math 3 | 4 | # Define the function to be optimized 5 | def func(x): 6 | return -x * math.sin(math.sqrt(abs(x))) 7 | 8 | # Define the parameters of the algorithm 9 | pop_size = 50 10 | num_subcultures = 5 11 | max_iterations = 100 12 | p_mig = 0.1 13 | p_mut = 0.1 14 | 15 | # Initialize the population and subcultures 16 | pop = [[random.uniform(-10, 10)] for i in range(pop_size)] 17 | subcultures = [[] for i in range(num_subcultures)] 18 | for i in range(pop_size): 19 | subcultures[i % num_subcultures].append(pop[i]) 20 | 21 | # Main loop of the algorithm 22 | for iteration in range(max_iterations): 23 | # Evaluate the fitness of each individual in each subculture 24 | fitness = [[func(individual[0])] for individual in pop] 25 | sub_fitness = [[] for i in range(num_subcultures)] 26 | for i in range(pop_size): 27 | sub_fitness[i % num_subcultures].append(fitness[i]) 28 | 29 | # Allow individuals in a subculture to interact and share their knowledge 30 | for i in range(num_subcultures): 31 | for j in range(len(subcultures[i])): 32 | for k in range(j+1, len(subcultures[i])): 33 | if sub_fitness[i][j][0] < sub_fitness[i][k][0]: 34 | subcultures[i][j] = subcultures[i][k] 35 | 36 | # Allow individuals to migrate between subcultures 37 | for i in range(pop_size): 38 | if random.random() < 
p_mig: 39 | source = i % num_subcultures 40 | target = (i + random.randint(1, num_subcultures-1)) % num_subcultures 41 | if sub_fitness[target] and fitness[i][0] > max(sub_fitness[target])[0]: 42 | subcultures[source].remove(pop[i]) 43 | subcultures[target].append(pop[i]) 44 | 45 | # Generate new individuals through crossover and mutation 46 | new_pop = [] 47 | for i in range(pop_size): 48 | subculture = subcultures[i % num_subcultures] 49 | parent1 = random.choice(subculture) 50 | parent2 = random.choice(subculture) 51 | child = [parent1[0] + random.uniform(-1, 1) * (parent1[0] - parent2[0])] 52 | if random.random() < p_mut: 53 | child[0] += random.gauss(0, 1) 54 | new_pop.append(child) 55 | 56 | # Evaluate the fitness of the new individuals 57 | new_fitness = [func(individual[0]) for individual in new_pop] 58 | 59 | # Select the best individuals in each subculture to form the new generation 60 | new_subcultures = [[] for i in range(num_subcultures)] 61 | for i in range(pop_size): 62 | new_subcultures[i % num_subcultures].append([new_pop[i]]) 63 | 64 | for i in range(num_subcultures): 65 | new_subcultures[i].sort(key=lambda x: func(x[0]), reverse=True) 66 | pop[i::num_subcultures] = new_subcultures[i][:pop_size//num_subcultures] 67 | 68 | # Print the best solution found 69 | best_solution = max(pop, key=lambda x: func(x[0])) 70 | print("Best solution:", best_solution[0]) -------------------------------------------------------------------------------- /pelican/pelican.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class PelicanOptimizer: 4 | def __init__(self, obj_func, lb, ub, num_pelicans=30, max_iter=500, alpha=0.5, beta=1.5, gamma=0.1): 5 | self.obj_func = obj_func 6 | self.lb = lb 7 | self.ub = ub 8 | self.num_vars = len(lb) 9 | self.num_pelicans = num_pelicans 10 | self.max_iter = max_iter 11 | self.alpha = alpha 12 | self.beta = beta 13 | self.gamma = gamma 14 | self.best_solution = None 15 | self.best_fitness = np.inf 16 | 17 | def optimize(self): 18 | # Initialization 19 | positions = np.random.uniform(self.lb, self.ub, size=(self.num_pelicans, self.num_vars)) 20 | fitness = np.array([self.obj_func(p) for p in positions]) 21 | best_pelican_index = np.argmin(fitness) 22 | self.best_solution = positions[best_pelican_index] 23 | self.best_fitness = fitness[best_pelican_index] 24 | 25 | # Main loop 26 | for t in range(self.max_iter): 27 | # Update the leader pelican 28 | if t % 10 == 0: 29 | best_pelican_index = np.argmin(fitness) 30 | if fitness[best_pelican_index] < self.best_fitness: 31 | self.best_solution = positions[best_pelican_index] 32 | self.best_fitness = fitness[best_pelican_index] 33 | 34 | # Move the pelicans 35 | for i in range(self.num_pelicans): 36 | # Determine the leaders 37 | r1 = np.random.randint(self.num_pelicans) 38 | r2 = np.random.randint(self.num_pelicans) 39 | leader1 = positions[r1] 40 | leader2 = positions[r2] 41 | if fitness[r1] < fitness[r2]: 42 | leader = leader1 43 | else: 44 | leader = leader2 45 | 46 | # Update the position of the pelican 47 | r = np.random.uniform() 48 | if r < self.alpha: 49 | # Move towards the leader 50 | direction = leader - positions[i] 51 | positions[i] += np.random.uniform() * direction 52 | elif r < self.beta: 53 | # Move randomly 54 | positions[i] += np.random.uniform(-1, 1, size=self.num_vars) * (self.ub - self.lb) * self.gamma 55 | else: 56 | # Move towards the best pelican 57 | direction = self.best_solution - positions[i] 58 | positions[i] += 
--------------------------------------------------------------------------------
/lion-optimization/lion-optimization.py:
--------------------------------------------------------------------------------
import numpy as np

class LionOptimizationAlgorithm:
    def __init__(self, objective_function, lb, ub, dimension, population_size, p=0.5, ub_init=2.0):
        self.objective_function = objective_function
        self.lb = lb
        self.ub = ub
        self.dimension = dimension
        self.population_size = population_size
        self.p = p                  # fraction of the population treated as lions
        self.ub_init = ub_init

    def optimize(self, max_iter):
        # initialize the population
        positions = np.random.uniform(low=self.lb, high=self.ub, size=(self.population_size, self.dimension))
        ub_matrix = np.tile(self.ub_init * (self.ub - self.lb), (self.population_size, self.dimension))  # note: not used below
        fitness = np.apply_along_axis(self.objective_function, 1, positions)
        best_fitness_idx = np.argmin(fitness)
        best_fitness = fitness[best_fitness_idx]
        best_position = positions[best_fitness_idx].copy()

        for i in range(max_iter):
            # sort the population by fitness
            sorted_idx = np.argsort(fitness)
            positions = positions[sorted_idx]
            fitness = fitness[sorted_idx]

            # divide the population into two groups: lions and cubs
            n_lions = int(self.p * self.population_size)
            lions = positions[:n_lions]
            cubs = positions[n_lions:]

            # update the position of the lions (move towards the group mean)
            mean_lion = np.mean(lions, axis=0)
            new_lions = np.zeros_like(lions)
            for j in range(n_lions):
                r = np.random.uniform(size=self.dimension)
                new_lions[j] = lions[j] + r * (mean_lion - lions[j])
                new_lions[j] = np.clip(new_lions[j], self.lb, self.ub)
            new_lions_fitness = np.apply_along_axis(self.objective_function, 1, new_lions)
            lions_mask = new_lions_fitness < fitness[:n_lions]
            lions[lions_mask] = new_lions[lions_mask]
            fitness[:n_lions][lions_mask] = new_lions_fitness[lions_mask]

            # update the position of the cubs (each cub follows a randomly chosen lion)
            for j in range(n_lions, self.population_size):
                cub_idx = j - n_lions
                r = np.random.uniform(size=self.dimension)
                a = 2 * r - 1
                b = np.abs(a * lions[np.random.randint(n_lions)] - cubs[cub_idx])
                new_cub = cubs[cub_idx] + a * b
                new_cub = np.clip(new_cub, self.lb, self.ub)
                new_cub_fitness = self.objective_function(new_cub)
                if new_cub_fitness < fitness[j]:
                    cubs[cub_idx] = new_cub
                    fitness[j] = new_cub_fitness

            # update the global best position
            if fitness.min() < best_fitness:
                best_fitness_idx = np.argmin(fitness)
                best_fitness = fitness[best_fitness_idx]
                best_position = positions[best_fitness_idx].copy()

        return best_position, best_fitness
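
# Example usage (an illustrative sketch; the objective, bounds and settings below
# are assumptions chosen only for demonstration):
if __name__ == "__main__":
    def sphere(x):
        return np.sum(x ** 2)

    loa = LionOptimizationAlgorithm(sphere, lb=-5.0, ub=5.0, dimension=2, population_size=30)
    best_pos, best_fit = loa.optimize(max_iter=100)
    print("Best position:", best_pos)
    print("Best fitness:", best_fit)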
--------------------------------------------------------------------------------
/harris-hawks/harris-hawks.py:
--------------------------------------------------------------------------------
import numpy as np

def hho(objective_func, lb, ub, dim, search_agents=50, max_iter=100):
    # Initialize the population of hawks
    x = np.random.uniform(lb, ub, (search_agents, dim))

    # Initialize the fitness of each hawk
    fitness = np.zeros(search_agents)
    for i in range(search_agents):
        fitness[i] = objective_func(x[i, :])

    # Find the index of the imperialist hawk (the one with the best fitness)
    imp = np.argmin(fitness)

    # Initialize the position and fitness of the imperialist hawk
    imp_pos = np.copy(x[imp, :])
    imp_fitness = fitness[imp]

    # Initialize the position of the colonies
    col_pos = np.copy(x)

    for t in range(max_iter):
        # Move each colony
        for i in range(search_agents):
            # Determine the current distance between the colony and the imperialist hawk
            dist = np.sqrt(np.sum((col_pos[i, :] - imp_pos) ** 2))

            # Choose a strategy based on the distance
            if dist > 0.1:
                # Explore the search space
                r1 = np.random.uniform(size=dim)
                r2 = np.random.uniform(size=dim)
                col_pos[i, :] = (col_pos[i, :] + r1 * (imp_pos - dist * r2)) / 2
            else:
                # Exploit the current best solution
                r3 = np.random.uniform(size=dim)
                col_pos[i, :] = imp_pos - r3 * (2 * lb + ub) / 3

            # Keep the colony inside the search bounds
            col_pos[i, :] = np.clip(col_pos[i, :], lb, ub)

        # Update the fitness of each colony
        for i in range(search_agents):
            fitness[i] = objective_func(col_pos[i, :])

        # Find the index of the best colony
        best_col = np.argmin(fitness)

        # Check if the best colony is better than the imperialist hawk
        if fitness[best_col] < imp_fitness:
            imp_pos = np.copy(col_pos[best_col, :])
            imp_fitness = fitness[best_col]
            imp = best_col

        # Update the position of the colonies based on the position of the imperialist hawk
        for i in range(search_agents):
            if i != imp:
                # Calculate the distance between the current colony and the imperialist hawk
                dist = np.sqrt(np.sum((col_pos[i, :] - imp_pos) ** 2))

                # Choose a strategy based on the distance
                if dist > 0.1:
                    # Attack the imperialist hawk
                    r1 = np.random.uniform(size=dim)
                    r2 = np.random.uniform(size=dim)
                    col_pos[i, :] = col_pos[i, :] + r1 * (imp_pos - dist * r2)
                else:
                    # Invade the territory of the imperialist hawk
                    r3 = np.random.uniform(size=dim)
                    col_pos[i, :] = imp_pos + r3 * (2 * lb + ub) / 3

                # Keep the colony inside the search bounds
                col_pos[i, :] = np.clip(col_pos[i, :], lb, ub)

        # Update the fitness of each colony after the second movement phase
        for i in range(search_agents):
            fitness[i] = objective_func(col_pos[i, :])

        # Record any new best solution found in this phase
        best_col = np.argmin(fitness)
        if fitness[best_col] < imp_fitness:
            imp_pos = np.copy(col_pos[best_col, :])
            imp_fitness = fitness[best_col]
            imp = best_col

    # Return the position of the best solution found
    return imp_pos
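
# Example usage (an illustrative sketch; the objective, bounds and settings below
# are assumptions chosen only for demonstration):
if __name__ == "__main__":
    def sphere(x):
        return np.sum(x ** 2)

    best_pos = hho(sphere, lb=-5.0, ub=5.0, dim=2, search_agents=30, max_iter=100)
    print("Best position:", best_pos)
    print("Best fitness:", sphere(best_pos))
--------------------------------------------------------------------------------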