├── 0 Gradient Descent.py
├── 0 Newton's Method.py
├── 0 Stochastic Gradient Descent.py
├── 1 Nelder-Mead.py
├── 1 Powell’s Method.py
├── 2 Best-First Search.py
├── 2 Hill Climbing.py
├── 3 Tree-Structured Parzen Estimator.py
├── 4 African Vultures Optimization Algorithm.py
├── 4 Ant Colony Optimization.py
├── 4 Bat Algorithm.py
├── 4 Bees Algorithm.py
├── 4 Biogeography-Based Optimization.py
├── 4 Brain Storm Optimization.py
├── 4 Cuckoo Search.py
├── 4 Differential Evolution.py
├── 4 Evolutionary Strategy.py
├── 4 Firefly Algorithm.py
├── 4 Galaxy Gravity Optimization.py
├── 4 Genetic Algorithm.py
├── 4 Gravitational Search Algorithm.py
├── 4 Gray Wolf Optimizer.py
├── 4 Harmony Search.py
├── 4 Imperialist Competitive Algorithm.py
├── 4 MOEA D.py
├── 4 NSGA II.py
├── 4 Particle Swarm Optimization.py
├── 4 Simulated Annealing.py
├── 4 Tabu Search.py
├── 4 Teaching-Learning-Based Optimization.py
├── 4 Victoria Amazonica Optimization.py
├── 4 Weevil Damage Optimization Algorithm.py
├── 4 Whale Optimization Algorithm.py
├── Algorithms
│   ├── 0 Gradient Descent.py
│   ├── 0 Newton's Method.py
│   ├── 0 Stochastic Gradient Descent.py
│   ├── 1 Nelder-Mead.py
│   ├── 1 Powell’s Method.py
│   ├── 2 Best-First Search.py
│   ├── 2 Hill Climbing.py
│   ├── 3 Tree-Structured Parzen Estimator.py
│   ├── 4 African Vultures Optimization Algorithm.py
│   ├── 4 Ant Colony Optimization.py
│   ├── 4 Bat Algorithm.py
│   ├── 4 Bees Algorithm.py
│   ├── 4 Biogeography-Based Optimization.py
│   ├── 4 Brain Storm Optimization.py
│   ├── 4 Cuckoo Search.py
│   ├── 4 Differential Evolution.py
│   ├── 4 Evolutionary Strategy.py
│   ├── 4 Firefly Algorithm.py
│   ├── 4 Galaxy Gravity Optimization.py
│   ├── 4 Genetic Algorithm.py
│   ├── 4 Gravitational Search Algorithm.py
│   ├── 4 Gray Wolf Optimizer.py
│   ├── 4 Harmony Search.py
│   ├── 4 Imperialist Competitive Algorithm.py
│   ├── 4 MOEA D.py
│   ├── 4 NSGA II.py
│   ├── 4 Particle Swarm Optimization.py
│   ├── 4 Simulated Annealing.py
│   ├── 4 Tabu Search.py
│   ├── 4 Teaching-Learning-Based Optimization.py
│   ├── 4 Victoria Amazonica Optimization.py
│   ├── 4 Weevil Damage Optimization Algorithm.py
│   └── 4 Whale Optimization Algorithm.py
├── Ant Colony Optimization Bin Packing Problem.py
├── Bat Algorithm Feature Selection.py
├── Bees CNN Optimized (weights and biases).py
├── Bees Economic Dispatching.py
├── Biogeography-Based Optimization Minimum Spanning Tree.py
├── Brain Storm Optimization Parallel Machine Scheduling.py
├── Cuckoo Search Traveling Salesman Problem.py
├── Differential Evolution Clustering.py
├── Differential Evolution Protein Structure Prediction.py
├── Firefly Image Segmentation.py
├── Firefly Space-Time Bending.py
├── Genetic Algorithm Evolutionary Art.py
├── Genetic Algorithm Exoplanetary Adaptation.py
├── Grey Wolf Optimizer VAE Optimized (Latent Space).py
├── Harmony Search Regression.py
├── LICENSE
├── Optimization and Metahuristics.pdf
├── Optimization and Metahuristics.pptx
├── Particle Swarm Optimization Evolutionary Art.py
├── Particle Swarm Optimization Evolved Antenna.py
├── Particle Swarm Optimization Image Segmentation.py
├── Problems
│   ├── Ant Colony Optimization Bin Packing Problem.py
│   ├── Bat Algorithm Feature Selection.py
│   ├── Bees CNN Optimized (weights and biases).py
│   ├── Bees Economic Dispatching.py
│   ├── Biogeography-Based Optimization Minimum Spanning Tree.py
│   ├── Brain Storm Optimization Parallel Machine Scheduling.py
│   ├── Cuckoo Search Traveling Salesman Problem.py
│   ├── Differential Evolution Clustering.py
│   ├── Differential Evolution Protein Structure Prediction.py
│   ├── Firefly Image Segmentation.py
│   ├── Firefly Space-Time Bending.py
│   ├── Genetic Algorithm Evolutionary Art.py
│   ├── Genetic Algorithm Exoplanetary Adaptation.py
│   ├── Grey Wolf Optimizer VAE Optimized (Latent Space).py
│   ├── Harmony Search Regression.py
│   ├── Particle Swarm Optimization Evolutionary Art.py
│   ├── Particle Swarm Optimization Evolved Antenna.py
│   ├── Particle Swarm Optimization Image Segmentation.py
│   ├── Simulated Annealing Quadratic Assignment Problem.py
│   ├── Stochastic Gradient Descent Resource Allocation.py
│   ├── Teaching Learning Based Optimization Vehicle Routing Problem.py
│   ├── Whale Optimization Algorithm Hub Location Allocation.py
│   ├── f.jpg
│   └── tst.jpg
├── README.md
├── Simulated Annealing Quadratic Assignment Problem.py
├── Stochastic Gradient Descent Resource Allocation.py
├── Teaching Learning Based Optimization Vehicle Routing Problem.py
├── Whale Optimization Algorithm Hub Location Allocation.py
├── f.jpg
└── tst.jpg

/1 Nelder-Mead.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | from memory_profiler import memory_usage
4 | import time
5 | from scipy.optimize import minimize
6 |
7 | # Define benchmark functions
8 | def ackley(x):
9 |     a, b, c = 20, 0.2, 2 * np.pi
10 |     d = len(x)
11 |     sum1 = np.sum(x**2)
12 |     sum2 = np.sum(np.cos(c * x))
13 |     return -a * np.exp(-b * np.sqrt(sum1 / d)) - np.exp(sum2 / d) + a + np.exp(1)
14 |
15 | def booth(x):
16 |     return (x[0] + 2 * x[1] - 7)**2 + (2 * x[0] + x[1] - 5)**2
17 |
18 | def rastrigin(x):
19 |     A = 10
20 |     return A * len(x) + np.sum(x**2 - A * np.cos(2 * np.pi * x))
21 |
22 | def rosenbrock(x):
23 |     return np.sum(100 * (x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2)
24 |
25 | def schwefel(x):
26 |     return 418.9829 * len(x) - np.sum(x * np.sin(np.sqrt(np.abs(x))))
27 |
28 | def sphere(x):
29 |     return np.sum(x**2)
30 |
31 | def michalewicz(x):
32 |     m = 10
33 |     return -np.sum(np.sin(x) * np.sin(((np.arange(len(x)) + 1) * x**2) / np.pi)**(2 * m))
34 |
35 | def zakharov(x):
36 |     sum1 = np.sum(x**2)
37 |     sum2 = np.sum(0.5 * (np.arange(len(x)) + 1) * x)
38 |     return sum1 + sum2**2 + sum2**4
39 |
40 | def eggholder(x):
41 |     return -(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47)))) - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47))))
42 |
43 | def beale(x):
44 |     return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2
45 |
46 | def trid(x):
47 |     return np.sum((x - 1)**2) - np.sum(x[:-1] * x[1:])
48 |
49 | def dixon_price(x):
50 |     return (x[0] - 1)**2 + np.sum([(i + 1) * (2 * x[i]**2 - x[i-1])**2 for i in range(1, len(x))])
51 |
52 | def cross_in_tray(x):
53 |     fact1 = np.sin(x[0]) * np.sin(x[1])
54 |     fact2 = np.exp(abs(100 - np.sqrt(x[0]**2 + x[1]**2) / np.pi))
55 |     return -0.0001 * (abs(fact1 * fact2) + 1)**0.1
56 |
57 | def griewank(x):
58 |     return 1 + np.sum(x**2 / 4000) - np.prod(np.cos(x / np.sqrt(np.arange(1, len(x) + 1))))
59 |
60 | def levy(x):
61 |     w = 1 + (x - 1) / 4
62 |     term1 = np.sin(np.pi * w[0])**2
63 |     term2 = np.sum((w[:-1] - 1)**2 * (1 + 10 * np.sin(np.pi * w[:-1] + 1)**2))
64 |     term3 = (w[-1] - 1)**2 * (1 + np.sin(2 * np.pi * w[-1])**2)
65 |     return term1 + term2 + term3
66 |
67 | def matyas(x):
68 |     return 0.26 * (x[0]**2 + x[1]**2) - 0.48 * x[0] * x[1]
69 |
70 | def goldstein_price(x):
71 |     term1 = 1 + ((x[0] + x[1] + 1)**2) * (19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] + 3*x[1]**2)
72 |     term2 = 30 + ((2*x[0] - 3*x[1])**2) * (18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] + 27*x[1]**2)
73 |     return term1 * term2
74 |
75 | def powell(x):
76 |     term1 = (x[0] + 
10*x[1])**2 77 | term2 = 5 * (x[2] - x[3])**2 78 | term3 = (x[1] - 2*x[2])**4 79 | term4 = 10 * (x[0] - x[3])**4 80 | return term1 + term2 + term3 + term4 81 | 82 | def bird(x): 83 | return np.sin(x[0]) * np.exp((1 - np.cos(x[1]))**2) + np.cos(x[1]) * np.exp((1 - np.sin(x[0]))**2) + (x[0] - x[1])**2 84 | 85 | def pyramid(x): 86 | return np.sum(np.abs(x)) 87 | 88 | def nelder_mead(func, x0, max_iter=200): 89 | costs = [] 90 | 91 | def callback(xk): 92 | costs.append(func(xk)) 93 | 94 | result = minimize(func, x0, method='Nelder-Mead', options={'maxiter': max_iter, 'disp': False}, callback=callback) 95 | return result.x, costs 96 | 97 | # Prepare 20 functions 98 | functions = [ 99 | ("1. Ackley", ackley, np.random.uniform(-5, 5, 2)), 100 | ("2. Booth", booth, np.random.uniform(-5, 5, 2)), 101 | ("3. Rastrigin", rastrigin, np.random.uniform(-5, 5, 2)), 102 | ("4. Rosenbrock", rosenbrock, np.random.uniform(-5, 5, 2)), 103 | ("5. Schwefel", schwefel, np.random.uniform(-500, 500, 2)), 104 | ("6. Sphere", sphere, np.random.uniform(-5, 5, 2)), 105 | ("7. Michalewicz", michalewicz, np.random.uniform(0, np.pi, 2)), 106 | ("8. Zakharov", zakharov, np.random.uniform(-5, 5, 2)), 107 | ("9. Eggholder", eggholder, np.random.uniform(-512, 512, 2)), 108 | ("10. Beale", beale, np.random.uniform(-4.5, 4.5, 2)), 109 | ("11. Trid", trid, np.random.uniform(-5, 5, 2)), 110 | ("12. Dixon-Price", dixon_price, np.random.uniform(-5, 5, 2)), 111 | ("13. Cross-in-Tray", cross_in_tray, np.random.uniform(-10, 10, 2)), 112 | ("14. Griewank", griewank, np.random.uniform(-600, 600, 2)), 113 | ("15. Levy", levy, np.random.uniform(-10, 10, 2)), 114 | ("16. Matyas", matyas, np.random.uniform(-10, 10, 2)), 115 | ("17. Goldstein-Price", goldstein_price, np.random.uniform(-2, 2, 2)), 116 | ("18. Powell", powell, np.random.uniform(-5, 5, 4)), 117 | ("19. Bird", bird, np.random.uniform(-2 * np.pi, 2 * np.pi, 2)), 118 | ("20. 
Pyramid", pyramid, np.random.uniform(-5, 5, 2)) 119 | ] 120 | 121 | # Prepare the plot 122 | fig, axes = plt.subplots(4, 4, figsize=(20, 20)) 123 | axes = axes.ravel() 124 | 125 | # Run Nelder-Mead and display results for all functions 126 | for idx, (name, func, x0) in enumerate(functions): 127 | print(f"\nRunning {name}...") 128 | 129 | start_time = time.time() 130 | memory_before = memory_usage()[0] 131 | 132 | best_x, costs = nelder_mead(func, x0, max_iter=100) 133 | 134 | memory_after = memory_usage()[0] 135 | end_time = time.time() 136 | 137 | print(f"Function: {name}") 138 | print(f"Best Cost: {costs[-1] if costs else 'N/A'}") 139 | print(f"Convergence Time: {end_time - start_time} seconds") 140 | print(f"Memory Usage: {max(0, memory_after - memory_before)} MB") 141 | print("Complexity Class: O(n^2 * d)") 142 | 143 | if idx < len(axes): 144 | axes[idx].plot(costs) 145 | axes[idx].set_title(name) 146 | axes[idx].set_xlabel("Iterations") 147 | axes[idx].set_ylabel("Cost") 148 | 149 | plt.tight_layout() 150 | plt.show() 151 | -------------------------------------------------------------------------------- /1 Powell’s Method.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from memory_profiler import memory_usage 4 | import time 5 | from scipy.optimize import minimize 6 | 7 | # Define benchmark functions 8 | def ackley(x): 9 | a, b, c = 20, 0.2, 2 * np.pi 10 | d = len(x) 11 | sum1 = np.sum(x**2) 12 | sum2 = np.sum(np.cos(c * x)) 13 | return -a * np.exp(-b * np.sqrt(sum1 / d)) - np.exp(sum2 / d) + a + np.exp(1) 14 | 15 | def booth(x): 16 | return (x[0] + 2 * x[1] - 7)**2 + (2 * x[0] + x[1] - 5)**2 17 | 18 | def rastrigin(x): 19 | A = 10 20 | return A * len(x) + np.sum(x**2 - A * np.cos(2 * np.pi * x)) 21 | 22 | def rosenbrock(x): 23 | return np.sum(100 * (x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2) 24 | 25 | def schwefel(x): 26 | return 418.9829 * len(x) - np.sum(x * np.sin(np.sqrt(np.abs(x)))) 27 | 28 | def sphere(x): 29 | return np.sum(x**2) 30 | 31 | def michalewicz(x): 32 | m = 10 33 | return -np.sum(np.sin(x) * np.sin(((np.arange(len(x)) + 1) * x**2) / np.pi)**(2 * m)) 34 | 35 | def zakharov(x): 36 | sum1 = np.sum(x**2) 37 | sum2 = np.sum(0.5 * (np.arange(len(x)) + 1) * x) 38 | return sum1 + sum2**2 + sum2**4 39 | 40 | def eggholder(x): 41 | return -(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47)))) - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))) 42 | 43 | def beale(x): 44 | return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2 45 | 46 | def trid(x): 47 | return np.sum((x - 1)**2) - np.sum(x[:-1] * x[1:]) 48 | 49 | def dixon_price(x): 50 | return (x[0] - 1)**2 + np.sum([(i + 1) * (2 * x[i]**2 - x[i-1])**2 for i in range(1, len(x))]) 51 | 52 | def cross_in_tray(x): 53 | fact1 = np.sin(x[0]) * np.sin(x[1]) 54 | fact2 = np.exp(abs(100 - np.sqrt(x[0]**2 + x[1]**2) / np.pi)) 55 | return -0.0001 * (abs(fact1 * fact2) + 1)**0.1 56 | 57 | def griewank(x): 58 | return 1 + np.sum(x**2 / 4000) - np.prod(np.cos(x / np.sqrt(np.arange(1, len(x) + 1)))) 59 | 60 | def levy(x): 61 | w = 1 + (x - 1) / 4 62 | term1 = np.sin(np.pi * w[0])**2 63 | term2 = np.sum((w[:-1] - 1)**2 * (1 + 10 * np.sin(np.pi * w[:-1] + 1)**2)) 64 | term3 = (w[-1] - 1)**2 * (1 + np.sin(2 * np.pi * w[-1])**2) 65 | return term1 + term2 + term3 66 | 67 | def matyas(x): 68 | return 0.26 * (x[0]**2 + x[1]**2) - 0.48 * x[0] * x[1] 69 | 70 | def goldstein_price(x): 71 | term1 = 1 + 
((x[0] + x[1] + 1)**2) * (19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] + 3*x[1]**2) 72 | term2 = 30 + ((2*x[0] - 3*x[1])**2) * (18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] + 27*x[1]**2) 73 | return term1 * term2 74 | 75 | def powell(x): 76 | term1 = (x[0] + 10*x[1])**2 77 | term2 = 5 * (x[2] - x[3])**2 78 | term3 = (x[1] - 2*x[2])**4 79 | term4 = 10 * (x[0] - x[3])**4 80 | return term1 + term2 + term3 + term4 81 | 82 | def bird(x): 83 | return np.sin(x[0]) * np.exp((1 - np.cos(x[1]))**2) + np.cos(x[1]) * np.exp((1 - np.sin(x[0]))**2) + (x[0] - x[1])**2 84 | 85 | def pyramid(x): 86 | return np.sum(np.abs(x)) 87 | 88 | def powells_method(func, x0, max_iter=200): 89 | costs = [] 90 | 91 | def callback(xk): 92 | costs.append(func(xk)) 93 | 94 | result = minimize(func, x0, method='Powell', options={'maxiter': max_iter, 'disp': False}, callback=callback) 95 | return result.x, costs 96 | 97 | # Prepare 20 functions 98 | functions = [ 99 | ("1. Ackley", ackley, np.random.uniform(-5, 5, 2)), 100 | ("2. Booth", booth, np.random.uniform(-5, 5, 2)), 101 | ("3. Rastrigin", rastrigin, np.random.uniform(-5, 5, 2)), 102 | ("4. Rosenbrock", rosenbrock, np.random.uniform(-5, 5, 2)), 103 | ("5. Schwefel", schwefel, np.random.uniform(-500, 500, 2)), 104 | ("6. Sphere", sphere, np.random.uniform(-5, 5, 2)), 105 | ("7. Michalewicz", michalewicz, np.random.uniform(0, np.pi, 2)), 106 | ("8. Zakharov", zakharov, np.random.uniform(-5, 5, 2)), 107 | ("9. Eggholder", eggholder, np.random.uniform(-512, 512, 2)), 108 | ("10. Beale", beale, np.random.uniform(-4.5, 4.5, 2)), 109 | ("11. Trid", trid, np.random.uniform(-5, 5, 2)), 110 | ("12. Dixon-Price", dixon_price, np.random.uniform(-5, 5, 2)), 111 | ("13. Cross-in-Tray", cross_in_tray, np.random.uniform(-10, 10, 2)), 112 | ("14. Griewank", griewank, np.random.uniform(-600, 600, 2)), 113 | ("15. Levy", levy, np.random.uniform(-10, 10, 2)), 114 | ("16. Matyas", matyas, np.random.uniform(-10, 10, 2)), 115 | ("17. Goldstein-Price", goldstein_price, np.random.uniform(-2, 2, 2)), 116 | ("18. Powell", powell, np.random.uniform(-5, 5, 4)), 117 | ("19. Bird", bird, np.random.uniform(-2 * np.pi, 2 * np.pi, 2)), 118 | ("20. 
Pyramid", pyramid, np.random.uniform(-5, 5, 2)) 119 | ] 120 | 121 | # Prepare the plot 122 | fig, axes = plt.subplots(4, 4, figsize=(20, 20)) 123 | axes = axes.ravel() 124 | 125 | # Run Powell's Method and display results for all functions 126 | for idx, (name, func, x0) in enumerate(functions): 127 | print(f"\nRunning {name}...") 128 | 129 | start_time = time.time() 130 | memory_before = memory_usage()[0] 131 | 132 | best_x, costs = powells_method(func, x0, max_iter=100) 133 | 134 | memory_after = memory_usage()[0] 135 | end_time = time.time() 136 | 137 | print(f"Function: {name}") 138 | print(f"Best Cost: {costs[-1] if costs else 'N/A'}") 139 | print(f"Convergence Time: {end_time - start_time} seconds") 140 | print(f"Memory Usage: {max(0, memory_after - memory_before)} MB") 141 | print("Complexity Class: O(n^2 * d)") 142 | 143 | if idx < len(axes): 144 | axes[idx].plot(costs) 145 | axes[idx].set_title(name) 146 | axes[idx].set_xlabel("Iterations") 147 | axes[idx].set_ylabel("Cost") 148 | 149 | plt.tight_layout() 150 | plt.show() 151 | -------------------------------------------------------------------------------- /2 Hill Climbing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from memory_profiler import memory_usage 4 | import time 5 | 6 | # Define benchmark functions 7 | def ackley(x): 8 | a, b, c = 20, 0.2, 2 * np.pi 9 | d = len(x) 10 | sum1 = np.sum(x**2) 11 | sum2 = np.sum(np.cos(c * x)) 12 | return -a * np.exp(-b * np.sqrt(sum1 / d)) - np.exp(sum2 / d) + a + np.exp(1) 13 | 14 | def booth(x): 15 | return (x[0] + 2 * x[1] - 7)**2 + (2 * x[0] + x[1] - 5)**2 16 | 17 | def rastrigin(x): 18 | A = 10 19 | return A * len(x) + np.sum(x**2 - A * np.cos(2 * np.pi * x)) 20 | 21 | def rosenbrock(x): 22 | return np.sum(100 * (x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2) 23 | 24 | def schwefel(x): 25 | return 418.9829 * len(x) - np.sum(x * np.sin(np.sqrt(np.abs(x)))) 26 | 27 | def sphere(x): 28 | return np.sum(x**2) 29 | 30 | def michalewicz(x): 31 | m = 10 32 | return -np.sum(np.sin(x) * np.sin(((np.arange(len(x)) + 1) * x**2) / np.pi)**(2 * m)) 33 | 34 | def zakharov(x): 35 | sum1 = np.sum(x**2) 36 | sum2 = np.sum(0.5 * (np.arange(len(x)) + 1) * x) 37 | return sum1 + sum2**2 + sum2**4 38 | 39 | def eggholder(x): 40 | return -(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47)))) - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))) 41 | 42 | def beale(x): 43 | return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2 44 | 45 | def trid(x): 46 | return np.sum((x - 1)**2) - np.sum(x[:-1] * x[1:]) 47 | 48 | def dixon_price(x): 49 | return (x[0] - 1)**2 + np.sum([(i + 1) * (2 * x[i]**2 - x[i-1])**2 for i in range(1, len(x))]) 50 | 51 | def cross_in_tray(x): 52 | fact1 = np.sin(x[0]) * np.sin(x[1]) 53 | fact2 = np.exp(abs(100 - np.sqrt(x[0]**2 + x[1]**2) / np.pi)) 54 | return -0.0001 * (abs(fact1 * fact2) + 1)**0.1 55 | 56 | def griewank(x): 57 | return 1 + np.sum(x**2 / 4000) - np.prod(np.cos(x / np.sqrt(np.arange(1, len(x) + 1)))) 58 | 59 | def levy(x): 60 | w = 1 + (x - 1) / 4 61 | term1 = np.sin(np.pi * w[0])**2 62 | term2 = np.sum((w[:-1] - 1)**2 * (1 + 10 * np.sin(np.pi * w[:-1] + 1)**2)) 63 | term3 = (w[-1] - 1)**2 * (1 + np.sin(2 * np.pi * w[-1])**2) 64 | return term1 + term2 + term3 65 | 66 | def matyas(x): 67 | return 0.26 * (x[0]**2 + x[1]**2) - 0.48 * x[0] * x[1] 68 | 69 | def goldstein_price(x): 70 | term1 = 1 + ((x[0] + x[1] + 1)**2) * (19 - 
14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] + 3*x[1]**2) 71 | term2 = 30 + ((2*x[0] - 3*x[1])**2) * (18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] + 27*x[1]**2) 72 | return term1 * term2 73 | 74 | def powell(x): 75 | term1 = (x[0] + 10*x[1])**2 76 | term2 = 5 * (x[2] - x[3])**2 77 | term3 = (x[1] - 2*x[2])**4 78 | term4 = 10 * (x[0] - x[3])**4 79 | return term1 + term2 + term3 + term4 80 | 81 | def bird(x): 82 | return np.sin(x[0]) * np.exp((1 - np.cos(x[1]))**2) + np.cos(x[1]) * np.exp((1 - np.sin(x[0]))**2) + (x[0] - x[1])**2 83 | 84 | def pyramid(x): 85 | return np.sum(np.abs(x)) 86 | 87 | def hill_climbing(func, x0, max_iter=200, step_size=0.1): 88 | x = np.array(x0) 89 | costs = [func(x)] 90 | 91 | for _ in range(max_iter): 92 | candidate = x + np.random.uniform(-step_size, step_size, size=x.shape) 93 | candidate_cost = func(candidate) 94 | 95 | if candidate_cost < costs[-1]: 96 | x = candidate 97 | costs.append(candidate_cost) 98 | else: 99 | costs.append(costs[-1]) 100 | 101 | return x, costs 102 | 103 | # Prepare 20 functions 104 | functions = [ 105 | ("1. Ackley", ackley, np.random.uniform(-5, 5, 2)), 106 | ("2. Booth", booth, np.random.uniform(-5, 5, 2)), 107 | ("3. Rastrigin", rastrigin, np.random.uniform(-5, 5, 2)), 108 | ("4. Rosenbrock", rosenbrock, np.random.uniform(-5, 5, 2)), 109 | ("5. Schwefel", schwefel, np.random.uniform(-500, 500, 2)), 110 | ("6. Sphere", sphere, np.random.uniform(-5, 5, 2)), 111 | ("7. Michalewicz", michalewicz, np.random.uniform(0, np.pi, 2)), 112 | ("8. Zakharov", zakharov, np.random.uniform(-5, 5, 2)), 113 | ("9. Eggholder", eggholder, np.random.uniform(-512, 512, 2)), 114 | ("10. Beale", beale, np.random.uniform(-4.5, 4.5, 2)), 115 | ("11. Trid", trid, np.random.uniform(-5, 5, 2)), 116 | ("12. Dixon-Price", dixon_price, np.random.uniform(-5, 5, 2)), 117 | ("13. Cross-in-Tray", cross_in_tray, np.random.uniform(-10, 10, 2)), 118 | ("14. Griewank", griewank, np.random.uniform(-600, 600, 2)), 119 | ("15. Levy", levy, np.random.uniform(-10, 10, 2)), 120 | ("16. Matyas", matyas, np.random.uniform(-10, 10, 2)), 121 | ("17. Goldstein-Price", goldstein_price, np.random.uniform(-2, 2, 2)), 122 | ("18. Powell", powell, np.random.uniform(-5, 5, 4)), 123 | ("19. Bird", bird, np.random.uniform(-2 * np.pi, 2 * np.pi, 2)), 124 | ("20. 
Pyramid", pyramid, np.random.uniform(-5, 5, 2)) 125 | ] 126 | 127 | # Prepare the plot 128 | fig, axes = plt.subplots(4, 4, figsize=(20, 20)) 129 | axes = axes.ravel() 130 | 131 | # Run Hill Climbing and display results for all functions 132 | for idx, (name, func, x0) in enumerate(functions): 133 | print(f"\nRunning {name}...") 134 | 135 | start_time = time.time() 136 | memory_before = memory_usage()[0] 137 | 138 | best_x, costs = hill_climbing(func, x0, max_iter=100, step_size=0.1) 139 | 140 | memory_after = memory_usage()[0] 141 | end_time = time.time() 142 | 143 | print(f"Function: {name}") 144 | print(f"Best Cost: {costs[-1] if costs else 'N/A'}") 145 | print(f"Convergence Time: {end_time - start_time} seconds") 146 | print(f"Memory Usage: {max(0, memory_after - memory_before)} MB") 147 | print("Complexity Class: O(n * d)") 148 | 149 | if idx < len(axes): 150 | axes[idx].plot(costs) 151 | axes[idx].set_title(name) 152 | axes[idx].set_xlabel("Iterations") 153 | axes[idx].set_ylabel("Cost") 154 | 155 | plt.tight_layout() 156 | plt.show() -------------------------------------------------------------------------------- /Algorithms/1 Nelder-Mead.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from memory_profiler import memory_usage 4 | import time 5 | from scipy.optimize import minimize 6 | 7 | # Define benchmark functions 8 | def ackley(x): 9 | a, b, c = 20, 0.2, 2 * np.pi 10 | d = len(x) 11 | sum1 = np.sum(x**2) 12 | sum2 = np.sum(np.cos(c * x)) 13 | return -a * np.exp(-b * np.sqrt(sum1 / d)) - np.exp(sum2 / d) + a + np.exp(1) 14 | 15 | def booth(x): 16 | return (x[0] + 2 * x[1] - 7)**2 + (2 * x[0] + x[1] - 5)**2 17 | 18 | def rastrigin(x): 19 | A = 10 20 | return A * len(x) + np.sum(x**2 - A * np.cos(2 * np.pi * x)) 21 | 22 | def rosenbrock(x): 23 | return np.sum(100 * (x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2) 24 | 25 | def schwefel(x): 26 | return 418.9829 * len(x) - np.sum(x * np.sin(np.sqrt(np.abs(x)))) 27 | 28 | def sphere(x): 29 | return np.sum(x**2) 30 | 31 | def michalewicz(x): 32 | m = 10 33 | return -np.sum(np.sin(x) * np.sin(((np.arange(len(x)) + 1) * x**2) / np.pi)**(2 * m)) 34 | 35 | def zakharov(x): 36 | sum1 = np.sum(x**2) 37 | sum2 = np.sum(0.5 * (np.arange(len(x)) + 1) * x) 38 | return sum1 + sum2**2 + sum2**4 39 | 40 | def eggholder(x): 41 | return -(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47)))) - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))) 42 | 43 | def beale(x): 44 | return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2 45 | 46 | def trid(x): 47 | return np.sum((x - 1)**2) - np.sum(x[:-1] * x[1:]) 48 | 49 | def dixon_price(x): 50 | return (x[0] - 1)**2 + np.sum([(i + 1) * (2 * x[i]**2 - x[i-1])**2 for i in range(1, len(x))]) 51 | 52 | def cross_in_tray(x): 53 | fact1 = np.sin(x[0]) * np.sin(x[1]) 54 | fact2 = np.exp(abs(100 - np.sqrt(x[0]**2 + x[1]**2) / np.pi)) 55 | return -0.0001 * (abs(fact1 * fact2) + 1)**0.1 56 | 57 | def griewank(x): 58 | return 1 + np.sum(x**2 / 4000) - np.prod(np.cos(x / np.sqrt(np.arange(1, len(x) + 1)))) 59 | 60 | def levy(x): 61 | w = 1 + (x - 1) / 4 62 | term1 = np.sin(np.pi * w[0])**2 63 | term2 = np.sum((w[:-1] - 1)**2 * (1 + 10 * np.sin(np.pi * w[:-1] + 1)**2)) 64 | term3 = (w[-1] - 1)**2 * (1 + np.sin(2 * np.pi * w[-1])**2) 65 | return term1 + term2 + term3 66 | 67 | def matyas(x): 68 | return 0.26 * (x[0]**2 + x[1]**2) - 0.48 * x[0] * x[1] 69 | 70 | def goldstein_price(x): 
71 | term1 = 1 + ((x[0] + x[1] + 1)**2) * (19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] + 3*x[1]**2) 72 | term2 = 30 + ((2*x[0] - 3*x[1])**2) * (18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] + 27*x[1]**2) 73 | return term1 * term2 74 | 75 | def powell(x): 76 | term1 = (x[0] + 10*x[1])**2 77 | term2 = 5 * (x[2] - x[3])**2 78 | term3 = (x[1] - 2*x[2])**4 79 | term4 = 10 * (x[0] - x[3])**4 80 | return term1 + term2 + term3 + term4 81 | 82 | def bird(x): 83 | return np.sin(x[0]) * np.exp((1 - np.cos(x[1]))**2) + np.cos(x[1]) * np.exp((1 - np.sin(x[0]))**2) + (x[0] - x[1])**2 84 | 85 | def pyramid(x): 86 | return np.sum(np.abs(x)) 87 | 88 | def nelder_mead(func, x0, max_iter=200): 89 | costs = [] 90 | 91 | def callback(xk): 92 | costs.append(func(xk)) 93 | 94 | result = minimize(func, x0, method='Nelder-Mead', options={'maxiter': max_iter, 'disp': False}, callback=callback) 95 | return result.x, costs 96 | 97 | # Prepare 20 functions 98 | functions = [ 99 | ("1. Ackley", ackley, np.random.uniform(-5, 5, 2)), 100 | ("2. Booth", booth, np.random.uniform(-5, 5, 2)), 101 | ("3. Rastrigin", rastrigin, np.random.uniform(-5, 5, 2)), 102 | ("4. Rosenbrock", rosenbrock, np.random.uniform(-5, 5, 2)), 103 | ("5. Schwefel", schwefel, np.random.uniform(-500, 500, 2)), 104 | ("6. Sphere", sphere, np.random.uniform(-5, 5, 2)), 105 | ("7. Michalewicz", michalewicz, np.random.uniform(0, np.pi, 2)), 106 | ("8. Zakharov", zakharov, np.random.uniform(-5, 5, 2)), 107 | ("9. Eggholder", eggholder, np.random.uniform(-512, 512, 2)), 108 | ("10. Beale", beale, np.random.uniform(-4.5, 4.5, 2)), 109 | ("11. Trid", trid, np.random.uniform(-5, 5, 2)), 110 | ("12. Dixon-Price", dixon_price, np.random.uniform(-5, 5, 2)), 111 | ("13. Cross-in-Tray", cross_in_tray, np.random.uniform(-10, 10, 2)), 112 | ("14. Griewank", griewank, np.random.uniform(-600, 600, 2)), 113 | ("15. Levy", levy, np.random.uniform(-10, 10, 2)), 114 | ("16. Matyas", matyas, np.random.uniform(-10, 10, 2)), 115 | ("17. Goldstein-Price", goldstein_price, np.random.uniform(-2, 2, 2)), 116 | ("18. Powell", powell, np.random.uniform(-5, 5, 4)), 117 | ("19. Bird", bird, np.random.uniform(-2 * np.pi, 2 * np.pi, 2)), 118 | ("20. 
Pyramid", pyramid, np.random.uniform(-5, 5, 2)) 119 | ] 120 | 121 | # Prepare the plot 122 | fig, axes = plt.subplots(4, 4, figsize=(20, 20)) 123 | axes = axes.ravel() 124 | 125 | # Run Nelder-Mead and display results for all functions 126 | for idx, (name, func, x0) in enumerate(functions): 127 | print(f"\nRunning {name}...") 128 | 129 | start_time = time.time() 130 | memory_before = memory_usage()[0] 131 | 132 | best_x, costs = nelder_mead(func, x0, max_iter=100) 133 | 134 | memory_after = memory_usage()[0] 135 | end_time = time.time() 136 | 137 | print(f"Function: {name}") 138 | print(f"Best Cost: {costs[-1] if costs else 'N/A'}") 139 | print(f"Convergence Time: {end_time - start_time} seconds") 140 | print(f"Memory Usage: {max(0, memory_after - memory_before)} MB") 141 | print("Complexity Class: O(n^2 * d)") 142 | 143 | if idx < len(axes): 144 | axes[idx].plot(costs) 145 | axes[idx].set_title(name) 146 | axes[idx].set_xlabel("Iterations") 147 | axes[idx].set_ylabel("Cost") 148 | 149 | plt.tight_layout() 150 | plt.show() 151 | -------------------------------------------------------------------------------- /Algorithms/1 Powell’s Method.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from memory_profiler import memory_usage 4 | import time 5 | from scipy.optimize import minimize 6 | 7 | # Define benchmark functions 8 | def ackley(x): 9 | a, b, c = 20, 0.2, 2 * np.pi 10 | d = len(x) 11 | sum1 = np.sum(x**2) 12 | sum2 = np.sum(np.cos(c * x)) 13 | return -a * np.exp(-b * np.sqrt(sum1 / d)) - np.exp(sum2 / d) + a + np.exp(1) 14 | 15 | def booth(x): 16 | return (x[0] + 2 * x[1] - 7)**2 + (2 * x[0] + x[1] - 5)**2 17 | 18 | def rastrigin(x): 19 | A = 10 20 | return A * len(x) + np.sum(x**2 - A * np.cos(2 * np.pi * x)) 21 | 22 | def rosenbrock(x): 23 | return np.sum(100 * (x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2) 24 | 25 | def schwefel(x): 26 | return 418.9829 * len(x) - np.sum(x * np.sin(np.sqrt(np.abs(x)))) 27 | 28 | def sphere(x): 29 | return np.sum(x**2) 30 | 31 | def michalewicz(x): 32 | m = 10 33 | return -np.sum(np.sin(x) * np.sin(((np.arange(len(x)) + 1) * x**2) / np.pi)**(2 * m)) 34 | 35 | def zakharov(x): 36 | sum1 = np.sum(x**2) 37 | sum2 = np.sum(0.5 * (np.arange(len(x)) + 1) * x) 38 | return sum1 + sum2**2 + sum2**4 39 | 40 | def eggholder(x): 41 | return -(x[1] + 47) * np.sin(np.sqrt(abs(x[0]/2 + (x[1] + 47)))) - x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47)))) 42 | 43 | def beale(x): 44 | return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2 45 | 46 | def trid(x): 47 | return np.sum((x - 1)**2) - np.sum(x[:-1] * x[1:]) 48 | 49 | def dixon_price(x): 50 | return (x[0] - 1)**2 + np.sum([(i + 1) * (2 * x[i]**2 - x[i-1])**2 for i in range(1, len(x))]) 51 | 52 | def cross_in_tray(x): 53 | fact1 = np.sin(x[0]) * np.sin(x[1]) 54 | fact2 = np.exp(abs(100 - np.sqrt(x[0]**2 + x[1]**2) / np.pi)) 55 | return -0.0001 * (abs(fact1 * fact2) + 1)**0.1 56 | 57 | def griewank(x): 58 | return 1 + np.sum(x**2 / 4000) - np.prod(np.cos(x / np.sqrt(np.arange(1, len(x) + 1)))) 59 | 60 | def levy(x): 61 | w = 1 + (x - 1) / 4 62 | term1 = np.sin(np.pi * w[0])**2 63 | term2 = np.sum((w[:-1] - 1)**2 * (1 + 10 * np.sin(np.pi * w[:-1] + 1)**2)) 64 | term3 = (w[-1] - 1)**2 * (1 + np.sin(2 * np.pi * w[-1])**2) 65 | return term1 + term2 + term3 66 | 67 | def matyas(x): 68 | return 0.26 * (x[0]**2 + x[1]**2) - 0.48 * x[0] * x[1] 69 | 70 | def goldstein_price(x): 71 | 
term1 = 1 + ((x[0] + x[1] + 1)**2) * (19 - 14*x[0] + 3*x[0]**2 - 14*x[1] + 6*x[0]*x[1] + 3*x[1]**2) 72 | term2 = 30 + ((2*x[0] - 3*x[1])**2) * (18 - 32*x[0] + 12*x[0]**2 + 48*x[1] - 36*x[0]*x[1] + 27*x[1]**2) 73 | return term1 * term2 74 | 75 | def powell(x): 76 | term1 = (x[0] + 10*x[1])**2 77 | term2 = 5 * (x[2] - x[3])**2 78 | term3 = (x[1] - 2*x[2])**4 79 | term4 = 10 * (x[0] - x[3])**4 80 | return term1 + term2 + term3 + term4 81 | 82 | def bird(x): 83 | return np.sin(x[0]) * np.exp((1 - np.cos(x[1]))**2) + np.cos(x[1]) * np.exp((1 - np.sin(x[0]))**2) + (x[0] - x[1])**2 84 | 85 | def pyramid(x): 86 | return np.sum(np.abs(x)) 87 | 88 | def powells_method(func, x0, max_iter=200): 89 | costs = [] 90 | 91 | def callback(xk): 92 | costs.append(func(xk)) 93 | 94 | result = minimize(func, x0, method='Powell', options={'maxiter': max_iter, 'disp': False}, callback=callback) 95 | return result.x, costs 96 | 97 | # Prepare 20 functions 98 | functions = [ 99 | ("1. Ackley", ackley, np.random.uniform(-5, 5, 2)), 100 | ("2. Booth", booth, np.random.uniform(-5, 5, 2)), 101 | ("3. Rastrigin", rastrigin, np.random.uniform(-5, 5, 2)), 102 | ("4. Rosenbrock", rosenbrock, np.random.uniform(-5, 5, 2)), 103 | ("5. Schwefel", schwefel, np.random.uniform(-500, 500, 2)), 104 | ("6. Sphere", sphere, np.random.uniform(-5, 5, 2)), 105 | ("7. Michalewicz", michalewicz, np.random.uniform(0, np.pi, 2)), 106 | ("8. Zakharov", zakharov, np.random.uniform(-5, 5, 2)), 107 | ("9. Eggholder", eggholder, np.random.uniform(-512, 512, 2)), 108 | ("10. Beale", beale, np.random.uniform(-4.5, 4.5, 2)), 109 | ("11. Trid", trid, np.random.uniform(-5, 5, 2)), 110 | ("12. Dixon-Price", dixon_price, np.random.uniform(-5, 5, 2)), 111 | ("13. Cross-in-Tray", cross_in_tray, np.random.uniform(-10, 10, 2)), 112 | ("14. Griewank", griewank, np.random.uniform(-600, 600, 2)), 113 | ("15. Levy", levy, np.random.uniform(-10, 10, 2)), 114 | ("16. Matyas", matyas, np.random.uniform(-10, 10, 2)), 115 | ("17. Goldstein-Price", goldstein_price, np.random.uniform(-2, 2, 2)), 116 | ("18. Powell", powell, np.random.uniform(-5, 5, 4)), 117 | ("19. Bird", bird, np.random.uniform(-2 * np.pi, 2 * np.pi, 2)), 118 | ("20. 
Pyramid", pyramid, np.random.uniform(-5, 5, 2)) 119 | ] 120 | 121 | # Prepare the plot 122 | fig, axes = plt.subplots(4, 4, figsize=(20, 20)) 123 | axes = axes.ravel() 124 | 125 | # Run Powell's Method and display results for all functions 126 | for idx, (name, func, x0) in enumerate(functions): 127 | print(f"\nRunning {name}...") 128 | 129 | start_time = time.time() 130 | memory_before = memory_usage()[0] 131 | 132 | best_x, costs = powells_method(func, x0, max_iter=100) 133 | 134 | memory_after = memory_usage()[0] 135 | end_time = time.time() 136 | 137 | print(f"Function: {name}") 138 | print(f"Best Cost: {costs[-1] if costs else 'N/A'}") 139 | print(f"Convergence Time: {end_time - start_time} seconds") 140 | print(f"Memory Usage: {max(0, memory_after - memory_before)} MB") 141 | print("Complexity Class: O(n^2 * d)") 142 | 143 | if idx < len(axes): 144 | axes[idx].plot(costs) 145 | axes[idx].set_title(name) 146 | axes[idx].set_xlabel("Iterations") 147 | axes[idx].set_ylabel("Cost") 148 | 149 | plt.tight_layout() 150 | plt.show() 151 | -------------------------------------------------------------------------------- /Ant Colony Optimization Bin Packing Problem.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import random 4 | 5 | # Ant Colony Optimization Parameters 6 | NUM_ANTS = 5 7 | MAX_ITERATIONS = 200 8 | NUM_ITEMS = 78 9 | BIN_CAPACITY = 256 10 | EVAPORATION_RATE = 0.5 11 | PHEROMONE_IMPORTANCE = 1 12 | HEURISTIC_IMPORTANCE = 2 13 | INITIAL_PHEROMONE = 1.0 14 | 15 | # Generate a random bin-packing problem 16 | def generate_bin_packing_problem(num_items, bin_capacity): 17 | weights = np.random.randint(1, bin_capacity // 2, size=num_items) 18 | return weights, bin_capacity 19 | 20 | # Calculate the cost of a solution (total bins used) 21 | def calculate_cost(solution, weights, bin_capacity): 22 | max_bin_index = max(solution) + 1 # Ensure bins are initialized for all indices 23 | bins = [0] * max_bin_index # Initialize bins dynamically 24 | 25 | for item_index, bin_index in enumerate(solution): 26 | bins[bin_index] += weights[item_index] 27 | if bins[bin_index] > bin_capacity: 28 | return float('inf') # Penalize invalid solutions exceeding bin capacity 29 | 30 | return len([b for b in bins if b > 0]) # Return total number of bins used 31 | 32 | # Generate a random initial solution 33 | def generate_initial_solution(weights, bin_capacity): 34 | solution = np.zeros_like(weights, dtype=int) 35 | bins = [0] 36 | for i, weight in enumerate(weights): 37 | assigned = False 38 | for bin_index in range(len(bins)): 39 | if bins[bin_index] + weight <= bin_capacity: 40 | bins[bin_index] += weight 41 | solution[i] = bin_index 42 | assigned = True 43 | break 44 | if not assigned: 45 | bins.append(weight) 46 | solution[i] = len(bins) - 1 47 | return solution 48 | 49 | # Ant Colony Optimization for Bin Packing Problem 50 | def aco_bin_packing(weights, bin_capacity): 51 | num_items = len(weights) 52 | pheromone = np.full((num_items, num_items), INITIAL_PHEROMONE) 53 | best_solution = None 54 | best_cost = float('inf') 55 | iteration_costs = [] 56 | 57 | for iteration in range(MAX_ITERATIONS): 58 | solutions = [] 59 | costs = [] 60 | 61 | for ant in range(NUM_ANTS): 62 | solution = [] 63 | bins = [0] 64 | 65 | for i in range(num_items): 66 | probabilities = [] 67 | for bin_index in range(len(bins) + 1): 68 | if bin_index == len(bins): 69 | # New bin 70 | if weights[i] <= bin_capacity: 71 | 
--------------------------------------------------------------------------------
/Ant Colony Optimization Bin Packing Problem.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import random
4 |
5 | # Ant Colony Optimization Parameters
6 | NUM_ANTS = 5
7 | MAX_ITERATIONS = 200
8 | NUM_ITEMS = 78
9 | BIN_CAPACITY = 256
10 | EVAPORATION_RATE = 0.5
11 | PHEROMONE_IMPORTANCE = 1
12 | HEURISTIC_IMPORTANCE = 2
13 | INITIAL_PHEROMONE = 1.0
14 |
15 | # Generate a random bin-packing problem
16 | def generate_bin_packing_problem(num_items, bin_capacity):
17 |     weights = np.random.randint(1, bin_capacity // 2, size=num_items)
18 |     return weights, bin_capacity
19 |
20 | # Calculate the cost of a solution (total bins used)
21 | def calculate_cost(solution, weights, bin_capacity):
22 |     max_bin_index = max(solution) + 1  # Ensure bins are initialized for all indices
23 |     bins = [0] * max_bin_index  # Initialize bins dynamically
24 |
25 |     for item_index, bin_index in enumerate(solution):
26 |         bins[bin_index] += weights[item_index]
27 |         if bins[bin_index] > bin_capacity:
28 |             return float('inf')  # Penalize invalid solutions exceeding bin capacity
29 |
30 |     return len([b for b in bins if b > 0])  # Return total number of bins used
31 |
32 | # Generate a random initial solution
33 | def generate_initial_solution(weights, bin_capacity):
34 |     solution = np.zeros_like(weights, dtype=int)
35 |     bins = [0]
36 |     for i, weight in enumerate(weights):
37 |         assigned = False
38 |         for bin_index in range(len(bins)):
39 |             if bins[bin_index] + weight <= bin_capacity:
40 |                 bins[bin_index] += weight
41 |                 solution[i] = bin_index
42 |                 assigned = True
43 |                 break
44 |         if not assigned:
45 |             bins.append(weight)
46 |             solution[i] = len(bins) - 1
47 |     return solution
48 |
49 | # Ant Colony Optimization for Bin Packing Problem
50 | def aco_bin_packing(weights, bin_capacity):
51 |     num_items = len(weights)
52 |     pheromone = np.full((num_items, num_items), INITIAL_PHEROMONE)
53 |     best_solution = None
54 |     best_cost = float('inf')
55 |     iteration_costs = []
56 |
57 |     for iteration in range(MAX_ITERATIONS):
58 |         solutions = []
59 |         costs = []
60 |
61 |         for ant in range(NUM_ANTS):
62 |             solution = []
63 |             bins = [0]
64 |
65 |             for i in range(num_items):
66 |                 probabilities = []
67 |                 for bin_index in range(len(bins) + 1):
68 |                     if bin_index == len(bins):
69 |                         # New bin
70 |                         if weights[i] <= bin_capacity:
71 |                             probabilities.append((pheromone[i][bin_index - 1] ** PHEROMONE_IMPORTANCE) *
72 |                                                  ((1.0 / (1 + weights[i])) ** HEURISTIC_IMPORTANCE))
73 |                         else:
74 |                             probabilities.append(0)
75 |                     else:
76 |                         if bins[bin_index] + weights[i] <= bin_capacity:
77 |                             probabilities.append((pheromone[i][bin_index] ** PHEROMONE_IMPORTANCE) *
78 |                                                  ((1.0 / (1 + bins[bin_index] + weights[i])) ** HEURISTIC_IMPORTANCE))
79 |                         else:
80 |                             probabilities.append(0)
81 |
82 |                 probabilities = np.array(probabilities) / sum(probabilities)
83 |                 chosen_bin = np.random.choice(range(len(probabilities)), p=probabilities)
84 |
85 |                 if chosen_bin == len(bins):
86 |                     bins.append(weights[i])
87 |                 else:
88 |                     bins[chosen_bin] += weights[i]
89 |
90 |                 solution.append(chosen_bin)
91 |
92 |             cost = calculate_cost(solution, weights, bin_capacity)
93 |             solutions.append(solution)
94 |             costs.append(cost)
95 |
96 |             if cost < best_cost:
97 |                 best_solution = solution
98 |                 best_cost = cost
99 |
100 |         # Update pheromones
101 |         pheromone *= (1 - EVAPORATION_RATE)
102 |         for solution, cost in zip(solutions, costs):
103 |             for i, bin_index in enumerate(solution):
104 |                 pheromone[i][bin_index] += 1.0 / cost
105 |
106 |         iteration_costs.append(best_cost)
107 |         print(f"Iteration {iteration + 1}, Best Cost: {best_cost}")
108 |
109 |     return best_solution, best_cost, iteration_costs
110 |
111 | def main():
112 |     weights, bin_capacity = generate_bin_packing_problem(NUM_ITEMS, BIN_CAPACITY)
113 |     print("Weights:", weights)
114 |     print("Bin Capacity:", bin_capacity)
115 |
116 |     best_solution, best_cost, iteration_costs = aco_bin_packing(weights, bin_capacity)
117 |
118 |     print("\nBest Solution:", best_solution)
119 |     print("Number of Bins Used:", best_cost)
120 |
121 |     # Plot optimization progress
122 |     plt.figure(figsize=(10, 6))
123 |     plt.plot(iteration_costs, marker='o')
124 |     plt.title("ACO Optimization Progress for Bin Packing")
125 |     plt.xlabel("Iteration")
126 |     plt.ylabel("Best Cost (Number of Bins)")
127 |     plt.grid()
128 |     plt.show()
129 |
130 | if __name__ == "__main__":
131 |     main()
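As a quick sanity check on the ACO result above, the classic first-fit-decreasing heuristic gives a reference bin count for the same `weights` and `bin_capacity`; this helper is not part of the original script:

def first_fit_decreasing(weights, bin_capacity):
    # Place each item, heaviest first, into the first bin with room; open a new bin otherwise
    bin_loads = []
    for w in sorted(weights, reverse=True):
        for i, load in enumerate(bin_loads):
            if load + w <= bin_capacity:
                bin_loads[i] += w
                break
        else:
            bin_loads.append(w)
    return len(bin_loads)

FFD is known to use at most 11/9 · OPT + 6/9 bins, so an ACO run that matches or beats `first_fit_decreasing(weights, bin_capacity)` is behaving sensibly.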
--------------------------------------------------------------------------------
/Bat Algorithm Feature Selection.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import random
4 | from sklearn.model_selection import train_test_split
5 | from xgboost import XGBClassifier
6 | from sklearn.metrics import accuracy_score
7 |
8 | # Bat Algorithm Parameters
9 | POPULATION_SIZE = 3
10 | MAX_GENERATIONS = 30
11 | LOUDNESS = 0.5
12 | PULSE_RATE = 0.5
13 | FREQ_MIN = 0
14 | FREQ_MAX = 2
15 | NUM_SAMPLES = 500
16 | NUM_FEATURES = 10
17 | NUM_CLASSES = 3
18 | NUM_SELECTED_FEATURES = 5
19 |
20 | # Generate random dataset
21 | def generate_random_data(num_samples, num_features, num_classes):
22 |     X = np.random.rand(num_samples, num_features)
23 |     y = np.random.randint(0, num_classes, num_samples)
24 |     return X, y
25 |
26 | # Cost function
27 | def cost_function(X, y, selected_features):
28 |     if np.sum(selected_features) == 0:
29 |         return float('inf')
30 |
31 |     X_selected = X[:, selected_features == 1]
32 |     X_train, X_test, y_train, y_test = train_test_split(X_selected, y, test_size=0.3, random_state=42)
33 |
34 |     model = XGBClassifier(use_label_encoder=False, eval_metric='mlogloss')
35 |     model.fit(X_train, y_train)
36 |     y_pred = model.predict(X_test)
37 |     return 1 - accuracy_score(y_test, y_pred)  # Minimize error
38 |
39 | # Initialize population
40 | def initialize_population(size, num_features):
41 |     population = np.zeros((size, num_features))
42 |     for i in range(size):
43 |         selected_indices = np.random.choice(num_features, NUM_SELECTED_FEATURES, replace=False)
44 |         population[i, selected_indices] = 1
45 |     return population
46 |
47 | # Velocity and position updates for the Bat Algorithm
48 | def update_velocity(velocity, individual, best_individual, frequency):
49 |     return velocity + (individual - best_individual) * frequency
50 |
51 | def update_position(individual, velocity):
52 |     # Round and clip so the position stays a binary feature mask
53 |     return np.clip(np.round(individual + velocity), 0, 1)
54 |
55 | # Bat Algorithm for Feature Selection
56 | def bat_algorithm(X, y):
57 |     num_features = X.shape[1]
58 |     population = initialize_population(POPULATION_SIZE, num_features)
59 |     velocities = np.zeros((POPULATION_SIZE, num_features))
60 |     cost = np.array([cost_function(X, y, individual) for individual in population])
61 |
62 |     best_individual = population[np.argmin(cost)]
63 |     best_cost = np.min(cost)
64 |     costs_over_time = []
65 |
66 |     for generation in range(MAX_GENERATIONS):
67 |         for i in range(POPULATION_SIZE):
68 |             frequency = FREQ_MIN + (FREQ_MAX - FREQ_MIN) * random.random()
69 |             # Update the velocity first, then derive the candidate position from it
70 |             velocities[i] = update_velocity(velocities[i], population[i], best_individual, frequency)
71 |             candidate = update_position(population[i], velocities[i])
72 |
73 |             if random.random() > PULSE_RATE:
74 |                 candidate = best_individual.copy()
75 |                 mutation_index = random.randint(0, num_features - 1)
76 |                 candidate[mutation_index] = 1 - candidate[mutation_index]
77 |
78 |             candidate_cost = cost_function(X, y, candidate)
79 |             if candidate_cost < cost[i] and random.random() < LOUDNESS:
80 |                 population[i] = candidate
81 |                 cost[i] = candidate_cost
82 |
83 |                 if candidate_cost < best_cost:
84 |                     best_individual = candidate
85 |                     best_cost = candidate_cost
86 |
87 |         costs_over_time.append(best_cost)
88 |         print(f"Generation {generation + 1}, Best Cost: {best_cost:.4f}")
89 |
90 |     return best_individual, 1 - best_cost, costs_over_time
91 |
92 | def main():
93 |     X, y = generate_random_data(NUM_SAMPLES, NUM_FEATURES, NUM_CLASSES)
94 |     # Split data for original accuracy evaluation
95 |     X_train_full, X_test_full, y_train_full, y_test_full = train_test_split(X, y, test_size=0.3, random_state=42)
96 |     model_full = XGBClassifier(use_label_encoder=False, eval_metric='mlogloss')
97 |     model_full.fit(X_train_full, y_train_full)
98 |     y_pred_full = model_full.predict(X_test_full)
99 |     original_accuracy = accuracy_score(y_test_full, y_pred_full)
100 |
101 |     best_features, best_accuracy, costs_over_time = bat_algorithm(X, y)
102 |
103 |     # Evaluate accuracy with selected features
104 |     X_selected = X[:, best_features == 1]
105 |     X_train_selected, X_test_selected, y_train_selected, y_test_selected = train_test_split(X_selected, y, test_size=0.3, random_state=42)
106 |     model_selected = XGBClassifier(use_label_encoder=False, eval_metric='mlogloss')
107 |     model_selected.fit(X_train_selected, y_train_selected)
108 |     y_pred_selected = model_selected.predict(X_test_selected)
109 |     selected_accuracy = accuracy_score(y_test_selected, y_pred_selected)
110 |
111 |     print("\nOriginal Accuracy with All Features:", original_accuracy)
112 |     print("Selected Features (1=selected, 0=not selected):", best_features)
113 |     print("Accuracy with Selected Features:", selected_accuracy)
114 |
115 |     # Count selected features
116 |     print("Number of Selected Features:", np.sum(best_features))
117 |
118 |     # Plot optimization progress
119 |     plt.figure(figsize=(10, 6))
120 |     plt.plot(costs_over_time, marker='o')
121 |     plt.title("Bat Algorithm Optimization Progress")
122 |     plt.xlabel("Generation")
123 |     plt.ylabel("Best Cost")
124 |     plt.grid()
125 |     plt.show()
126 |
127 | if __name__ == "__main__":
128 |     main()
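The script maps continuous positions to bits by rounding and clipping. Binary bat variants more commonly push the velocity through a sigmoid transfer function and sample each bit from the resulting probability; a minimal sketch of that alternative (same NumPy arrays assumed, not part of the original file):

def binary_position(individual, velocity):
    # Sigmoid transfer: each bit becomes 1 with probability sigmoid(velocity)
    prob = 1.0 / (1.0 + np.exp(-velocity))
    return (np.random.rand(*individual.shape) < prob).astype(float)

Compared with round-and-clip, the stochastic transfer keeps exploring even when velocities saturate, at the cost of noisier convergence.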
--------------------------------------------------------------------------------
/Bees CNN Optimized (weights and biases).py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from sklearn.datasets import load_iris
3 | from sklearn.model_selection import train_test_split
4 | from sklearn.preprocessing import OneHotEncoder, StandardScaler
5 | from sklearn.metrics import classification_report
6 | import tensorflow as tf
7 |
8 | # Load Iris dataset
9 | data = load_iris()
10 | X = data.data
11 | y = data.target
12 |
13 | # Preprocess the dataset
14 | scaler = StandardScaler()
15 | X = scaler.fit_transform(X)
16 |
17 | encoder = OneHotEncoder(sparse_output=False)
18 | y = encoder.fit_transform(y.reshape(-1, 1))
19 |
20 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
21 |
22 | # Define the Bees Algorithm
23 | class BeesAlgorithm:
24 |     def __init__(self, model, X_train, y_train, n_bees=20, elite_bees=5, patch_size=0.1, iterations=50):
25 |         self.model = model
26 |         self.X_train = X_train
27 |         self.y_train = y_train
28 |         self.n_bees = n_bees
29 |         self.elite_bees = elite_bees
30 |         self.patch_size = patch_size
31 |         self.iterations = iterations
32 |
33 |         # Initialize bees (weights and biases)
34 |         self.bees = [self.generate_solution() for _ in range(self.n_bees)]
35 |
36 |     def generate_solution(self):
37 |         # Flatten weights and biases into a single vector
38 |         weights_and_biases = []
39 |         for layer in self.model.trainable_variables:
40 |             weights_and_biases.append(layer.numpy().flatten())
41 |         return np.concatenate(weights_and_biases)
42 |
43 |     def decode_solution(self, solution):
44 |         # Decode the flat vector into weights and biases for the model
45 |         shapes = [layer.shape for layer in self.model.trainable_variables]
46 |         split_points = np.cumsum([np.prod(shape) for shape in shapes])
47 |         decoded = np.split(solution, split_points[:-1])
48 |         decoded = [np.reshape(arr, shape) for arr, shape in zip(decoded, shapes)]
49 |         return decoded
50 |
51 |     def set_weights_and_biases(self, solution):
52 |         # Set the model's weights and biases
53 |         decoded = self.decode_solution(solution)
54 |         for layer, new_weights in zip(self.model.trainable_variables, decoded):
55 |             layer.assign(new_weights)
56 |
57 |     def fitness(self, solution):
58 |         # Evaluate the model's accuracy on the training data
59 |         self.set_weights_and_biases(solution)
60 |         y_pred = self.model(self.X_train)
61 |         accuracy = tf.reduce_mean(
62 |             tf.cast(tf.equal(tf.argmax(y_pred, axis=1), tf.argmax(self.y_train, axis=1)), tf.float32)
63 |         ).numpy()
64 |         return accuracy
65 |
66 |     def optimize(self):
67 |         for iteration in range(self.iterations):
68 |             # Evaluate fitness for all bees
69 |             fitness_scores = [self.fitness(bee) for bee in self.bees]
70 |
71 |             # Sort bees by fitness
72 |             sorted_indices = np.argsort(fitness_scores)[::-1]
73 |             self.bees = [self.bees[i] for i in sorted_indices]
74 |
75 |             # Keep elite bees
76 |             elite_bees = self.bees[:self.elite_bees]
77 |
78 |             # Scout new bees around elite bees
79 |             for i in range(self.elite_bees, self.n_bees):
80 |                 elite_index = i % self.elite_bees
81 |                 new_bee = elite_bees[elite_index] + np.random.uniform(
82 |                     -self.patch_size, self.patch_size, size=elite_bees[elite_index].shape
83 |                 )
84 |                 self.bees[i] = new_bee
85 |
86 |             # Reduce patch size over iterations
87 |             self.patch_size *= 0.95
88 |
89 |             # Print progress
90 |             best_fitness = fitness_scores[sorted_indices[0]]
91 |             print(f"Iteration {iteration + 1}/{self.iterations}, Best Fitness: {best_fitness:.4f}")
92 |
93 |         # Return the best solution
94 |         best_solution = self.bees[0]
95 |         return best_solution
96 |
97 |
98 | # Define the neural network (simple feedforward model)
99 | def build_model(input_dim, output_dim):
100 |     model = tf.keras.Sequential([
101 |         tf.keras.layers.Dense(10, activation='relu', input_dim=input_dim),
102 |         tf.keras.layers.Dense(output_dim, activation='softmax')
103 |     ])
104 |     return model
105 |
106 |
107 | # Build and compile the model
108 | input_dim = X_train.shape[1]
109 | output_dim = y_train.shape[1]
110 | model = build_model(input_dim, output_dim)
111 |
112 | # Initialize Bees Algorithm
113 | bees_algorithm = BeesAlgorithm(model, X_train, y_train, n_bees=30, elite_bees=5, patch_size=0.1, iterations=50)
114 |
115 | # Optimize weights and biases
116 | best_solution = bees_algorithm.optimize()
117 |
118 | # Set the optimized weights and biases to the model
119 | bees_algorithm.set_weights_and_biases(best_solution)
120 |
121 | # Evaluate the optimized model on the test set
122 | y_test_pred = model(X_test)
123 | y_test_pred_classes = tf.argmax(y_test_pred, axis=1).numpy()
124 | y_test_true_classes = tf.argmax(y_test, axis=1).numpy()
125 |
126 | # Print classification report
127 | print("\nClassification Report (Test Data):")
128 | print(classification_report(y_test_true_classes, y_test_pred_classes))
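Because the Bees Algorithm only explores weight space stochastically, a common follow-up — one possible continuation, not part of the original script — is to treat its best solution as an initialization and fine-tune with gradients using the standard Keras training loop:

# Hybrid refinement: start from the Bees-optimized weights, then descend the loss surface
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, epochs=20, batch_size=16, verbose=0)

Metaheuristic-then-gradient hybrids like this often recover a few points of accuracy that pure patch search leaves on the table.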
--------------------------------------------------------------------------------
/Bees Economic Dispatching.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 | # Define the system model
5 | def make_model():
6 |     return {
7 |         "PD": 1500,  # Power demand
8 |         "Plants": {
9 |             "Pmin": np.array([100, 80, 50, 60, 40]),
10 |             "Pmax": np.array([500, 400, 300, 250, 200]),
11 |             "alpha": np.array([300, 280, 260, 240, 220]),
12 |             "beta": np.array([8, 7.5, 7, 6.5, 6]),
13 |             "gamma": np.array([0.03, 0.028, 0.027, 0.026, 0.025]),
14 |         },
15 |         "nPlant": 5,  # Number of plants
16 |     }
17 |
18 | # Parse function to map x to actual power values
19 | def parse(x, model):
20 |     Pmin = model["Plants"]["Pmin"]
21 |     Pmax = model["Plants"]["Pmax"]
22 |     P = Pmin + (Pmax - Pmin) * x
23 |     return P
24 |
25 | # Define the cost function
26 | def cost_function(x, model, penalty=10):
27 |     P = parse(x, model)
28 |     alpha = model["Plants"]["alpha"]
29 |     beta = model["Plants"]["beta"]
30 |     gamma = model["Plants"]["gamma"]
31 |
32 |     # Calculate cost
33 |     cost = np.sum(alpha + beta * P + gamma * P ** 2)
34 |
35 |     # Power balance constraint
36 |     P_total = np.sum(P)
37 |     PD = model["PD"]
38 |     power_loss = 0.05 * P_total  # Simplified power loss model
39 |     power_balance_violation = max(0, PD - (P_total - power_loss))
40 |
41 |     # "penalty" is passed in so fuzzy_adjustment can actually steer it
42 |     z = cost + penalty * power_balance_violation
43 |
44 |     return z, {
45 |         "P": P,
46 |         "Cost": cost,
47 |         "PowerLoss": power_loss,
48 |         "PowerBalanceViolation": power_balance_violation,
49 |     }
50 |
51 | # Define fuzzy logic adjustment
52 | def fuzzy_adjustment(iteration, max_iter, violation):
53 |     if violation > 0.1:
54 |         penalty = 20  # Increase penalty for high violations
55 |     else:
56 |         penalty = 10
57 |
58 |     if iteration / max_iter < 0.5:
59 |         r = 0.2  # Larger neighborhood radius in early iterations
60 |     else:
61 |         r = 0.1  # Smaller radius for fine-tuning
62 |
63 |     return penalty, r
64 |
65 | # Bee dance function
66 | def bee_dance(position, r):
67 |     nVar = len(position)
68 |     k = np.random.randint(0, nVar)
69 |     new_position = position.copy()
70 |     new_position[k] += np.random.uniform(-r, r)
71 |     new_position = np.clip(new_position, 0, 1)  # Ensure within bounds
72 |     return new_position
73 |
74 | # Bees Algorithm implementation
75 | def bees_algorithm(model):
76 |     # Parameters
77 |     max_iter = 20
78 |     n_scout_bees = 7
79 |     n_elite_sites = 3
80 |     n_selected_sites = 4
81 |     n_elite_bees = 5
82 |     n_selected_bees = 3
83 |     rdamp = 0.7
84 |
85 |     # Initialize scout bees
86 |     bees = [{"position": np.random.uniform(0, 1, model["nPlant"]), "cost": None} for _ in range(n_scout_bees)]
87 |     for bee in bees:
88 |         bee["cost"], bee["details"] = cost_function(bee["position"], model)
89 |
90 |     # Sort by cost
91 |     bees = sorted(bees, key=lambda b: b["cost"])
92 |     best_costs = []
93 |
94 |     # Main loop
95 |     for iteration in range(max_iter):
96 |         print(f"Iteration {iteration + 1}/{max_iter}")
97 |
98 |         # Adjust fuzzy parameters
99 |         penalty, r = fuzzy_adjustment(iteration, max_iter, bees[0]["details"]["PowerBalanceViolation"])
100 |
101 |         # Elite sites
102 |         for i in range(n_elite_sites):
103 |             for _ in range(n_elite_bees):
104 |                 new_position = bee_dance(bees[i]["position"], r)
105 |                 new_cost, new_details = cost_function(new_position, model, penalty)
106 |                 if new_cost < bees[i]["cost"]:
107 |                     bees[i] = {"position": new_position, "cost": new_cost, "details": new_details}
108 |
109 |         # Selected non-elite sites
110 |         for i in range(n_elite_sites, n_selected_sites):
111 |             for _ in range(n_selected_bees):
112 |                 new_position = bee_dance(bees[i]["position"], r)
113 |                 new_cost, new_details = cost_function(new_position, model, penalty)
114 |                 if new_cost < bees[i]["cost"]:
115 |                     bees[i] = {"position": new_position, "cost": new_cost, "details": new_details}
116 |
117 |         # Non-selected sites
118 |         for i in range(n_selected_sites, n_scout_bees):
119 |             new_position = np.random.uniform(0, 1, model["nPlant"])
120 |             new_cost, new_details = cost_function(new_position, model, penalty)
121 |             bees[i] = {"position": new_position, "cost": new_cost, "details": new_details}
122 |
123 |         # Sort by cost
124 |         bees = sorted(bees, key=lambda b: b["cost"])
125 |
126 |         # Store the best cost
127 |         best_costs.append(bees[0]["cost"])
128 |         print(f"Best cost at iteration {iteration + 1}: {bees[0]['cost']:.2f}")
129 |
130 |     # Final results
131 |     best_solution = bees[0]
132 |     print("\nBest Solution:")
133 |     print(f"Cost: {best_solution['cost']:.2f}")
134 |     print(f"Power Distribution: {best_solution['details']['P']}")
135 |
136 |     # Plot the results
137 |     plt.plot(best_costs, marker="o")
138 |     plt.xlabel("Iteration")
139 |     plt.ylabel("Best Cost")
140 |     plt.title("Convergence of Bees Algorithm with Fuzzy Logic")
141 |     plt.grid()
142 |     plt.show()
143 |
144 | # Run the algorithm
145 | if __name__ == "__main__":
146 |     model = make_model()
147 |     bees_algorithm(model)
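For this quadratic-cost model the unconstrained optimum can be checked analytically: at the optimum all plants run at the same marginal cost λ, with β_i + 2γ_i·P_i = λ for every plant. A sketch of that equal-incremental-cost check, ignoring the Pmin/Pmax limits and the 5% loss term (so it is a lower-bound reference, not part of the script):

def equal_lambda_dispatch(model, demand):
    # lambda = (PD + sum(beta_i / (2 gamma_i))) / sum(1 / (2 gamma_i)), then P_i = (lambda - beta_i) / (2 gamma_i)
    beta = model["Plants"]["beta"]
    gamma = model["Plants"]["gamma"]
    lam = (demand + np.sum(beta / (2 * gamma))) / np.sum(1 / (2 * gamma))
    return (lam - beta) / (2 * gamma)

A Bees run whose power distribution is far from `equal_lambda_dispatch(model, model["PD"])` on plants that are strictly inside their limits is usually under-converged.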
--------------------------------------------------------------------------------
/Biogeography-Based Optimization Minimum Spanning Tree.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import networkx as nx
3 | import matplotlib.pyplot as plt
4 | import random
5 |
6 | # BBO Algorithm Parameters
7 | POPULATION_SIZE = 20
8 | MUTATION_RATE = 0.2
9 | MAX_GENERATIONS = 500
10 | NUM_NODES = 8
11 | MAX_EDGE_WEIGHT = 20
12 |
13 | # Create a random graph
14 | def create_random_graph(num_nodes, max_edge_weight):
15 |     graph = nx.complete_graph(num_nodes)
16 |     for (u, v) in graph.edges():
17 |         graph.edges[u, v]['weight'] = random.randint(1, max_edge_weight)
18 |     return graph
19 |
20 | # Fitness function for MST (lower cost is better)
21 | def fitness_function(graph, individual):
22 |     # Reject edge sets that are not spanning trees (missing nodes or disconnected)
23 |     candidate = nx.Graph(individual)
24 |     if candidate.number_of_nodes() < graph.number_of_nodes() or not nx.is_connected(candidate):
25 |         return 1e-9  # Near-zero fitness so invalid individuals die out
26 |     mst_cost = sum(graph.edges[edge]['weight'] for edge in individual)
27 |     return 1 / mst_cost  # Higher fitness for lower cost
28 |
29 | # Generate initial population
30 | def generate_population(graph, size):
31 |     population = []
32 |     for _ in range(size):
33 |         edges = list(graph.edges)
34 |         random.shuffle(edges)
35 |         population.append(edges[:NUM_NODES - 1])  # NUM_NODES - 1 edges: a spanning-tree candidate
36 |     return population
37 |
38 | # Selection function (roulette wheel)
39 | def select(population, fitnesses):
40 |     total_fitness = sum(fitnesses)
41 |     probabilities = [f / total_fitness for f in fitnesses]
42 |     selected_index = np.random.choice(len(population), p=probabilities)
43 |     return population[selected_index]
44 |
45 | # Mutation function
46 | def mutate(individual, graph):
47 |     if random.random() < MUTATION_RATE:
48 |         new_edge = random.choice(list(graph.edges))
49 |         individual[random.randint(0, len(individual) - 1)] = new_edge
50 |     return individual
51 |
52 | # Plot final MST
53 | def plot_final_mst(graph, edges):
54 |     mst_graph = nx.Graph()
55 |     mst_graph.add_edges_from(edges)
56 |     pos = nx.spring_layout(graph)
57 |
58 |     plt.figure(figsize=(8, 6))
59 |     nx.draw(graph, pos, with_labels=True, node_color='lightblue', edge_color='gray', node_size=500, font_size=10)
60 |     nx.draw(mst_graph, pos, with_labels=True, edge_color='red', width=2)
61 |     plt.title("Final MST")
62 |     plt.show()
63 |
64 | # BBO Main Function
65 | def bbo_mst(graph):
66 |     population = generate_population(graph, POPULATION_SIZE)
67 |     best_solution = None
68 |     best_cost = float('inf')
69 |     iteration_costs = []
70 |
71 |     for generation in range(MAX_GENERATIONS):
72 |         fitnesses = [fitness_function(graph, individual) for individual in population]
73 |         best_index = np.argmax(fitnesses)
74 |         current_best_cost = 1 / fitnesses[best_index]
75 |
76 |         if current_best_cost < best_cost:
77 |             best_solution = population[best_index]
78 |             best_cost = current_best_cost
79 |
80 |         iteration_costs.append(best_cost)
81 |
82 |         new_population = []
83 |         for _ in range(POPULATION_SIZE):
84 |             parent = select(population, fitnesses)
85 |             offspring = mutate(parent.copy(), graph)
86 |             new_population.append(offspring)
87 |
88 |         population = new_population
89 |
90 |         print(f"Generation {generation + 1}, Best MST Cost: {best_cost}")
91 |
92 |     return best_solution, best_cost, iteration_costs
93 |
94 | # Main Execution
95 | if __name__ == "__main__":
96 |     random_graph = create_random_graph(NUM_NODES, MAX_EDGE_WEIGHT)
97 |     best_mst, best_mst_cost, costs_over_time = bbo_mst(random_graph)
98 |
99 |     print("\nFinal Best MST Cost:", best_mst_cost)
100 |     print("Best MST Edges:", best_mst)
101 |
102 |     # Plot the final MST
103 |     plot_final_mst(random_graph, best_mst)
104 |
105 |     # Plot iterations over time
106 |     plt.figure(figsize=(10, 6))
107 |     plt.plot(costs_over_time, marker='o')
108 |     plt.title("BBO Optimization of MST")
109 |     plt.xlabel("Generation")
110 |     plt.ylabel("Best MST Cost")
111 |     plt.grid()
112 |     plt.show()
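Because the minimum spanning tree problem is solvable exactly, the BBO result is easy to validate against networkx's built-in MST routine (assuming `random_graph` from the main block above is in scope):

# Exact reference via Kruskal's algorithm (networkx's default)
exact_mst = nx.minimum_spanning_tree(random_graph, weight='weight')
exact_cost = sum(d['weight'] for _, _, d in exact_mst.edges(data=True))
print("Exact MST cost:", exact_cost)

Any gap between `best_mst_cost` and `exact_cost` measures how far the metaheuristic is from the true optimum.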
-------------------------------------------------------------------------------- /Brain Storm Optimization Parallel Machine Scheduling.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import random 4 | 5 | # Function to generate a random parallel machine scheduling problem 6 | def generate_parallel_machine_problem(num_tasks, num_machines): 7 | tasks = np.arange(1, num_tasks + 1) # Task IDs 8 | processing_times = np.random.randint(10, 100, size=num_tasks) # Random processing times 9 | return tasks, processing_times, num_machines 10 | 11 | # Objective function: calculate makespan (Cmax) of a given schedule 12 | def calculate_makespan(schedule, processing_times, num_machines): 13 | machine_times = np.zeros(num_machines) 14 | for position, task in enumerate(schedule): # tasks are assigned round-robin by their position in the schedule 15 | machine_times[position % num_machines] += processing_times[task - 1] 16 | return max(machine_times) 17 | 18 | # Brain Storm Optimization (BSO) for Parallel Machine Scheduling 19 | def brain_storm_optimization(tasks, processing_times, num_machines, iterations=200, population_size=100): 20 | num_tasks = len(tasks) 21 | best_schedule = None 22 | best_makespan = float('inf') 23 | 24 | # Initial population of random schedules 25 | population = [np.random.permutation(tasks) for _ in range(population_size)] 26 | makespans = [calculate_makespan(schedule, processing_times, num_machines) for schedule in population] 27 | 28 | best_schedule = population[np.argmin(makespans)] 29 | best_makespan = min(makespans) 30 | 31 | makespan_progress = [best_makespan] 32 | 33 | for iteration in range(iterations): 34 | # Generate new solutions by mutation and combination 35 | new_population = [] 36 | for i in range(population_size): 37 | if random.random() < 0.5: # Mutation 38 | new_schedule = population[i].copy() 39 | idx1, idx2 = np.random.choice(num_tasks, 2, replace=False) 40 | new_schedule[idx1], new_schedule[idx2] = new_schedule[idx2], new_schedule[idx1] 41 | else: # Combination 42 | parent1, parent2 = random.sample(population, 2) 43 | split_point = np.random.randint(1, num_tasks - 1) 44 | new_schedule = np.concatenate((parent1[:split_point], parent2[split_point:])) 45 | new_schedule = new_schedule[np.sort(np.unique(new_schedule, return_index=True)[1])] # drop duplicate tasks but keep first occurrences in order (plain np.unique would sort the whole schedule) 46 | new_schedule = np.append(new_schedule, np.setdiff1d(tasks, new_schedule)) # re-insert the tasks lost to duplicates 47 | 48 | new_population.append(new_schedule) 49 | 50 | # Evaluate new population 51 | new_makespans = [calculate_makespan(schedule, processing_times, num_machines) for schedule in new_population] 52 | 53 | # Update best solution 54 | min_new_makespan = min(new_makespans) 55 | if min_new_makespan < best_makespan: 56 | best_makespan = min_new_makespan 57 | best_schedule = new_population[np.argmin(new_makespans)] 58 | 59 | # Replace old population with new one 60 | population = new_population 61 | makespan_progress.append(best_makespan) 62 | 63 | return best_schedule, best_makespan, makespan_progress 64 |
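# For context, the classical Longest-Processing-Time (LPT) rule gives a quick
# baseline Cmax to sanity-check the BSO result against. This helper is a sketch
# added for illustration; it is not part of the original script.
def lpt_baseline(tasks, processing_times, num_machines):
    # Assign each task, longest first, to the currently least-loaded machine
    machine_times = np.zeros(num_machines)
    for task in sorted(tasks, key=lambda t: -processing_times[t - 1]):
        machine_times[np.argmin(machine_times)] += processing_times[task - 1]
    return machine_times.max()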
65 | # Main execution for a single run 66 | num_tasks = 14 67 | num_machines = 4 68 | tasks, processing_times, num_machines = generate_parallel_machine_problem(num_tasks, num_machines) 69 | 70 | best_schedule, best_makespan, makespan_progress = brain_storm_optimization( 71 | tasks, processing_times, num_machines 72 | ) 73 | 74 | # Plotting iteration progress 75 | plt.figure(figsize=(10, 6)) 76 | plt.plot(makespan_progress, marker="o", linestyle="--") 77 | plt.title("Makespan over Iterations - Brain Storm Optimization") 78 | plt.xlabel("Iteration") 79 | plt.ylabel("Makespan (Cmax)") 80 | plt.grid() 81 | plt.show() 82 | 83 | # Plot the best schedule as a Gantt-style chart 84 | machine_assignments = [[] for _ in range(num_machines)] 85 | machine_times = np.zeros(num_machines) 86 | for position, task in enumerate(best_schedule): 87 | machine = position % num_machines # assign round-robin, exactly as calculate_makespan scores it, so the chart matches the reported Cmax 88 | machine_assignments[machine].append(task) 89 | machine_times[machine] += processing_times[task - 1] 90 | 91 | plt.figure(figsize=(12, 8)) 92 | for i, machine in enumerate(machine_assignments, 1): 93 | start = 0 94 | for task in machine: 95 | plt.barh(i, processing_times[task - 1], left=start, color="lime", edgecolor="black") 96 | plt.text(start + processing_times[task - 1] / 2, i, str(task), va='center', ha='center', fontsize=10, color="black") 97 | start += processing_times[task - 1] 98 | plt.axvline(best_makespan, color="yellow", linestyle="--", linewidth=2, label=f"Cmax = {best_makespan}") 99 | plt.title("Parallel Machine Scheduling") 100 | plt.xlabel("Time") 101 | plt.ylabel("Machines") 102 | plt.yticks(range(1, num_machines + 1)) 103 | plt.legend() 104 | plt.grid(axis="x") 105 | plt.show() 106 | 107 | # Display results 108 | import pandas as pd 109 | results = pd.DataFrame({ 110 | "Best Schedule": [best_schedule], 111 | "Best Makespan": [best_makespan], 112 | "Iterations": [len(makespan_progress)], 113 | }) 114 | 115 | # Display results in the console 116 | print("Parallel Machine Scheduling Results:") 117 | print(results) -------------------------------------------------------------------------------- /Cuckoo Search Traveling Salesman Problem.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import random 4 | from scipy.spatial import distance 5 | 6 | # Cuckoo Search Algorithm Parameters 7 | POPULATION_SIZE = 25 8 | MAX_GENERATIONS = 100 9 | NUM_LOCATIONS = 10 10 | MAX_COORDINATE = 100 11 | LEVY_FLIGHT_STRENGTH = 1.5 12 | DISCOVERY_RATE = 0.25 13 | 14 | def create_random_locations(num_locations, max_coordinate): 15 | return np.random.randint(0, max_coordinate, size=(num_locations, 2)) 16 | 17 | def calculate_tsp_cost(locations, path): 18 | cost = 0 19 | for i in range(len(path)): 20 | cost += distance.euclidean(locations[path[i]], locations[path[(i + 1) % len(path)]]) 21 | return cost 22 | 23 | def levy_flight(Lambda): # Lévy step generator (kept for reference; the search loop below perturbs tours with a simple swap move instead) 24 | u = np.random.normal(0, 1) * (1 / abs(np.random.normal(0, 1))) ** (1 / Lambda) 25 | v = np.random.normal(0, 1) 26 | return u / abs(v) ** (1 / Lambda) 27 | 28 | def generate_initial_population(size, num_locations): 29 | population = [] 30 | for _ in range(size): 31 | individual = list(range(num_locations)) 32 | random.shuffle(individual) 33 | population.append(individual) 34 | return population 35 | 36 | def replace_worst_nests(population, fitness, discovery_rate): 37 | num_replace = int(len(population) * discovery_rate) 38 | worst_indices = np.argsort(fitness)[-num_replace:] # highest-cost nests are abandoned 39 | for i in worst_indices: 40 | individual = list(range(len(population[0]))) 41 | random.shuffle(individual) 42 | population[i] = individual 43 | 44 | def cuckoo_search(locations): 45 | population = generate_initial_population(POPULATION_SIZE, len(locations)) 46 | best_solution = None 47 | best_cost = float('inf') 48 | costs_over_time = [] 49 | 50 | for generation in range(MAX_GENERATIONS): 51 | fitness = [calculate_tsp_cost(locations, individual) for individual in population] 52 | min_cost_index = np.argmin(fitness) 53 | current_best_cost = fitness[min_cost_index] 54 | 55 | if current_best_cost < best_cost: 56 | best_cost = current_best_cost 57 | best_solution = population[min_cost_index] 58 | 59 | costs_over_time.append(best_cost) 60 | 61 | for i in range(POPULATION_SIZE): 62 | cuckoo = population[i][:] 63 | index1, index2 =
random.sample(range(len(cuckoo)), 2) 64 | cuckoo[index1], cuckoo[index2] = cuckoo[index2], cuckoo[index1] 65 | cuckoo_cost = calculate_tsp_cost(locations, cuckoo) 66 | 67 | if cuckoo_cost < fitness[i]: 68 | population[i] = cuckoo 69 | 70 | replace_worst_nests(population, fitness, DISCOVERY_RATE) 71 | 72 | print(f"Generation {generation + 1}, Best TSP Cost: {best_cost}") 73 | 74 | return best_solution, best_cost, costs_over_time 75 | 76 | def plot_tsp_solution(locations, solution, title): 77 | plt.figure(figsize=(8, 6)) 78 | x = [locations[city][0] for city in solution + [solution[0]]] 79 | y = [locations[city][1] for city in solution + [solution[0]]] 80 | plt.plot(x, y, marker="o", linestyle="-", color="blue", label="Path") 81 | plt.scatter(locations[:, 0], locations[:, 1], color="red", s=100, label="Cities") 82 | plt.title(title) 83 | plt.xlabel("X Coordinate") 84 | plt.ylabel("Y Coordinate") 85 | plt.legend() 86 | plt.grid() 87 | plt.show() 88 | 89 | def main(): 90 | locations = create_random_locations(NUM_LOCATIONS, MAX_COORDINATE) 91 | best_solution, best_cost, costs_over_time = cuckoo_search(locations) 92 | 93 | print("\nFinal Best TSP Cost:", best_cost) 94 | print("Best TSP Path:", best_solution) 95 | 96 | plot_tsp_solution(locations, best_solution, "Final TSP Solution") 97 | 98 | # Plot optimization progress 99 | plt.figure(figsize=(10, 6)) 100 | plt.plot(costs_over_time, marker='o') 101 | plt.title("Cuckoo Search Optimization of TSP") 102 | plt.xlabel("Generation") 103 | plt.ylabel("Best Cost") 104 | plt.grid() 105 | plt.show() 106 | 107 | if __name__ == "__main__": 108 | main() -------------------------------------------------------------------------------- /Differential Evolution Clustering.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from sklearn.datasets import load_iris 4 | from sklearn.preprocessing import StandardScaler 5 | from sklearn.metrics import pairwise_distances_argmin 6 | import matplotlib.pyplot as plt 7 | 8 | # Step 1: Load and Prepare the Iris Dataset 9 | def load_and_preprocess_data(): 10 | iris = load_iris() 11 | X = iris.data # Features 12 | y = iris.target # Labels (not used in clustering) 13 | scaler = StandardScaler() 14 | X_scaled = scaler.fit_transform(X) 15 | return X_scaled, y 16 | 17 | # Step 2: Define Differential Evolution (DE) 18 | class DE: 19 | def __init__(self, n_clusters, n_population, n_iterations, X): 20 | self.n_clusters = n_clusters 21 | self.n_population = n_population 22 | self.n_iterations = n_iterations 23 | self.X = X 24 | self.n_samples, self.n_features = X.shape 25 | 26 | # Initialize population (random cluster centers) 27 | self.population = np.random.rand(n_population, n_clusters, self.n_features) 28 | self.global_best_position = None 29 | self.global_best_score = np.inf 30 | self.cost_history = [] 31 | 32 | def fitness(self, cluster_centers): 33 | # Assign points to nearest cluster center 34 | labels = pairwise_distances_argmin(self.X, cluster_centers) 35 | # Compute intra-cluster distance (sum of squared distances) 36 | score = sum(np.sum((self.X[labels == i] - center) ** 2) 37 | for i, center in enumerate(cluster_centers)) 38 | return score 39 | 40 | def optimize(self): 41 | F = 0.8 # Scaling factor 42 | CR = 0.9 # Crossover probability 43 | for iteration in range(self.n_iterations): 44 | new_population = np.copy(self.population) 45 | for i in range(self.n_population): 46 | # Mutation: Select three random individuals different from i 47 | 
indices = [idx for idx in range(self.n_population) if idx != i] 48 | a, b, c = self.population[np.random.choice(indices, 3, replace=False)] 49 | mutant_vector = a + F * (b - c) 50 | 51 | # Crossover: Combine mutant vector and target vector 52 | crossover_mask = np.random.rand(*mutant_vector.shape) < CR 53 | trial_vector = np.where(crossover_mask, mutant_vector, self.population[i]) 54 | 55 | # Selection: Evaluate and select the better individual 56 | trial_score = self.fitness(trial_vector) 57 | target_score = self.fitness(self.population[i]) 58 | if trial_score < target_score: 59 | new_population[i] = trial_vector 60 | if trial_score < self.global_best_score: 61 | self.global_best_score = trial_score 62 | self.global_best_position = trial_vector 63 | 64 | self.population = new_population 65 | self.cost_history.append(self.global_best_score) 66 | print(f"Iteration {iteration + 1}/{self.n_iterations}, Best Score: {self.global_best_score}") 67 | 68 | return self.global_best_position, self.cost_history 69 | 70 | # Step 3: Clustering with DE-generated Centers 71 | def clustering_with_de(X, n_clusters, n_population, n_iterations): 72 | de = DE(n_clusters, n_population, n_iterations, X) 73 | best_centers, cost_history = de.optimize() 74 | labels = pairwise_distances_argmin(X, best_centers) 75 | return labels, best_centers, cost_history 76 | 77 | # Step 4: Evaluate the Clustering 78 | def evaluate_clustering(X, labels, centers): 79 | quantization_error = sum(np.sum((X[labels == i] - center) ** 2) 80 | for i, center in enumerate(centers)) 81 | intra_cluster_distances = [np.sum((X[labels == i] - center) ** 2) 82 | for i, center in enumerate(centers)] 83 | inter_cluster_distances = np.min( 84 | [np.linalg.norm(center1 - center2) 85 | for i, center1 in enumerate(centers) 86 | for j, center2 in enumerate(centers) if i != j]) 87 | print(f"Quantization Error: {quantization_error:.4f}") 88 | print(f"Intra-cluster Distances: {intra_cluster_distances}") 89 | print(f"Inter-cluster Distance: {inter_cluster_distances:.4f}") 90 | return quantization_error, intra_cluster_distances, inter_cluster_distances 91 | 92 | # Step 5: Visualize the Clustering Result 93 | def visualize_results(X, labels, centers, cost_history): 94 | fig, axes = plt.subplots(1, 2, figsize=(12, 5)) 95 | 96 | # Clustering result 97 | axes[0].scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', marker='o', alpha=0.7) 98 | axes[0].scatter(centers[:, 0], centers[:, 1], c='red', marker='x', s=200, label='Centers') 99 | axes[0].set_title("Clustering Result with DE") 100 | axes[0].legend() 101 | 102 | # DE iteration cost 103 | axes[1].plot(range(1, len(cost_history) + 1), cost_history, marker='o') 104 | axes[1].set_title("DE Iteration Cost") 105 | axes[1].set_xlabel("Iteration") 106 | axes[1].set_ylabel("Cost (Fitness)") 107 | 108 | plt.tight_layout() 109 | plt.show() 110 | 111 | # Step 6: Main Function 112 | def main(): 113 | X, y = load_and_preprocess_data() 114 | n_clusters = 3 115 | n_population = 10 116 | n_iterations = 100 117 | 118 | labels, centers, cost_history = clustering_with_de(X, n_clusters, n_population, n_iterations) 119 | evaluate_clustering(X, labels, centers) 120 | visualize_results(X, labels, centers, cost_history) 121 | 122 | if __name__ == "__main__": 123 | main() 124 | -------------------------------------------------------------------------------- /Differential Evolution Protein Structure Prediction.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import 
matplotlib.pyplot as plt 3 | from scipy.spatial.distance import pdist, squareform 4 | from scipy.interpolate import splprep, splev 5 | 6 | # Define the Enhanced Energy Function 7 | def energy_function(positions): 8 | """Calculate the energy with Lennard-Jones potential and harmonic bonds.""" 9 | distances = pdist(positions) # Pairwise distances 10 | distances_matrix = squareform(distances) 11 | 12 | # Lennard-Jones potential over all residue pairs (adjacent residues additionally feel the bond term below) 13 | lj_energy = np.sum(4 * ((1 / distances[distances > 0])**12 - (1 / distances[distances > 0])**6)) 14 | 15 | # Harmonic bond potential for adjacent residues 16 | bond_energy = 0.5 * np.sum((distances_matrix[np.arange(len(positions)-1), np.arange(1, len(positions))] - 1)**2) 17 | 18 | return lj_energy + bond_energy 19 | 20 | # Initialize DE Parameters 21 | num_particles = 40 22 | num_dimensions = 3 # 3D space 23 | num_amino_acids = 15 # Number of residues in the protein 24 | num_iterations = 400 25 | 26 | # Differential Evolution hyperparameters 27 | mutation_factor = 0.5 # Controls the step size 28 | crossover_probability = 0.9 # Probability of crossover 29 | 30 | # Initialize particle positions 31 | positions = np.random.uniform(-5, 5, (num_particles, num_amino_acids, num_dimensions)) 32 | 33 | # Evaluate initial fitness 34 | fitness_scores = np.array([energy_function(p) for p in positions]) 35 | 36 | # Track convergence 37 | convergence = [] 38 | 39 | # DE Main Loop 40 | for iteration in range(num_iterations): 41 | for i in range(num_particles): 42 | # Mutation: Create a donor vector 43 | indices = np.random.choice(np.delete(np.arange(num_particles), i), 3, replace=False) 44 | a, b, c = positions[indices] 45 | donor_vector = a + mutation_factor * (b - c) 46 | 47 | # Crossover: Create a trial vector 48 | trial_vector = np.copy(positions[i]) 49 | for j in range(num_amino_acids): 50 | if np.random.rand() < crossover_probability: 51 | trial_vector[j] = donor_vector[j] 52 | 53 | # Selection: Compare trial vector with target vector 54 | trial_fitness = energy_function(trial_vector) 55 | if trial_fitness < fitness_scores[i]: 56 | positions[i] = trial_vector 57 | fitness_scores[i] = trial_fitness 58 | 59 | # Track global best 60 | global_best_index = np.argmin(fitness_scores) 61 | global_best_position = positions[global_best_index] 62 | global_best_score = fitness_scores[global_best_index] 63 | 64 | # Track convergence 65 | convergence.append(global_best_score) 66 | print(f"Iteration {iteration + 1}/{num_iterations}, Best Score: {global_best_score:.4f}") 67 | 68 | # Plot the convergence 69 | plt.figure(figsize=(12, 6)) 70 | plt.plot(convergence, marker='o', linewidth=2) 71 | plt.title("Convergence of DE on Enhanced Energy Function") 72 | plt.xlabel("Iteration") 73 | plt.ylabel("Best Energy") 74 | plt.grid() 75 | plt.show() 76 | 77 | # Visualize the final protein structure 78 | from mpl_toolkits.mplot3d import Axes3D 79 | 80 | fig = plt.figure(figsize=(14, 10)) 81 | ax = fig.add_subplot(111, projection='3d') 82 | 83 | # Smooth the backbone with splines 84 | tck, u = splprep([global_best_position[:, 0], global_best_position[:, 1], global_best_position[:, 2]], s=2) 85 | smoothed_coords = splev(np.linspace(0, 1, 100), tck) 86 | 87 | # Plot amino acids 88 | ax.scatter(global_best_position[:, 0], global_best_position[:, 1], global_best_position[:, 2], c='r', s=100, label='Amino Acids') 89 | 90 | # Plot smoothed backbone 91 | ax.plot(smoothed_coords[0], smoothed_coords[1], smoothed_coords[2], c='b', linewidth=2, label='Backbone') 92 | 93 | # Annotate
amino acids 94 | for i, (x, y, z) in enumerate(global_best_position): 95 | ax.text(x, y, z, str(i), color='black', fontsize=10) 96 | 97 | ax.set_title("Optimized Protein Structure with Enhanced DE") 98 | ax.set_xlabel("X") 99 | ax.set_ylabel("Y") 100 | ax.set_zlabel("Z") 101 | ax.legend() 102 | plt.show() 103 | 104 | # Print final details 105 | print("Final Optimized Amino Acid Positions:") 106 | print(global_best_position) 107 | print(f"Final Optimized Energy: {global_best_score:.4f}") 108 | -------------------------------------------------------------------------------- /Firefly Image Segmentation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from skimage import io, color, exposure 4 | from sklearn.metrics import pairwise_distances 5 | from sklearn.preprocessing import MinMaxScaler 6 | from skimage.color import label2rgb 7 | 8 | def cluster_cost(m, X): 9 | """ 10 | Calculate the cost for clustering. 11 | """ 12 | d = pairwise_distances(X, m, metric='euclidean') 13 | dmin = np.min(d, axis=1) 14 | ind = np.argmin(d, axis=1) 15 | WCD = np.sum(dmin) 16 | return WCD, {'d': d, 'dmin': dmin, 'ind': ind, 'WCD': WCD} 17 | 18 | # Load image 19 | img = io.imread('f.jpg') 20 | img = img / 255.0 # Normalize to [0, 1] 21 | gray = color.rgb2gray(img) 22 | gray = exposure.equalize_adapthist(gray) 23 | 24 | # Reshape image to vector 25 | X = gray.reshape(-1, 1) 26 | 27 | # Firefly Algorithm Parameters 28 | k = 10 # Number of clusters 29 | MaxIt = 50 # Maximum Number of Iterations 30 | nPop = 5 # Number of Fireflies 31 | gamma = 1 # Light Absorption Coefficient 32 | beta0 = 2 # Attraction Coefficient Base Value 33 | alpha = 0.2 # Mutation Coefficient 34 | alpha_damp = 0.98 # Mutation Coefficient Damping Ratio 35 | delta = 0.05 * (X.max() - X.min()) # Uniform Mutation Range 36 | m = 2 # Distance exponent 37 | 38 | dmax = np.linalg.norm(X.max() - X.min()) 39 | 40 | # Initialize firefly population 41 | fireflies = [{'Position': np.random.uniform(X.min(), X.max(), (k, 1)), 'Cost': np.inf, 'Out': None} for _ in range(nPop)] 42 | 43 | # Evaluate initial population 44 | BestSol = {'Cost': np.inf} 45 | for firefly in fireflies: 46 | firefly['Cost'], firefly['Out'] = cluster_cost(firefly['Position'], X) 47 | if firefly['Cost'] < BestSol['Cost']: 48 | BestSol = firefly.copy() 49 | 50 | BestCost = [] 51 | 52 | # Firefly Algorithm Main Loop 53 | for it in range(MaxIt): 54 | new_fireflies = [] 55 | for i, firefly_i in enumerate(fireflies): 56 | new_firefly = {'Cost': np.inf} 57 | for j, firefly_j in enumerate(fireflies): 58 | if firefly_j['Cost'] < firefly_i['Cost']: 59 | rij = np.linalg.norm(firefly_i['Position'] - firefly_j['Position']) / dmax 60 | beta = beta0 * np.exp(-gamma * rij**m) 61 | e = delta * np.random.uniform(-1, 1, firefly_i['Position'].shape) 62 | new_position = firefly_i['Position'] + beta * np.random.rand(*firefly_i['Position'].shape) * (firefly_j['Position'] - firefly_i['Position']) + alpha * e 63 | new_position = np.clip(new_position, X.min(), X.max()) 64 | cost, out = cluster_cost(new_position, X) 65 | if cost < new_firefly['Cost']: 66 | new_firefly = {'Position': new_position, 'Cost': cost, 'Out': out} 67 | if cost < BestSol['Cost']: 68 | BestSol = new_firefly.copy() 69 | new_fireflies.append(new_firefly) 70 | 71 | fireflies = sorted(fireflies + new_fireflies, key=lambda x: x['Cost'])[:nPop] 72 | BestCost.append(BestSol['Cost']) 73 | alpha *= alpha_damp 74 | print(f"Iteration {it + 1}: Best Cost 
= {BestSol['Cost']}") 75 | 76 | # Reshape best solution 77 | FAlbl = BestSol['Out']['ind'] 78 | segmented = label2rgb(FAlbl.reshape(gray.shape)) 79 | 80 | # Plot results 81 | plt.figure() 82 | plt.plot(BestCost, '--k', linewidth=1) 83 | plt.title('FA Train') 84 | plt.xlabel('FA Iteration Number') 85 | plt.ylabel('FA Best Cost Value') 86 | plt.show() 87 | 88 | plt.figure() 89 | plt.subplot(1, 2, 1) 90 | plt.imshow(img) 91 | plt.title('Original') 92 | plt.subplot(1, 2, 2) 93 | plt.imshow(segmented) 94 | plt.title('Segmented Image') 95 | plt.show() 96 | -------------------------------------------------------------------------------- /Firefly Space-Time Bending.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Objective function 5 | def objective_function(x, start, end, lambda_bend, warp_field): 6 | # Distance term (Geodesic distance using warp field) 7 | distance = np.linalg.norm((x - end) * (1 + warp_field)) # Adjusted distance with warp effect 8 | 9 | # Bending cost (simulating space-time distortion effort) 10 | bending_cost = lambda_bend * np.sum((x - start)**2) # Quadratic bending penalty 11 | 12 | # Effort to traverse (simulate time dilation or warp effort) 13 | effort_cost = np.sum(np.abs(x - start) * (1 + warp_field)) # Absolute effort adjusted by warp 14 | 15 | # Energy cost for maintaining the warp field 16 | energy_cost = np.sum(warp_field**2) # Energy required to maintain the warp 17 | 18 | # Combined cost function 19 | return distance + bending_cost + 0.5 * effort_cost + 0.2 * energy_cost 20 | 21 | # Parameters for Firefly Algorithm 22 | num_fireflies = 70 23 | num_dimensions = 2 24 | num_iterations = 40 25 | start = np.array([0, 0]) 26 | end = np.array([10, 10]) 27 | lambda_bend = 0.1 28 | warp_field = np.random.uniform(low=0.1, high=0.5, size=num_dimensions) # Random initial warp field 29 | alpha = 0.2 # Randomness strength 30 | beta0 = 1.0 # Base attractiveness 31 | gamma = 1.0 # Absorption coefficient 32 | 33 | # Initialize fireflies 34 | positions = np.random.uniform(low=-5, high=15, size=(num_fireflies, num_dimensions)) 35 | intensities = np.array([objective_function(p, start, end, lambda_bend, warp_field) for p in positions]) 36 | 37 | # Record the best cost at each iteration for plotting 38 | best_costs = [] 39 | 40 | # Optimization loop 41 | for iteration in range(num_iterations): 42 | for i in range(num_fireflies): 43 | for j in range(num_fireflies): 44 | if intensities[j] < intensities[i]: # Move firefly i towards firefly j 45 | distance = np.linalg.norm(positions[i] - positions[j]) 46 | beta = beta0 * np.exp(-gamma * distance**2) 47 | positions[i] += beta * (positions[j] - positions[i]) + alpha * (np.random.rand(num_dimensions) - 0.5) 48 | 49 | # Update intensity for firefly i 50 | intensities[i] = objective_function(positions[i], start, end, lambda_bend, warp_field) 51 | 52 | # Find the best firefly 53 | best_idx = np.argmin(intensities) 54 | best_costs.append(intensities[best_idx]) 55 | 56 | # Print progress 57 | print(f"Iteration {iteration + 1}: Best Fitness = {intensities[best_idx]}") 58 | 59 | # Final results 60 | best_position = positions[best_idx] 61 | print("\nOptimization Completed!") 62 | print(f"Global Best Position: {best_position}") 63 | print(f"Objective Value at Global Best: {intensities[best_idx]}") 64 | 65 | # Visualization 66 | plt.figure(figsize=(10, 6)) 67 | plt.plot(best_costs, label="Best Cost per Iteration") 68 | plt.xlabel("Iteration") 69 | 
plt.ylabel("Cost") 70 | plt.title("Convergence of Firefly Algorithm with Space-Time Bending Analogy") 71 | plt.legend() 72 | plt.grid() 73 | plt.show() 74 | 75 | # Plot the final positions of fireflies 76 | plt.figure(figsize=(8, 8)) 77 | plt.scatter(positions[:, 0], positions[:, 1], label="Final Firefly Positions", color="blue") 78 | plt.scatter(best_position[0], best_position[1], label="Global Best Position", color="red", marker="x", s=100) 79 | plt.scatter(end[0], end[1], label="Target Position", color="green", marker="*", s=200) 80 | plt.xlabel("X Coordinate") 81 | plt.ylabel("Y Coordinate") 82 | plt.title("Final Firefly Distribution") 83 | plt.legend() 84 | plt.grid() 85 | plt.show() 86 | -------------------------------------------------------------------------------- /Genetic Algorithm Evolutionary Art.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Genetic Algorithm Parameters 5 | population_size = 30 6 | num_generations = 100 7 | mutation_rate = 0.1 8 | image_resolution = (300, 300) 9 | 10 | # Fitness Function: Enhanced to reward spiral-like patterns and symmetry 11 | def fitness_function(image): 12 | complexity = calculate_complexity(image) 13 | symmetry = calculate_symmetry(image) 14 | contrast = global_contrast_factor(image) 15 | return complexity + symmetry + contrast 16 | 17 | # Calculate Complexity using edge detection 18 | def calculate_complexity(image): 19 | gradient_x = np.abs(np.diff(image, axis=0)) 20 | gradient_y = np.abs(np.diff(image, axis=1)) 21 | complexity = np.sum(gradient_x) + np.sum(gradient_y) 22 | return complexity / image.size 23 | 24 | # Global Contrast Factor (GCF) 25 | def global_contrast_factor(image): 26 | luminance = np.mean(image, axis=2) # Average across RGB channels 27 | contrast = np.std(luminance) # Standard deviation as a simple contrast measure 28 | return contrast 29 | 30 | # Calculate Symmetry 31 | def calculate_symmetry(image): 32 | vertical_symmetry = np.sum(np.abs(image - np.flip(image, axis=1))) 33 | horizontal_symmetry = np.sum(np.abs(image - np.flip(image, axis=0))) 34 | total_symmetry = -(vertical_symmetry + horizontal_symmetry) / image.size # Negate to reward symmetry 35 | return total_symmetry 36 | 37 | # Initialize Population with spiral-like patterns 38 | def initialize_population(size, resolution): 39 | population = [] 40 | for _ in range(size): 41 | x = np.linspace(-1.0, 1.0, resolution[0]) 42 | y = np.linspace(-1.0, 1.0, resolution[1]) 43 | x, y = np.meshgrid(x, y) 44 | r = np.sqrt(x**2 + y**2) 45 | theta = np.arctan2(y, x) 46 | spiral = (np.sin(10 * r + 5 * theta) * 127 + 128).astype(np.uint8) 47 | image = np.stack([spiral, spiral, spiral], axis=2) # Grayscale to RGB 48 | population.append(image) 49 | return population 50 | 51 | # Crossover Operation 52 | def crossover(parent1, parent2): 53 | crossover_point = np.random.randint(0, parent1.shape[1]) 54 | child = np.concatenate((parent1[:, :crossover_point], parent2[:, crossover_point:]), axis=1) 55 | return child 56 | 57 | # Mutation Operation (structured fractal adjustments) 58 | def mutate(image, rate): 59 | mutated_image = image.copy() 60 | num_pixels = np.prod(image.shape[:2]) 61 | num_mutations = int(rate * num_pixels) 62 | for _ in range(num_mutations): 63 | x, y = np.random.randint(0, image.shape[0]), np.random.randint(0, image.shape[1]) 64 | mutated_image[x, y] = np.clip(mutated_image[x, y] + np.random.randint(-50, 50), 0, 255) 65 | return mutated_image 66 | 67 | # 
Main GA Loop 68 | population = initialize_population(population_size, image_resolution) 69 | for generation in range(num_generations): 70 | print(f"Generation {generation + 1}: Evaluating fitness") 71 | 72 | # Evaluate Fitness 73 | fitness_scores = [fitness_function(image) for image in population] 74 | best_index = int(np.argmax(fitness_scores)) 75 | best_fitness = fitness_scores[best_index] 76 | best_image = population[best_index] # capture the scored winner now, before the population is replaced below 77 | print(f" Best fitness: {best_fitness:.2f}") 78 | 79 | # Select Parents (Roulette Wheel Selection; scores are shifted because the symmetry term can make fitness negative, which would break np.random.choice) 80 | shifted_scores = np.array(fitness_scores) - min(fitness_scores) + 1e-9 81 | probabilities = shifted_scores / shifted_scores.sum() 82 | indices = np.arange(len(population)) # Indices for the population 83 | selected_indices = np.random.choice(indices, size=population_size, p=probabilities, replace=True) 84 | parents = [population[i] for i in selected_indices] 85 | 86 | print(" Parents selected") 87 | 88 | # Generate New Population 89 | new_population = [] 90 | for i in range(len(parents)): # one child per parent keeps the population at population_size (stepping by two would halve it every generation) 91 | parent1, parent2 = parents[i], parents[(i + 1) % len(parents)] 92 | child = crossover(parent1, parent2) 93 | child = mutate(child, mutation_rate) 94 | new_population.append(child) 95 | population = new_population 96 | 97 | print(" New population generated") 98 | 99 | # Display the best image of the generation just evaluated 100 | plt.imshow(best_image, cmap='inferno') 101 | plt.title(f"Generation {generation + 1} - Best Fitness: {best_fitness:.2f}") 102 | plt.axis('off') 103 | plt.pause(0.5) 104 | 105 | print("Evolutionary art generation complete!") -------------------------------------------------------------------------------- /Genetic Algorithm Exoplanetary Adaptation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Generate random planet and its stellar system parameters 5 | def generate_planet(): 6 | """Generates random parameters for a hypothetical planet in a stellar system.""" 7 | planet_name = f"Planet_{np.random.randint(1000, 9999)}" 8 | star_name = f"Star_{np.random.randint(1000, 9999)}" 9 | 10 | # Randomize planet parameters 11 | gravity = np.random.uniform(0.1, 3.0) # Gravity in Earth G 12 | atmosphere_composition = { 13 | "O2": np.random.uniform(0.01, 0.5), # Oxygen levels (%) 14 | "CO2": np.random.uniform(0.01, 0.5), # Carbon dioxide levels (%) 15 | "Other Gases": np.random.uniform(0.01, 0.9), # Other gases (%) 16 | } 17 | radiation_level = np.random.uniform(1, 500) # Radiation level (mSv/year) 18 | temperature_range = (np.random.uniform(-100, 0), np.random.uniform(0, 100)) # Min and max temperature (°C) 19 | day_length = np.random.uniform(6, 48) # Day length in hours 20 | 21 | return { 22 | "planet_name": planet_name, 23 | "star_name": star_name, 24 | "gravity": gravity, 25 | "atmosphere_composition": atmosphere_composition, 26 | "radiation_level": radiation_level, 27 | "temperature_range": temperature_range, 28 | "day_length": day_length, 29 | } 30 | 31 | # Objective function to evaluate fitness of a genetic profile 32 | def objective_function(genetic_profile, planet_params): 33 | """Evaluates the fitness of a genetic profile based on planet conditions.""" 34 | # Extract planet parameters 35 | gravity = planet_params["gravity"] 36 | atmosphere = planet_params["atmosphere_composition"] 37 | radiation = planet_params["radiation_level"] 38 | temp_min, temp_max = planet_params["temperature_range"] 39 | day_length = planet_params["day_length"] 40 | 41 | # Genetic traits
in the profile 42 | radiation_resistance, bone_density, oxygen_efficiency, temp_adaptability, stress_resilience = genetic_profile 43 | 44 | # Fitness components 45 | fitness_radiation = np.exp(-radiation / radiation_resistance) # Better resistance reduces impact 46 | fitness_gravity = np.exp(-abs(gravity - 1) / bone_density) # Closer to Earth's gravity is ideal 47 | fitness_oxygen = oxygen_efficiency * atmosphere["O2"] # Oxygen utilization adapts to O2 levels 48 | fitness_temperature = np.exp(-abs(temp_min + temp_max) / (2 * temp_adaptability)) # Avg temp adaptation 49 | fitness_stress = stress_resilience / day_length # Better stress handling for long days 50 | 51 | # Combined fitness score (weighted sum) 52 | fitness = (0.25 * fitness_radiation + 53 | 0.2 * fitness_gravity + 54 | 0.25 * fitness_oxygen + 55 | 0.2 * fitness_temperature + 56 | 0.1 * fitness_stress) 57 | return fitness 58 | 59 | # Parameters for Genetic Algorithm 60 | population_size = 150 61 | num_generations = 300 62 | num_genes = 5 # Number of genetic traits 63 | mutation_rate = 0.1 64 | 65 | # Initialize population 66 | population = np.random.uniform(0.5, 5.0, size=(population_size, num_genes)) # Random genetic profiles 67 | planet_params = generate_planet() 68 | fitness_history = [] 69 | 70 | # Optimization loop 71 | for generation in range(num_generations): 72 | # Evaluate fitness for each individual 73 | fitness = np.array([objective_function(individual, planet_params) for individual in population]) 74 | fitness_history.append(np.max(fitness)) # Track the best fitness in this generation 75 | 76 | # Print generation progress 77 | best_individual = population[np.argmax(fitness)] 78 | print(f"Generation {generation + 1}: Best Fitness = {np.max(fitness):.4f}") 79 | 80 | # Selection (roulette wheel selection) 81 | probabilities = fitness / fitness.sum() 82 | selected_indices = np.random.choice(np.arange(population_size), size=population_size, p=probabilities) 83 | selected_population = population[selected_indices] 84 | 85 | # Crossover (single-point) 86 | new_population = [] 87 | for i in range(0, population_size, 2): 88 | parent1, parent2 = selected_population[i], selected_population[(i + 1) % population_size] 89 | crossover_point = np.random.randint(1, num_genes) 90 | child1 = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) 91 | child2 = np.concatenate([parent2[:crossover_point], parent1[crossover_point:]]) 92 | new_population.extend([child1, child2]) 93 | 94 | # Mutation 95 | new_population = np.array(new_population) 96 | mutation_mask = np.random.rand(population_size, num_genes) < mutation_rate 97 | new_population[mutation_mask] += np.random.normal(0, 0.1, size=mutation_mask.sum()) 98 | 99 | # Update population 100 | population = new_population 101 | 102 | # Final results 103 | final_fitness = np.array([objective_function(individual, planet_params) for individual in population]) 104 | best_individual = population[np.argmax(final_fitness)] 105 | print("\nOptimization Completed!") 106 | print(f"Planet Parameters: {planet_params}") 107 | print(f"Best Genetic Profile: {best_individual}") 108 | print(f"Best Fitness: {np.max(final_fitness):.4f}") 109 | 110 | # Visualization 111 | plt.figure(figsize=(10, 6)) 112 | plt.plot(fitness_history, label="Best Fitness per Generation") 113 | plt.xlabel("Generation") 114 | plt.ylabel("Fitness") 115 | plt.title("Genetic Algorithm Optimization for Human Survival on Exoplanet") 116 | plt.legend() 117 | plt.grid() 118 | plt.show() 119 | 
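A caveat on the mutation step above: the Gaussian noise is added without re-applying any bounds, so over many generations a trait can drift toward zero or below, where the division-based fitness terms (np.exp(-radiation / radiation_resistance), for instance) can explode once a trait goes negative. A minimal guard, assuming the initialization range [0.5, 5.0] is meant to hold throughout the run (my assumption, not stated in the script):

    # Repeat the script's own mutation step, then clamp traits back into range
    new_population[mutation_mask] += np.random.normal(0, 0.1, size=mutation_mask.sum())
    new_population = np.clip(new_population, 0.5, 5.0)

With the clamp in place, the roulette-wheel probabilities stay well-defined, because every fitness component remains positive.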
-------------------------------------------------------------------------------- /Grey Wolf Optimizer VAE Optimized (Latent Space).py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from sklearn.datasets import load_iris 4 | from sklearn.model_selection import train_test_split 5 | from sklearn.preprocessing import StandardScaler, OneHotEncoder 6 | from sklearn.metrics import classification_report 7 | from sklearn.ensemble import RandomForestClassifier 8 | 9 | # Load and preprocess the Iris dataset 10 | iris = load_iris() 11 | X = iris.data 12 | y = iris.target 13 | 14 | scaler = StandardScaler() 15 | X = scaler.fit_transform(X) 16 | 17 | encoder = OneHotEncoder(sparse_output=False) 18 | y_encoded = encoder.fit_transform(y.reshape(-1, 1)) 19 | 20 | # Split the data (note: test_size=0.6 holds out 60%, so only 40% of the samples are used for training) 21 | X_train, X_test, y_train, y_test = train_test_split(X, y_encoded, test_size=0.6, random_state=42) 22 | 23 | # Build a Variational Autoencoder (VAE) 24 | class VAE(tf.keras.Model): 25 | def __init__(self, latent_dim): 26 | super(VAE, self).__init__() 27 | self.latent_dim = latent_dim 28 | 29 | # Encoder 30 | self.encoder = tf.keras.Sequential([ 31 | tf.keras.layers.InputLayer(input_shape=(X_train.shape[1],)), 32 | tf.keras.layers.Dense(16, activation="relu"), 33 | tf.keras.layers.Dense(latent_dim * 2) # Mean and LogVar 34 | ]) 35 | 36 | # Decoder 37 | self.decoder = tf.keras.Sequential([ 38 | tf.keras.layers.InputLayer(input_shape=(latent_dim,)), 39 | tf.keras.layers.Dense(16, activation="relu"), 40 | tf.keras.layers.Dense(X_train.shape[1]) 41 | ]) 42 | 43 | def reparameterize(self, mean, logvar): 44 | eps = tf.random.normal(shape=mean.shape) 45 | return eps * tf.exp(logvar * 0.5) + mean 46 | 47 | def call(self, inputs): 48 | x = self.encoder(inputs) 49 | mean, logvar = tf.split(x, num_or_size_splits=2, axis=1) 50 | z = self.reparameterize(mean, logvar) 51 | reconstructed = self.decoder(z) 52 | return reconstructed, mean, logvar 53 | 54 | # Define VAE loss 55 | def vae_loss(data, reconstructed, mean, logvar): 56 | reconstruction_loss = tf.reduce_mean(tf.keras.losses.mse(data, reconstructed)) 57 | kl_divergence = -0.5 * tf.reduce_sum(1 + logvar - tf.square(mean) - tf.exp(logvar)) 58 | return reconstruction_loss + kl_divergence 59 | 60 | # Train VAE 61 | latent_dim = 2 62 | vae = VAE(latent_dim) 63 | optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001) 64 | 65 | @tf.function 66 | def train_step(data): 67 | with tf.GradientTape() as tape: 68 | reconstructed, mean, logvar = vae(data) 69 | loss = vae_loss(data, reconstructed, mean, logvar) 70 | gradients = tape.gradient(loss, vae.trainable_variables) 71 | optimizer.apply_gradients(zip(gradients, vae.trainable_variables)) 72 | return loss 73 | 74 | print("Training VAE...") 75 | for epoch in range(200): 76 | loss = train_step(X_train) 77 | if epoch % 10 == 0: 78 | print(f"Epoch {epoch}: Loss = {loss.numpy():.4f}") 79 | 80 | # Gray Wolf Optimizer (GWO) 81 | class GrayWolfOptimizer: 82 | def __init__(self, latent_dim, n_wolves=30, max_iters=200): 83 | self.latent_dim = latent_dim 84 | self.n_wolves = n_wolves 85 | self.max_iters = max_iters 86 | self.wolves = np.random.uniform(-2, 2, size=(n_wolves, latent_dim)) 87 | 88 | def fitness(self, wolves): 89 | synthetic_data = vae.decoder(tf.convert_to_tensor(wolves, dtype=tf.float32)).numpy() 90 | reconstruction_loss = np.mean((synthetic_data - np.mean(X_train, axis=0))**2, axis=1) # per-wolf reconstruction error (a single scalar here would make np.argsort fail in optimize below) 91 | diversity_score = np.linalg.norm(synthetic_data - synthetic_data.mean(axis=0), axis=1) # per-wolf distance from the batch mean as a diversity proxy 92 | return -reconstruction_loss + diversity_score # Maximize diversity, minimize reconstruction error
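# Side note (added for clarity, not part of the original script): the update in
# optimize() below folds the canonical C coefficient into A. In the textbook
# Grey Wolf Optimizer (Mirjalili et al., 2014), each leader p in {alpha, beta, delta}
# gets its own C = 2 * np.random.rand(), and the per-leader step is
#   D_p = abs(C * p - self.wolves[i])
#   X_p = p - A_p * D_p
# before averaging X_alpha, X_beta, and X_delta into the new position.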
93 | 94 | def optimize(self): 95 | for t in range(self.max_iters): 96 | fitness = self.fitness(self.wolves) 97 | sorted_indices = np.argsort(fitness)[::-1] # descending: fittest wolves first 98 | self.wolves = self.wolves[sorted_indices] 99 | 100 | # Handle edge cases for population size 101 | if len(self.wolves) < 3: 102 | alpha = beta = delta = self.wolves[0] 103 | else: 104 | alpha, beta, delta = self.wolves[:3] 105 | 106 | for i in range(len(self.wolves)): 107 | a = 2 - t * (2 / self.max_iters) 108 | r1, r2 = np.random.rand(), np.random.rand() 109 | A1, A2, A3 = 2 * a * r1 - a, 2 * a * r2 - a, 2 * a * np.random.rand() - a 110 | D1, D2, D3 = abs(A1 * alpha - self.wolves[i]), abs(A2 * beta - self.wolves[i]), abs(A3 * delta - self.wolves[i]) 111 | X1, X2, X3 = alpha - A1 * D1, beta - A2 * D2, delta - A3 * D3 112 | self.wolves[i] = (X1 + X2 + X3) / 3 113 | return self.wolves[:min(len(self.wolves), 200)] # Return the fittest latent vectors (up to 200) 114 | 115 | # Generate synthetic data 116 | print("Optimizing latent space with GWO...") 117 | gwo = GrayWolfOptimizer(latent_dim=latent_dim) 118 | optimized_latents = gwo.optimize() 119 | synthetic_data = vae.decoder(tf.convert_to_tensor(optimized_latents, dtype=tf.float32)).numpy() 120 | 121 | 122 | # Combine original and synthetic data 123 | combined_X_train = np.vstack([X_train, synthetic_data]) 124 | synthetic_labels = np.tile(np.argmax(y_train[:len(synthetic_data)], axis=1), (len(synthetic_data) // len(y_train) + 1))[:len(synthetic_data)] # labels for synthetic points are recycled from y_train (a heuristic; the decoded samples have no ground-truth class) 125 | combined_y_train = np.hstack([np.argmax(y_train, axis=1), synthetic_labels]) 126 | 127 | # Train classifier on combined data 128 | clf_combined = RandomForestClassifier(random_state=42) 129 | clf_combined.fit(combined_X_train, combined_y_train) 130 | 131 | # Evaluate on test data 132 | y_combined_pred = clf_combined.predict(X_test) 133 | 134 | # Print classification report 135 | print("\nClassification Report (Combined Original and Synthetic Data):") 136 | print(classification_report(np.argmax(y_test, axis=1), y_combined_pred)) 137 | -------------------------------------------------------------------------------- /Harmony Search Regression.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from sklearn.metrics import mean_squared_error 4 | from sklearn.preprocessing import PolynomialFeatures 5 | from sklearn.model_selection import train_test_split 6 | from sklearn.datasets import make_friedman1 7 | 8 | # Load a sample regression dataset (Friedman #1 dataset) 9 | X, y = make_friedman1(n_samples=200, n_features=5, noise=0.1, random_state=42) 10 | 11 | # Use only the first feature for simplicity (can adjust as needed) 12 | X = X[:, :1] 13 | 14 | # Split into training and testing sets 15 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) 16 | 17 | # Define the Harmony Search algorithm 18 | class HarmonySearch: 19 | def __init__(self, obj_func, bounds, hms=20, hmcr=0.9, par=0.3, max_iter=200): 20 | self.obj_func = obj_func 21 | self.bounds = bounds 22 | self.hms = hms 23 | self.hmcr = hmcr 24 | self.par = par 25 | self.max_iter = max_iter 26 | self.harmony_memory = [] 27 | 28 | def initialize(self): 29 | for _ in range(self.hms): 30 | harmony = [np.random.uniform(low, high) for low, high in self.bounds] 31 | self.harmony_memory.append(harmony) 32 | 33 | def improvise(self): 34 | new_harmony = [] 35 | for i, (low, high) in
enumerate(self.bounds): 36 | if np.random.rand() < self.hmcr: 37 | new_value = np.random.choice([h[i] for h in self.harmony_memory]) 38 | if np.random.rand() < self.par: 39 | new_value += np.random.uniform(-1, 1) * (high - low) * 0.01 40 | else: 41 | new_value = np.random.uniform(low, high) 42 | new_harmony.append(np.clip(new_value, low, high)) 43 | return new_harmony 44 | 45 | def optimize(self): 46 | self.initialize() 47 | for _ in range(self.max_iter): 48 | new_harmony = self.improvise() 49 | new_score = self.obj_func(new_harmony) 50 | worst_idx = np.argmax([self.obj_func(h) for h in self.harmony_memory]) 51 | if new_score < self.obj_func(self.harmony_memory[worst_idx]): 52 | self.harmony_memory[worst_idx] = new_harmony 53 | best_idx = np.argmin([self.obj_func(h) for h in self.harmony_memory]) 54 | return self.harmony_memory[best_idx] 55 | 56 | # Objective function for regression (minimize MSE) 57 | def objective(params): 58 | degree = int(params[0]) 59 | coeffs = params[1:degree + 2] # Adjust number of coefficients to match degree + intercept 60 | poly = PolynomialFeatures(degree=degree) 61 | X_poly_train = poly.fit_transform(X_train) # Add intercept term 62 | y_pred = np.dot(X_poly_train, np.array(coeffs)) # Predict with coefficients 63 | return mean_squared_error(y_train, y_pred) 64 | 65 | # Set bounds for Harmony Search 66 | max_poly_degree = 5 # Maximum degree of the polynomial 67 | bounds = [(1, max_poly_degree)] + [(-10, 10) for _ in range(max_poly_degree + 1)] # +1 for intercept 68 | 69 | # Run Harmony Search 70 | hs = HarmonySearch(obj_func=objective, bounds=bounds, max_iter=200) 71 | best_params = hs.optimize() 72 | 73 | # Extract the best degree and coefficients 74 | best_degree = int(best_params[0]) 75 | best_coeffs = best_params[1:best_degree + 2] # Include intercept term 76 | 77 | # Use the best polynomial degree and coefficients for plotting 78 | poly = PolynomialFeatures(degree=best_degree) 79 | X_poly_train = poly.fit_transform(X_train) 80 | X_poly_test = poly.transform(X_test) 81 | y_train_pred = np.dot(X_poly_train, np.array(best_coeffs)) 82 | y_test_pred = np.dot(X_poly_test, np.array(best_coeffs)) 83 | 84 | # Calculate Mean Squared Error (MSE) 85 | mse_train = mean_squared_error(y_train, y_train_pred) 86 | mse_test = mean_squared_error(y_test, y_test_pred) 87 | print(f"MSE (Train): {mse_train:.4f}") 88 | print(f"MSE (Test): {mse_test:.4f}") 89 | print(f"Optimized Polynomial Degree: {best_degree}") 90 | 91 | # Plot the regression curve 92 | X_range = np.linspace(X.min(), X.max(), 500).reshape(-1, 1) 93 | X_range_poly = poly.transform(X_range) 94 | y_range_pred = np.dot(X_range_poly, np.array(best_coeffs)) 95 | 96 | plt.scatter(X, y, color='blue', label='Data Samples') 97 | plt.plot(X_range, y_range_pred, color='red', linewidth=2, label=f'Degree {best_degree} Fit') 98 | plt.title('Nonlinear Regression') 99 | plt.xlabel('X') 100 | plt.ylabel('y') 101 | plt.legend() 102 | plt.grid() 103 | plt.show() 104 | -------------------------------------------------------------------------------- /Optimization and Metahuristics.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SeyedMuhammadHosseinMousavi/Optimization-Algorithms-and-Problems/e39c3beeff81574071eec6bcc81a30ff694c8c5c/Optimization and Metahuristics.pdf -------------------------------------------------------------------------------- /Optimization and Metahuristics.pptx: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SeyedMuhammadHosseinMousavi/Optimization-Algorithms-and-Problems/e39c3beeff81574071eec6bcc81a30ff694c8c5c/Optimization and Metahuristics.pptx -------------------------------------------------------------------------------- /Particle Swarm Optimization Evolutionary Art.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Particle Swarm Optimization (PSO) Parameters 5 | population_size = 20 6 | num_iterations = 50 7 | inertia = 0.5 8 | c1 = 2.0 # Cognitive parameter 9 | c2 = 2.0 # Social parameter 10 | image_resolution = (300, 300) 11 | 12 | # Fitness Function: Enhanced to reward structured patterns 13 | 14 | def fitness_function(image): 15 | complexity = calculate_complexity(image) 16 | symmetry = calculate_symmetry(image) 17 | contrast = global_contrast_factor(image) 18 | color_diversity = calculate_color_diversity(image) 19 | return complexity + symmetry + contrast + color_diversity 20 | 21 | # Calculate Complexity using edge detection 22 | def calculate_complexity(image): 23 | gradient_x = np.abs(np.diff(image, axis=0)) 24 | gradient_y = np.abs(np.diff(image, axis=1)) 25 | complexity = np.sum(gradient_x) + np.sum(gradient_y) 26 | return complexity / image.size 27 | 28 | # Global Contrast Factor (GCF) 29 | def global_contrast_factor(image): 30 | luminance = np.mean(image, axis=2) # Average across RGB channels 31 | contrast = np.std(luminance) # Standard deviation as a simple contrast measure 32 | return contrast 33 | 34 | # Calculate Symmetry 35 | def calculate_symmetry(image): 36 | vertical_symmetry = np.sum(np.abs(image - np.flip(image, axis=1))) 37 | horizontal_symmetry = np.sum(np.abs(image - np.flip(image, axis=0))) 38 | total_symmetry = -(vertical_symmetry + horizontal_symmetry) / image.size # Negate to reward symmetry 39 | return total_symmetry 40 | 41 | # Calculate Color Diversity 42 | def calculate_color_diversity(image): 43 | unique_colors = len(np.unique(image.reshape(-1, image.shape[2]), axis=0)) 44 | return unique_colors / (image.shape[0] * image.shape[1]) 45 | 46 | # Initialize Population with fractal-like patterns and colorful variations 47 | def initialize_population(size, resolution): 48 | population = [] 49 | for _ in range(size): 50 | x = np.linspace(-2.0, 2.0, resolution[0]) 51 | y = np.linspace(-2.0, 2.0, resolution[1]) 52 | x, y = np.meshgrid(x, y) 53 | r = np.sqrt(x**2 + y**2) 54 | theta = np.arctan2(y, x) 55 | fractal_r = (np.sin(10 * r + 5 * theta) * 127 + 128).astype(np.uint8) 56 | fractal_g = (np.cos(10 * r - 5 * theta) * 127 + 128).astype(np.uint8) 57 | fractal_b = ((np.sin(10 * theta) + np.cos(10 * r)) * 127 + 128).astype(np.uint8) 58 | image = np.stack([fractal_r, fractal_g, fractal_b], axis=2) 59 | population.append(image) 60 | return population 61 | 62 | # PSO Update Function 63 | def update_particles(positions, velocities, personal_best_positions, global_best_position, inertia, c1, c2): 64 | for i in range(len(positions)): 65 | r1, r2 = np.random.random(), np.random.random() 66 | cognitive_component = c1 * r1 * (personal_best_positions[i] - positions[i]) 67 | social_component = c2 * r2 * (global_best_position - positions[i]) 68 | velocities[i] = inertia * velocities[i] + cognitive_component + social_component 69 | positions[i] = np.clip(positions[i] + velocities[i], 0, 255) # Keep positions within valid range 70 | 71 | # Main PSO Loop 72 | population = initialize_population(population_size, image_resolution) 73 | velocities 
= [np.random.uniform(-1, 1, (image_resolution[0], image_resolution[1], 3)) for _ in range(population_size)] 74 | personal_best_positions = population[:] 75 | personal_best_scores = [fitness_function(image) for image in population] 76 | global_best_position = personal_best_positions[np.argmax(personal_best_scores)] 77 | global_best_score = max(personal_best_scores) 78 | 79 | for iteration in range(num_iterations): 80 | print(f"Iteration {iteration + 1}: Evaluating fitness") 81 | 82 | # Evaluate Fitness 83 | fitness_scores = [fitness_function(image) for image in population] 84 | 85 | for i in range(population_size): 86 | if fitness_scores[i] > personal_best_scores[i]: 87 | personal_best_scores[i] = fitness_scores[i] 88 | personal_best_positions[i] = population[i] 89 | 90 | best_particle_index = np.argmax(personal_best_scores) 91 | if personal_best_scores[best_particle_index] > global_best_score: 92 | global_best_score = personal_best_scores[best_particle_index] 93 | global_best_position = personal_best_positions[best_particle_index] 94 | 95 | print(f" Best fitness: {global_best_score:.2f}") 96 | 97 | # Update Particles 98 | update_particles(population, velocities, personal_best_positions, global_best_position, inertia, c1, c2) 99 | 100 | # Display the best image 101 | plt.imshow(global_best_position.astype(np.uint8)) 102 | plt.title(f"Iteration {iteration + 1} - Best Fitness: {global_best_score:.2f}") 103 | plt.axis('off') 104 | plt.pause(0.5) 105 | 106 | print("Intelligent art generation with PSO complete!") 107 | -------------------------------------------------------------------------------- /Particle Swarm Optimization Evolved Antenna.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Define the objective function 5 | def objective_function(antenna_points): 6 | """ 7 | Objective Function for Antenna Design Optimization 8 | 9 | This function evaluates the quality of an antenna design by combining multiple 10 | factors such as: 11 | - Total length of the antenna (to ensure it remains compact and efficient). 12 | - Smoothness, penalizing excessive twists and encouraging smooth transitions. 13 | 14 | Parameters: 15 | antenna_points (numpy.ndarray): Array of 3D coordinates representing the antenna geometry. 16 | 17 | Returns: 18 | float: The computed cost for the given antenna design. 19 | """ 20 | # Compute total length of the antenna 21 | total_length = np.sum(np.sqrt(np.sum(np.diff(antenna_points, axis=0)**2, axis=1))) 22 | 23 | # Compute smoothness penalty (penalize excessive variation in angles between segments) 24 | smoothness_penalty = np.sum(np.abs(np.diff(antenna_points[:, 2]))) 25 | 26 | # Combine metrics into the cost function 27 | cost = total_length + 0.3 * smoothness_penalty 28 | return cost 29 | 30 | # Function to generate initial antenna with seven joints 31 | def generate_initial_antenna(): 32 | """ 33 | Generate an initial antenna geometry with seven joints. 34 | 35 | Returns: 36 | numpy.ndarray: Array of 3D coordinates representing the initial antenna geometry. 
37 | """ 38 | joints = 7 39 | z = np.linspace(0, 10, joints + 1) # Antenna progresses upward 40 | x = np.random.uniform(-1, 1, joints + 1) 41 | y = np.random.uniform(-1, 1, joints + 1) 42 | return np.column_stack((x, y, z)) 43 | 44 | # PSO Parameters 45 | num_particles = 30 46 | num_iterations = 200 47 | joints = 7 # Number of joints 48 | dimensions = joints * 3 # 3D coordinates for each joint 49 | 50 | # Function to run PSO and return results (particles encode the joint coordinates directly; generate_initial_antenna above is an illustrative helper and is not called here) 51 | def run_pso(): 52 | # Initialize particle positions and velocities 53 | particles = np.random.uniform(-1, 1, (num_particles, dimensions)) 54 | velocities = np.random.uniform(-0.1, 0.1, (num_particles, dimensions)) 55 | best_particle_positions = particles.copy() 56 | best_particle_costs = np.array([objective_function(p.reshape(-1, 3)) for p in particles]) 57 | global_best_position = particles[np.argmin(best_particle_costs)] 58 | global_best_cost = np.min(best_particle_costs) 59 | 60 | # PSO Hyperparameters 61 | w = 0.5 # Inertia weight 62 | c1 = 1.5 # Cognitive coefficient 63 | c2 = 1.5 # Social coefficient 64 | 65 | # Track cost over iterations 66 | cost_history = [] 67 | 68 | # PSO Main Loop 69 | for iteration in range(num_iterations): 70 | for i, particle in enumerate(particles): 71 | # Update velocity 72 | r1, r2 = np.random.random(2) 73 | velocities[i] = (w * velocities[i] + 74 | c1 * r1 * (best_particle_positions[i] - particle) + 75 | c2 * r2 * (global_best_position - particle)) 76 | 77 | # Update position 78 | particles[i] += velocities[i] 79 | 80 | # Constrain particles within bounds 81 | particles[i] = np.clip(particles[i], -1, 1) 82 | 83 | # Evaluate cost 84 | reshaped_particle = particles[i].reshape(-1, 3) 85 | cost = objective_function(reshaped_particle) 86 | 87 | # Update personal best 88 | if cost < best_particle_costs[i]: 89 | best_particle_costs[i] = cost 90 | best_particle_positions[i] = particles[i] 91 | 92 | # Update global best 93 | if cost < global_best_cost: 94 | global_best_cost = cost 95 | global_best_position = particles[i] 96 | 97 | cost_history.append(global_best_cost) 98 | 99 | return global_best_position.reshape(-1, 3), cost_history 100 | 101 | # Plot 4 antennas and their costs in a 2x4 layout 102 | fig = plt.figure(figsize=(20, 10)) # panels are added one at a time below (plt.subplots here would leave eight empty frames behind the 3D axes) 103 | 104 | for i in range(4): 105 | best_antenna_points, cost_history = run_pso() 106 | 107 | # Plot antenna geometry (3D plot) 108 | ax = fig.add_subplot(2, 4, i + 1, projection='3d') 109 | ax.plot(best_antenna_points[:, 0], best_antenna_points[:, 1], best_antenna_points[:, 2], marker='o', linewidth=2) 110 | ax.set_title(f"Optimized Antenna {i + 1}", fontsize=14) 111 | ax.set_xlabel("X", fontsize=12) 112 | ax.set_ylabel("Y", fontsize=12) 113 | ax.set_zlabel("Z", fontsize=12) 114 | ax.scatter(best_antenna_points[0, 0], best_antenna_points[0, 1], best_antenna_points[0, 2], color='red', label='Start', s=100) 115 | ax.scatter(best_antenna_points[-1, 0], best_antenna_points[-1, 1], best_antenna_points[-1, 2], color='green', label='End', s=100) 116 | ax.legend(fontsize=10) 117 | 118 | # Plot cost history (2D plot) 119 | ax2 = fig.add_subplot(2, 4, i + 5) 120 | ax2.plot(range(1, num_iterations + 1), cost_history, marker='o', color='blue', linewidth=2) 121 | ax2.set_title(f"Cost Over Iterations {i + 1}", fontsize=14) 122 | ax2.set_xlabel("Iteration", fontsize=12) 123 | ax2.set_ylabel("Cost", fontsize=12) 124 | 125 | plt.tight_layout() 126 | plt.show() 127 | -------------------------------------------------------------------------------- /Problems/Ant Colony
Optimization Bin Packing Problem.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import random 4 | 5 | # Ant Colony Optimization Parameters 6 | NUM_ANTS = 5 7 | MAX_ITERATIONS = 200 8 | NUM_ITEMS = 78 9 | BIN_CAPACITY = 256 10 | EVAPORATION_RATE = 0.5 11 | PHEROMONE_IMPORTANCE = 1 12 | HEURISTIC_IMPORTANCE = 2 13 | INITIAL_PHEROMONE = 1.0 14 | 15 | # Generate a random bin-packing problem 16 | def generate_bin_packing_problem(num_items, bin_capacity): 17 | weights = np.random.randint(1, bin_capacity // 2, size=num_items) 18 | return weights, bin_capacity 19 | 20 | # Calculate the cost of a solution (total bins used) 21 | def calculate_cost(solution, weights, bin_capacity): 22 | max_bin_index = max(solution) + 1 # Ensure bins are initialized for all indices 23 | bins = [0] * max_bin_index # Initialize bins dynamically 24 | 25 | for item_index, bin_index in enumerate(solution): 26 | bins[bin_index] += weights[item_index] 27 | if bins[bin_index] > bin_capacity: 28 | return float('inf') # Penalize invalid solutions exceeding bin capacity 29 | 30 | return len([b for b in bins if b > 0]) # Return total number of bins used 31 | 32 | # Generate a random initial solution 33 | def generate_initial_solution(weights, bin_capacity): 34 | solution = np.zeros_like(weights, dtype=int) 35 | bins = [0] 36 | for i, weight in enumerate(weights): 37 | assigned = False 38 | for bin_index in range(len(bins)): 39 | if bins[bin_index] + weight <= bin_capacity: 40 | bins[bin_index] += weight 41 | solution[i] = bin_index 42 | assigned = True 43 | break 44 | if not assigned: 45 | bins.append(weight) 46 | solution[i] = len(bins) - 1 47 | return solution 48 | 49 | # Ant Colony Optimization for Bin Packing Problem 50 | def aco_bin_packing(weights, bin_capacity): 51 | num_items = len(weights) 52 | pheromone = np.full((num_items, num_items), INITIAL_PHEROMONE) 53 | best_solution = None 54 | best_cost = float('inf') 55 | iteration_costs = [] 56 | 57 | for iteration in range(MAX_ITERATIONS): 58 | solutions = [] 59 | costs = [] 60 | 61 | for ant in range(NUM_ANTS): 62 | solution = [] 63 | bins = [0] 64 | 65 | for i in range(num_items): 66 | probabilities = [] 67 | for bin_index in range(len(bins) + 1): 68 | if bin_index == len(bins): 69 | # New bin 70 | if weights[i] <= bin_capacity: 71 | probabilities.append((pheromone[i][bin_index - 1] ** PHEROMONE_IMPORTANCE) * 72 | ((1.0 / (1 + weights[i])) ** HEURISTIC_IMPORTANCE)) 73 | else: 74 | probabilities.append(0) 75 | else: 76 | if bins[bin_index] + weights[i] <= bin_capacity: 77 | probabilities.append((pheromone[i][bin_index] ** PHEROMONE_IMPORTANCE) * 78 | ((1.0 / (1 + bins[bin_index] + weights[i])) ** HEURISTIC_IMPORTANCE)) 79 | else: 80 | probabilities.append(0) 81 | 82 | probabilities = np.array(probabilities) / sum(probabilities) 83 | chosen_bin = np.random.choice(range(len(probabilities)), p=probabilities) 84 | 85 | if chosen_bin == len(bins): 86 | bins.append(weights[i]) 87 | else: 88 | bins[chosen_bin] += weights[i] 89 | 90 | solution.append(chosen_bin) 91 | 92 | cost = calculate_cost(solution, weights, bin_capacity) 93 | solutions.append(solution) 94 | costs.append(cost) 95 | 96 | if cost < best_cost: 97 | best_solution = solution 98 | best_cost = cost 99 | 100 | # Update pheromones 101 | pheromone *= (1 - EVAPORATION_RATE) 102 | for solution, cost in zip(solutions, costs): 103 | for i, bin_index in enumerate(solution): 104 | pheromone[i][bin_index] += 1.0 / cost 105 | 
106 | iteration_costs.append(best_cost) 107 | print(f"Iteration {iteration + 1}, Best Cost: {best_cost}") 108 | 109 | return best_solution, best_cost, iteration_costs 110 | 111 | def main(): 112 | weights, bin_capacity = generate_bin_packing_problem(NUM_ITEMS, BIN_CAPACITY) 113 | print("Weights:", weights) 114 | print("Bin Capacity:", bin_capacity) 115 | 116 | best_solution, best_cost, iteration_costs = aco_bin_packing(weights, bin_capacity) 117 | 118 | print("\nBest Solution:", best_solution) 119 | print("Number of Bins Used:", best_cost) 120 | 121 | # Plot optimization progress 122 | plt.figure(figsize=(10, 6)) 123 | plt.plot(iteration_costs, marker='o') 124 | plt.title("ACO Optimization Progress for Bin Packing") 125 | plt.xlabel("Iteration") 126 | plt.ylabel("Best Cost (Number of Bins)") 127 | plt.grid() 128 | plt.show() 129 | 130 | if __name__ == "__main__": 131 | main() -------------------------------------------------------------------------------- /Problems/Bat Algorithm Feature Selection.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import random 4 | from sklearn.model_selection import train_test_split 5 | from xgboost import XGBClassifier 6 | from sklearn.metrics import accuracy_score 7 | 8 | # Bat Algorithm Parameters 9 | POPULATION_SIZE = 3 10 | MAX_GENERATIONS = 30 11 | LOUDNESS = 0.5 12 | PULSE_RATE = 0.5 13 | FREQ_MIN = 0 14 | FREQ_MAX = 2 15 | NUM_SAMPLES = 500 16 | NUM_FEATURES = 10 17 | NUM_CLASSES = 3 18 | NUM_SELECTED_FEATURES = 5 19 | 20 | # Generate random dataset 21 | def generate_random_data(num_samples, num_features, num_classes): 22 | X = np.random.rand(num_samples, num_features) 23 | y = np.random.randint(0, num_classes, num_samples) 24 | return X, y 25 | 26 | # Cost function 27 | def cost_function(X, y, selected_features): 28 | if np.sum(selected_features) == 0: 29 | return float('inf') 30 | 31 | X_selected = X[:, selected_features == 1] 32 | X_train, X_test, y_train, y_test = train_test_split(X_selected, y, test_size=0.3, random_state=42) 33 | 34 | model = XGBClassifier(use_label_encoder=False, eval_metric='mlogloss') 35 | model.fit(X_train, y_train) 36 | y_pred = model.predict(X_test) 37 | return 1 - accuracy_score(y_test, y_pred) # Minimize error 38 | 39 | # Initialize population 40 | def initialize_population(size, num_features): 41 | population = np.zeros((size, num_features)) 42 | for i in range(size): 43 | selected_indices = np.random.choice(num_features, NUM_SELECTED_FEATURES, replace=False) 44 | population[i, selected_indices] = 1 45 | return population 46 | 47 | # Advance a bat: updates velocity in place and returns the new (binary-clipped) position 48 | def update_position(individual, velocity, frequency, best_individual): 49 | velocity += (individual - best_individual) * frequency 50 | new_position = individual + velocity 51 | return np.clip(np.round(new_position), 0, 1) 52 | 53 | # Bat Algorithm for Feature Selection 54 | def bat_algorithm(X, y): 55 | num_features = X.shape[1] 56 | population = initialize_population(POPULATION_SIZE, num_features) 57 | velocities = np.zeros((POPULATION_SIZE, num_features)) 58 | cost = np.array([cost_function(X, y, individual) for individual in population]) 59 | 60 | best_individual = population[np.argmin(cost)].copy() # copy: rows of population are overwritten later 61 | best_cost = np.min(cost) 62 | costs_over_time = [] 63 | 64 | for generation in range(MAX_GENERATIONS): 65 | for i in range(POPULATION_SIZE): 66 | frequency = FREQ_MIN + (FREQ_MAX - FREQ_MIN) * random.random() 67 | candidate = 
update_position(population[i], velocities[i], frequency, best_individual) 68 | # (update_position has already advanced velocities[i] in place) 69 | 70 | if random.random() > PULSE_RATE: 71 | candidate = best_individual.copy() 72 | mutation_index = random.randint(0, num_features - 1) 73 | candidate[mutation_index] = 1 - candidate[mutation_index] 74 | 75 | candidate_cost = cost_function(X, y, candidate) 76 | if candidate_cost < cost[i] and random.random() < LOUDNESS: 77 | population[i] = candidate 78 | cost[i] = candidate_cost 79 | 80 | if candidate_cost < best_cost: 81 | best_individual = candidate 82 | best_cost = candidate_cost 83 | 84 | costs_over_time.append(best_cost) 85 | print(f"Generation {generation + 1}, Best Cost: {best_cost:.4f}") 86 | 87 | return best_individual, 1 - best_cost, costs_over_time 88 | 89 | def main(): 90 | X, y = generate_random_data(NUM_SAMPLES, NUM_FEATURES, NUM_CLASSES) 91 | # Split data for original accuracy evaluation 92 | X_train_full, X_test_full, y_train_full, y_test_full = train_test_split(X, y, test_size=0.3, random_state=42) 93 | model_full = XGBClassifier(use_label_encoder=False, eval_metric='mlogloss') 94 | model_full.fit(X_train_full, y_train_full) 95 | y_pred_full = model_full.predict(X_test_full) 96 | original_accuracy = accuracy_score(y_test_full, y_pred_full) 97 | 98 | best_features, best_accuracy, costs_over_time = bat_algorithm(X, y) 99 | 100 | # Evaluate accuracy with selected features 101 | X_selected = X[:, best_features == 1] 102 | X_train_selected, X_test_selected, y_train_selected, y_test_selected = train_test_split(X_selected, y, test_size=0.3, random_state=42) 103 | model_selected = XGBClassifier(use_label_encoder=False, eval_metric='mlogloss') 104 | model_selected.fit(X_train_selected, y_train_selected) 105 | y_pred_selected = model_selected.predict(X_test_selected) 106 | selected_accuracy = accuracy_score(y_test_selected, y_pred_selected) 107 | 108 | print("\nOriginal Accuracy with All Features:", original_accuracy) 109 | print("Selected Features (1=selected, 0=not selected):", best_features) 110 | print("Accuracy with Selected Features:", selected_accuracy) 111 | 112 | # Count selected features 113 | print("Number of Selected Features:", np.sum(best_features)) 114 | 115 | # Plot optimization progress 116 | plt.figure(figsize=(10, 6)) 117 | plt.plot(costs_over_time, marker='o') 118 | plt.title("Bat Algorithm Optimization Progress") 119 | plt.xlabel("Generation") 120 | plt.ylabel("Best Cost") 121 | plt.grid() 122 | plt.show() 123 | 124 | if __name__ == "__main__": 125 | main() -------------------------------------------------------------------------------- /Problems/Bees CNN Optimized (weights and biases).py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.datasets import load_iris 3 | from sklearn.model_selection import train_test_split 4 | from sklearn.preprocessing import OneHotEncoder, StandardScaler 5 | from sklearn.metrics import classification_report 6 | import tensorflow as tf 7 | 8 | # Load Iris dataset 9 | data = load_iris() 10 | X = data.data 11 | y = data.target 12 | 13 | # Preprocess the dataset 14 | scaler = StandardScaler() 15 | X = scaler.fit_transform(X) 16 | 17 | encoder = OneHotEncoder(sparse_output=False) 18 | y = encoder.fit_transform(y.reshape(-1, 1)) 19 | 20 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) 21 | 22 | # Define the Bees Algorithm 23 | class BeesAlgorithm: 24 | def 
__init__(self, model, X_train, y_train, n_bees=20, elite_bees=5, patch_size=0.1, iterations=50): 25 | self.model = model 26 | self.X_train = X_train 27 | self.y_train = y_train 28 | self.n_bees = n_bees 29 | self.elite_bees = elite_bees 30 | self.patch_size = patch_size 31 | self.iterations = iterations 32 | 33 | # Initialize bees (weights and biases) 34 | self.bees = [self.generate_solution() for _ in range(self.n_bees)] 35 | 36 | def generate_solution(self): 37 | # Flatten weights and biases into a single vector 38 | weights_and_biases = [] 39 | for layer in self.model.trainable_variables: 40 | weights_and_biases.append(layer.numpy().flatten()) 41 | return np.concatenate(weights_and_biases) 42 | 43 | def decode_solution(self, solution): 44 | # Decode the flat vector into weights and biases for the model 45 | shapes = [layer.shape for layer in self.model.trainable_variables] 46 | split_points = np.cumsum([np.prod(shape) for shape in shapes]) 47 | decoded = np.split(solution, split_points[:-1]) 48 | decoded = [np.reshape(arr, shape) for arr, shape in zip(decoded, shapes)] 49 | return decoded 50 | 51 | def set_weights_and_biases(self, solution): 52 | # Set the model's weights and biases 53 | decoded = self.decode_solution(solution) 54 | for layer, new_weights in zip(self.model.trainable_variables, decoded): 55 | layer.assign(new_weights) 56 | 57 | def fitness(self, solution): 58 | # Evaluate the model's accuracy on the training data 59 | self.set_weights_and_biases(solution) 60 | y_pred = self.model(self.X_train) 61 | accuracy = tf.reduce_mean( 62 | tf.cast(tf.equal(tf.argmax(y_pred, axis=1), tf.argmax(self.y_train, axis=1)), tf.float32) 63 | ).numpy() 64 | return accuracy 65 | 66 | def optimize(self): 67 | for iteration in range(self.iterations): 68 | # Evaluate fitness for all bees 69 | fitness_scores = [self.fitness(bee) for bee in self.bees] 70 | 71 | # Sort bees by fitness 72 | sorted_indices = np.argsort(fitness_scores)[::-1] 73 | self.bees = [self.bees[i] for i in sorted_indices] 74 | 75 | # Keep elite bees 76 | elite_bees = self.bees[:self.elite_bees] 77 | 78 | # Scout new bees around elite bees 79 | for i in range(self.elite_bees, self.n_bees): 80 | elite_index = i % self.elite_bees 81 | new_bee = elite_bees[elite_index] + np.random.uniform( 82 | -self.patch_size, self.patch_size, size=elite_bees[elite_index].shape 83 | ) 84 | self.bees[i] = new_bee 85 | 86 | # Reduce patch size over iterations 87 | self.patch_size *= 0.95 88 | 89 | # Print progress 90 | best_fitness = fitness_scores[sorted_indices[0]] 91 | print(f"Iteration {iteration + 1}/{self.iterations}, Best Fitness: {best_fitness:.4f}") 92 | 93 | # Return the best solution 94 | best_solution = self.bees[0] 95 | return best_solution 96 | 97 | 98 | # Define the neural network (simple feedforward model) 99 | def build_model(input_dim, output_dim): 100 | model = tf.keras.Sequential([ 101 | tf.keras.layers.Dense(10, activation='relu', input_dim=input_dim), 102 | tf.keras.layers.Dense(output_dim, activation='softmax') 103 | ]) 104 | return model 105 | 106 | 107 | # Build and compile the model 108 | input_dim = X_train.shape[1] 109 | output_dim = y_train.shape[1] 110 | model = build_model(input_dim, output_dim) 111 | 112 | # Initialize Bees Algorithm 113 | bees_algorithm = BeesAlgorithm(model, X_train, y_train, n_bees=30, elite_bees=5, patch_size=0.1, iterations=50) 114 | 115 | # Optimize weights and biases 116 | best_solution = bees_algorithm.optimize() 117 | 118 | # Set the optimized weights and biases to the model 119 | 
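# (decode_solution splits the flat vector back into per-layer tensors, so the assignment
# below is what makes the Keras model actually reflect the best bee found by the search.)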
bees_algorithm.set_weights_and_biases(best_solution) 120 | 121 | # Evaluate the optimized model on the test set 122 | y_test_pred = model(X_test) 123 | y_test_pred_classes = tf.argmax(y_test_pred, axis=1).numpy() 124 | y_test_true_classes = tf.argmax(y_test, axis=1).numpy() 125 | 126 | # Print classification report 127 | print("\nClassification Report (Test Data):") 128 | print(classification_report(y_test_true_classes, y_test_pred_classes)) 129 | -------------------------------------------------------------------------------- /Problems/Bees Economic Dispatching.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Define the system model 5 | def make_model(): 6 | return { 7 | "PD": 1500, # Power demand 8 | "Plants": { 9 | "Pmin": np.array([100, 80, 50, 60, 40]), 10 | "Pmax": np.array([500, 400, 300, 250, 200]), 11 | "alpha": np.array([300, 280, 260, 240, 220]), 12 | "beta": np.array([8, 7.5, 7, 6.5, 6]), 13 | "gamma": np.array([0.03, 0.028, 0.027, 0.026, 0.025]), 14 | }, 15 | "nPlant": 5, # Number of plants 16 | } 17 | 18 | # Parse function to map x to actual power values 19 | def parse(x, model): 20 | Pmin = model["Plants"]["Pmin"] 21 | Pmax = model["Plants"]["Pmax"] 22 | P = Pmin + (Pmax - Pmin) * x 23 | return P 24 | 25 | # Define the cost function 26 | def cost_function(x, model): 27 | P = parse(x, model) 28 | alpha = model["Plants"]["alpha"] 29 | beta = model["Plants"]["beta"] 30 | gamma = model["Plants"]["gamma"] 31 | 32 | # Calculate cost 33 | cost = np.sum(alpha + beta * P + gamma * P ** 2) 34 | 35 | # Power balance constraint 36 | P_total = np.sum(P) 37 | PD = model["PD"] 38 | power_loss = 0.05 * P_total # Simplified power loss model 39 | power_balance_violation = max(0, PD - (P_total - power_loss)) 40 | 41 | penalty = 10 # Penalty for constraint violation 42 | z = cost + penalty * power_balance_violation 43 | 44 | return z, { 45 | "P": P, 46 | "Cost": cost, 47 | "PowerLoss": power_loss, 48 | "PowerBalanceViolation": power_balance_violation, 49 | } 50 | 51 | # Define fuzzy logic adjustment 52 | def fuzzy_adjustment(iteration, max_iter, violation): 53 | if violation > 0.1: 54 | penalty = 20 # Increase penalty for high violations 55 | else: 56 | penalty = 10 57 | 58 | if iteration / max_iter < 0.5: 59 | r = 0.2 # Larger neighborhood radius in early iterations 60 | else: 61 | r = 0.1 # Smaller radius for fine-tuning 62 | 63 | return penalty, r 64 | 65 | # Bee dance function 66 | def bee_dance(position, r): 67 | nVar = len(position) 68 | k = np.random.randint(0, nVar) 69 | new_position = position.copy() 70 | new_position[k] += np.random.uniform(-r, r) 71 | new_position = np.clip(new_position, 0, 1) # Ensure within bounds 72 | return new_position 73 | 74 | # Bees Algorithm implementation 75 | def bees_algorithm(model): 76 | # Parameters 77 | max_iter = 20 78 | n_scout_bees = 7 79 | n_elite_sites = 3 80 | n_selected_sites = 4 81 | n_elite_bees = 5 82 | n_selected_bees = 3 83 | rdamp = 0.7 84 | 85 | # Initialize scout bees 86 | bees = [{"position": np.random.uniform(0, 1, model["nPlant"]), "cost": None} for _ in range(n_scout_bees)] 87 | for bee in bees: 88 | bee["cost"], bee["details"] = cost_function(bee["position"], model) 89 | 90 | # Sort by cost 91 | bees = sorted(bees, key=lambda b: b["cost"]) 92 | best_costs = [] 93 | 94 | # Main loop 95 | for iteration in range(max_iter): 96 | print(f"Iteration {iteration + 1}/{max_iter}") 97 | 98 | # Adjust fuzzy parameters 99 | penalty, r = 
fuzzy_adjustment(iteration, max_iter, bees[0]["details"]["PowerBalanceViolation"]) 100 | 101 | # Elite sites 102 | for i in range(n_elite_sites): 103 | for _ in range(n_elite_bees): 104 | new_position = bee_dance(bees[i]["position"], r) 105 | new_cost, new_details = cost_function(new_position, model) 106 | if new_cost < bees[i]["cost"]: 107 | bees[i] = {"position": new_position, "cost": new_cost, "details": new_details} 108 | 109 | # Selected non-elite sites 110 | for i in range(n_elite_sites, n_selected_sites): 111 | for _ in range(n_selected_bees): 112 | new_position = bee_dance(bees[i]["position"], r) 113 | new_cost, new_details = cost_function(new_position, model) 114 | if new_cost < bees[i]["cost"]: 115 | bees[i] = {"position": new_position, "cost": new_cost, "details": new_details} 116 | 117 | # Non-selected sites 118 | for i in range(n_selected_sites, n_scout_bees): 119 | new_position = np.random.uniform(0, 1, model["nPlant"]) 120 | new_cost, new_details = cost_function(new_position, model) 121 | bees[i] = {"position": new_position, "cost": new_cost, "details": new_details} 122 | 123 | # Sort by cost 124 | bees = sorted(bees, key=lambda b: b["cost"]) 125 | 126 | # Store the best cost 127 | best_costs.append(bees[0]["cost"]) 128 | print(f"Best cost at iteration {iteration + 1}: {bees[0]['cost']:.2f}") 129 | 130 | # Final results 131 | best_solution = bees[0] 132 | print("\nBest Solution:") 133 | print(f"Cost: {best_solution['cost']:.2f}") 134 | print(f"Power Distribution: {best_solution['details']['P']}") 135 | 136 | # Plot the results 137 | plt.plot(best_costs, marker="o") 138 | plt.xlabel("Iteration") 139 | plt.ylabel("Best Cost") 140 | plt.title("Convergence of Bees Algorithm with Fuzzy Logic") 141 | plt.grid() 142 | plt.show() 143 | 144 | # Run the algorithm 145 | if __name__ == "__main__": 146 | model = make_model() 147 | bees_algorithm(model) 148 | -------------------------------------------------------------------------------- /Problems/Biogeography-Based Optimization Minimum Spanning Tree.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import networkx as nx 3 | import matplotlib.pyplot as plt 4 | import random 5 | 6 | # BBO Algorithm Parameters 7 | POPULATION_SIZE = 20 8 | MUTATION_RATE = 0.2 9 | MAX_GENERATIONS = 500 10 | NUM_NODES = 8 11 | MAX_EDGE_WEIGHT = 20 12 | 13 | # Create a random graph 14 | def create_random_graph(num_nodes, max_edge_weight): 15 | graph = nx.complete_graph(num_nodes) 16 | for (u, v) in graph.edges(): 17 | graph.edges[u, v]['weight'] = random.randint(1, max_edge_weight) 18 | return graph 19 | 20 | # Fitness function for MST (lower cost is better); edge sets that are not spanning trees get near-zero fitness 21 | def fitness_function(graph, individual): 22 | candidate = nx.Graph(list(individual)) 23 | return 1 / sum(graph.edges[e]['weight'] for e in individual) if candidate.number_of_nodes() == NUM_NODES and nx.is_tree(candidate) else 1e-9 24 | 25 | # Generate initial population of random spanning trees 26 | def generate_population(graph, size): 27 | population = [] 28 | for _ in range(size): 29 | nodes = list(graph.nodes) 30 | random.shuffle(nodes) 31 | population.append([(nodes[i], nodes[i + 1]) for i in range(NUM_NODES - 1)]) # a random Hamiltonian path is a valid spanning tree 32 | return population 33 | 34 | # Selection function (roulette wheel) 35 | def select(population, fitnesses): 36 | total_fitness = sum(fitnesses) 37 | probabilities = [f / total_fitness for f in fitnesses] 38 | selected_index = np.random.choice(len(population), p=probabilities) 39 | return population[selected_index] 40 | 41 | # Mutation function 42 | def mutate(individual, graph): 43 | if random.random() < 
MUTATION_RATE: 44 | new_edge = random.choice(list(graph.edges)) 45 | individual[random.randint(0, len(individual) - 1)] = new_edge 46 | return individual 47 | 48 | # Plot final MST 49 | def plot_final_mst(graph, edges): 50 | mst_graph = nx.Graph() 51 | mst_graph.add_edges_from(edges) 52 | pos = nx.spring_layout(graph) 53 | 54 | plt.figure(figsize=(8, 6)) 55 | nx.draw(graph, pos, with_labels=True, node_color='lightblue', edge_color='gray', node_size=500, font_size=10) 56 | nx.draw(mst_graph, pos, with_labels=True, edge_color='red', width=2) 57 | plt.title("Final MST") 58 | plt.show() 59 | 60 | # BBO Main Function 61 | def bbo_mst(graph): 62 | population = generate_population(graph, POPULATION_SIZE) 63 | best_solution = None 64 | best_cost = float('inf') 65 | iteration_costs = [] 66 | 67 | for generation in range(MAX_GENERATIONS): 68 | fitnesses = [fitness_function(graph, individual) for individual in population] 69 | best_index = np.argmax(fitnesses) 70 | current_best_cost = 1 / fitnesses[best_index] 71 | 72 | if current_best_cost < best_cost: 73 | best_solution = population[best_index] 74 | best_cost = current_best_cost 75 | 76 | iteration_costs.append(best_cost) 77 | 78 | new_population = [] 79 | for _ in range(POPULATION_SIZE): 80 | parent = select(population, fitnesses) 81 | offspring = mutate(parent.copy(), graph) 82 | new_population.append(offspring) 83 | 84 | population = new_population 85 | 86 | print(f"Generation {generation + 1}, Best MST Cost: {best_cost}") 87 | 88 | return best_solution, best_cost, iteration_costs 89 | 90 | # Main Execution 91 | if __name__ == "__main__": 92 | random_graph = create_random_graph(NUM_NODES, MAX_EDGE_WEIGHT) 93 | best_mst, best_mst_cost, costs_over_time = bbo_mst(random_graph) 94 | 95 | print("\nFinal Best MST Cost:", best_mst_cost) 96 | print("Best MST Edges:", best_mst) 97 | 98 | # Plot the final MST 99 | plot_final_mst(random_graph, best_mst) 100 | 101 | # Plot iterations over time 102 | plt.figure(figsize=(10, 6)) 103 | plt.plot(costs_over_time, marker='o') 104 | plt.title("BBO Optimization of MST") 105 | plt.xlabel("Generation") 106 | plt.ylabel("Best MST Cost") 107 | plt.grid() 108 | plt.show() 109 | -------------------------------------------------------------------------------- /Problems/Brain Storm Optimization Parallel Machine Scheduling.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import random 4 | 5 | # Function to generate a random parallel machine scheduling problem 6 | def generate_parallel_machine_problem(num_tasks, num_machines): 7 | tasks = np.arange(1, num_tasks + 1) # Task IDs 8 | processing_times = np.random.randint(10, 100, size=num_tasks) # Random processing times 9 | return tasks, processing_times, num_machines 10 | 11 | # Objective function: calculate makespan (Cmax) of a given schedule 12 | def calculate_makespan(schedule, processing_times, num_machines): 13 | machine_times = np.zeros(num_machines) 14 | for machine, task in enumerate(schedule): 15 | machine_times[machine % num_machines] += processing_times[task - 1] 16 | return max(machine_times) 17 | 18 | # Brain Storm Optimization (BSO) for Parallel Machine Scheduling 19 | def brain_storm_optimization(tasks, processing_times, num_machines, iterations=200, population_size=100): 20 | num_tasks = len(tasks) 21 | best_schedule = None 22 | best_makespan = float('inf') 23 | 24 | # Initial population of random schedules 25 | population = [np.random.permutation(tasks) for _ in 
range(population_size)] 26 | makespans = [calculate_makespan(schedule, processing_times, num_machines) for schedule in population] 27 | 28 | best_schedule = population[np.argmin(makespans)] 29 | best_makespan = min(makespans) 30 | 31 | makespan_progress = [best_makespan] 32 | 33 | for iteration in range(iterations): 34 | # Generate new solutions by mutation and combination 35 | new_population = [] 36 | for i in range(population_size): 37 | if random.random() < 0.5: # Mutation 38 | new_schedule = population[i].copy() 39 | idx1, idx2 = np.random.choice(num_tasks, 2, replace=False) 40 | new_schedule[idx1], new_schedule[idx2] = new_schedule[idx2], new_schedule[idx1] 41 | else: # Combination 42 | parent1, parent2 = random.sample(population, 2) 43 | split_point = np.random.randint(1, num_tasks - 1) 44 | new_schedule = np.concatenate((parent1[:split_point], parent2[split_point:])) 45 | _, first_seen = np.unique(new_schedule, return_index=True); new_schedule = new_schedule[np.sort(first_seen)] # np.unique sorts, so restore first-occurrence order 46 | new_schedule = np.append(new_schedule, np.setdiff1d(tasks, new_schedule)) 47 | 48 | new_population.append(new_schedule) 49 | 50 | # Evaluate new population 51 | new_makespans = [calculate_makespan(schedule, processing_times, num_machines) for schedule in new_population] 52 | 53 | # Update best solution 54 | min_new_makespan = min(new_makespans) 55 | if min_new_makespan < best_makespan: 56 | best_makespan = min_new_makespan 57 | best_schedule = new_population[np.argmin(new_makespans)] 58 | 59 | # Replace old population with new one 60 | population = new_population 61 | makespan_progress.append(best_makespan) 62 | 63 | return best_schedule, best_makespan, makespan_progress 64 | 65 | # Main execution for a single run 66 | num_tasks = 14 67 | num_machines = 4 68 | tasks, processing_times, num_machines = generate_parallel_machine_problem(num_tasks, num_machines) 69 | 70 | best_schedule, best_makespan, makespan_progress = brain_storm_optimization( 71 | tasks, processing_times, num_machines 72 | ) 73 | 74 | # Plotting iteration progress 75 | plt.figure(figsize=(10, 6)) 76 | plt.plot(makespan_progress, marker="o", linestyle="--") 77 | plt.title("Makespan over Iterations - Brain Storm Optimization") 78 | plt.xlabel("Iteration") 79 | plt.ylabel("Makespan (Cmax)") 80 | plt.grid() 81 | plt.show() 82 | 83 | # Plotting the solution: assign tasks with the same round-robin rule that calculate_makespan scores 84 | machine_assignments = [[] for _ in range(num_machines)] 85 | machine_times = np.zeros(num_machines) 86 | for position, task in enumerate(best_schedule): 87 | machine = position % num_machines 88 | machine_assignments[machine].append(task) 89 | machine_times[machine] += processing_times[task - 1] 90 | 91 | plt.figure(figsize=(12, 8)) 92 | for i, machine in enumerate(machine_assignments, 1): 93 | start = 0 94 | for task in machine: 95 | plt.barh(i, processing_times[task - 1], left=start, color="lime", edgecolor="black") 96 | plt.text(start + processing_times[task - 1] / 2, i, str(task), va='center', ha='center', fontsize=10, color="black") 97 | start += processing_times[task - 1] 98 | plt.axvline(best_makespan, color="yellow", linestyle="--", linewidth=2, label=f"Cmax = {best_makespan}") 99 | plt.title("Parallel Machine Scheduling") 100 | plt.xlabel("Tasks") 101 | plt.ylabel("Machines") 102 | plt.yticks(range(1, num_machines + 1)) 103 | plt.legend() 104 | plt.grid(axis="x") 105 | plt.show() 106 | 107 | # Display results 108 | import pandas as pd 109 | results = pd.DataFrame({ 110 | "Best Schedule": [best_schedule], 111 | "Best Makespan": [best_makespan], 112 | "Iterations": [len(makespan_progress)], 113 | }) 114 | 115 | # 
Display results in the console 116 | print("Parallel Machine Scheduling Results:") 117 | print(results) -------------------------------------------------------------------------------- /Problems/Cuckoo Search Traveling Salesman Problem.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import random 4 | from scipy.spatial import distance 5 | 6 | # Cuckoo Search Algorithm Parameters 7 | POPULATION_SIZE = 25 8 | MAX_GENERATIONS = 100 9 | NUM_LOCATIONS = 10 10 | MAX_COORDINATE = 100 11 | LEVY_FLIGHT_STRENGTH = 1.5 12 | DISCOVERY_RATE = 0.25 13 | 14 | def create_random_locations(num_locations, max_coordinate): 15 | return np.random.randint(0, max_coordinate, size=(num_locations, 2)) 16 | 17 | def calculate_tsp_cost(locations, path): 18 | cost = 0 19 | for i in range(len(path)): 20 | cost += distance.euclidean(locations[path[i]], locations[path[(i + 1) % len(path)]]) 21 | return cost 22 | 23 | def levy_flight(Lambda): # step-size generator for a Levy-flight move (kept for reference; the search loop below uses a simple swap move) 24 | u = np.random.normal(0, 1) * (1 / abs(np.random.normal(0, 1))) ** (1 / Lambda) 25 | v = np.random.normal(0, 1) 26 | return u / abs(v) ** (1 / Lambda) 27 | 28 | def generate_initial_population(size, num_locations): 29 | population = [] 30 | for _ in range(size): 31 | individual = list(range(num_locations)) 32 | random.shuffle(individual) 33 | population.append(individual) 34 | return population 35 | 36 | def replace_worst_nests(population, fitness, discovery_rate): 37 | num_replace = int(len(population) * discovery_rate) 38 | worst_indices = np.argsort(fitness)[-num_replace:] 39 | for i in worst_indices: 40 | individual = list(range(len(population[0]))) 41 | random.shuffle(individual) 42 | population[i] = individual 43 | 44 | def cuckoo_search(locations): 45 | population = generate_initial_population(POPULATION_SIZE, len(locations)) 46 | best_solution = None 47 | best_cost = float('inf') 48 | costs_over_time = [] 49 | 50 | for generation in range(MAX_GENERATIONS): 51 | fitness = [calculate_tsp_cost(locations, individual) for individual in population] 52 | min_cost_index = np.argmin(fitness) 53 | current_best_cost = fitness[min_cost_index] 54 | 55 | if current_best_cost < best_cost: 56 | best_cost = current_best_cost 57 | best_solution = population[min_cost_index] 58 | 59 | costs_over_time.append(best_cost) 60 | 61 | for i in range(POPULATION_SIZE): 62 | cuckoo = population[i][:] 63 | index1, index2 = random.sample(range(len(cuckoo)), 2) 64 | cuckoo[index1], cuckoo[index2] = cuckoo[index2], cuckoo[index1] 65 | cuckoo_cost = calculate_tsp_cost(locations, cuckoo) 66 | 67 | if cuckoo_cost < fitness[i]: 68 | population[i] = cuckoo; fitness[i] = cuckoo_cost # keep fitness in sync so replace_worst_nests sees current costs 69 | 70 | replace_worst_nests(population, fitness, DISCOVERY_RATE) 71 | 72 | print(f"Generation {generation + 1}, Best TSP Cost: {best_cost}") 73 | 74 | return best_solution, best_cost, costs_over_time 75 | 76 | def plot_tsp_solution(locations, solution, title): 77 | plt.figure(figsize=(8, 6)) 78 | x = [locations[city][0] for city in solution + [solution[0]]] 79 | y = [locations[city][1] for city in solution + [solution[0]]] 80 | plt.plot(x, y, marker="o", linestyle="-", color="blue", label="Path") 81 | plt.scatter(locations[:, 0], locations[:, 1], color="red", s=100, label="Cities") 82 | plt.title(title) 83 | plt.xlabel("X Coordinate") 84 | plt.ylabel("Y Coordinate") 85 | plt.legend() 86 | plt.grid() 87 | plt.show() 88 | 89 | def main(): 90 | locations = create_random_locations(NUM_LOCATIONS, MAX_COORDINATE) 91 | best_solution, best_cost, costs_over_time = 
cuckoo_search(locations) 92 | 93 | print("\nFinal Best TSP Cost:", best_cost) 94 | print("Best TSP Path:", best_solution) 95 | 96 | plot_tsp_solution(locations, best_solution, "Final TSP Solution") 97 | 98 | # Plot optimization progress 99 | plt.figure(figsize=(10, 6)) 100 | plt.plot(costs_over_time, marker='o') 101 | plt.title("Cuckoo Search Optimization of TSP") 102 | plt.xlabel("Generation") 103 | plt.ylabel("Best Cost") 104 | plt.grid() 105 | plt.show() 106 | 107 | if __name__ == "__main__": 108 | main() -------------------------------------------------------------------------------- /Problems/Differential Evolution Clustering.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from sklearn.datasets import load_iris 4 | from sklearn.preprocessing import StandardScaler 5 | from sklearn.metrics import pairwise_distances_argmin 6 | import matplotlib.pyplot as plt 7 | 8 | # Step 1: Load and Prepare the Iris Dataset 9 | def load_and_preprocess_data(): 10 | iris = load_iris() 11 | X = iris.data # Features 12 | y = iris.target # Labels (not used in clustering) 13 | scaler = StandardScaler() 14 | X_scaled = scaler.fit_transform(X) 15 | return X_scaled, y 16 | 17 | # Step 2: Define Differential Evolution (DE) 18 | class DE: 19 | def __init__(self, n_clusters, n_population, n_iterations, X): 20 | self.n_clusters = n_clusters 21 | self.n_population = n_population 22 | self.n_iterations = n_iterations 23 | self.X = X 24 | self.n_samples, self.n_features = X.shape 25 | 26 | # Initialize population (random cluster centers) 27 | self.population = np.random.rand(n_population, n_clusters, self.n_features) 28 | self.global_best_position = None 29 | self.global_best_score = np.inf 30 | self.cost_history = [] 31 | 32 | def fitness(self, cluster_centers): 33 | # Assign points to nearest cluster center 34 | labels = pairwise_distances_argmin(self.X, cluster_centers) 35 | # Compute intra-cluster distance (sum of squared distances) 36 | score = sum(np.sum((self.X[labels == i] - center) ** 2) 37 | for i, center in enumerate(cluster_centers)) 38 | return score 39 | 40 | def optimize(self): 41 | F = 0.8 # Scaling factor 42 | CR = 0.9 # Crossover probability 43 | for iteration in range(self.n_iterations): 44 | new_population = np.copy(self.population) 45 | for i in range(self.n_population): 46 | # Mutation: Select three random individuals different from i 47 | indices = [idx for idx in range(self.n_population) if idx != i] 48 | a, b, c = self.population[np.random.choice(indices, 3, replace=False)] 49 | mutant_vector = a + F * (b - c) 50 | 51 | # Crossover: Combine mutant vector and target vector 52 | crossover_mask = np.random.rand(*mutant_vector.shape) < CR 53 | trial_vector = np.where(crossover_mask, mutant_vector, self.population[i]) 54 | 55 | # Selection: Evaluate and select the better individual 56 | trial_score = self.fitness(trial_vector) 57 | target_score = self.fitness(self.population[i]) 58 | if trial_score < target_score: 59 | new_population[i] = trial_vector 60 | if trial_score < self.global_best_score: 61 | self.global_best_score = trial_score 62 | self.global_best_position = trial_vector 63 | 64 | self.population = new_population 65 | self.cost_history.append(self.global_best_score) 66 | print(f"Iteration {iteration + 1}/{self.n_iterations}, Best Score: {self.global_best_score}") 67 | 68 | return self.global_best_position, self.cost_history 69 | 70 | # Step 3: Clustering with DE-generated Centers 71 | def 
clustering_with_de(X, n_clusters, n_population, n_iterations): 72 | de = DE(n_clusters, n_population, n_iterations, X) 73 | best_centers, cost_history = de.optimize() 74 | labels = pairwise_distances_argmin(X, best_centers) 75 | return labels, best_centers, cost_history 76 | 77 | # Step 4: Evaluate the Clustering 78 | def evaluate_clustering(X, labels, centers): 79 | quantization_error = sum(np.sum((X[labels == i] - center) ** 2) 80 | for i, center in enumerate(centers)) 81 | intra_cluster_distances = [np.sum((X[labels == i] - center) ** 2) 82 | for i, center in enumerate(centers)] 83 | inter_cluster_distances = np.min( 84 | [np.linalg.norm(center1 - center2) 85 | for i, center1 in enumerate(centers) 86 | for j, center2 in enumerate(centers) if i != j]) 87 | print(f"Quantization Error: {quantization_error:.4f}") 88 | print(f"Intra-cluster Distances: {intra_cluster_distances}") 89 | print(f"Inter-cluster Distance: {inter_cluster_distances:.4f}") 90 | return quantization_error, intra_cluster_distances, inter_cluster_distances 91 | 92 | # Step 5: Visualize the Clustering Result 93 | def visualize_results(X, labels, centers, cost_history): 94 | fig, axes = plt.subplots(1, 2, figsize=(12, 5)) 95 | 96 | # Clustering result 97 | axes[0].scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', marker='o', alpha=0.7) 98 | axes[0].scatter(centers[:, 0], centers[:, 1], c='red', marker='x', s=200, label='Centers') 99 | axes[0].set_title("Clustering Result with DE") 100 | axes[0].legend() 101 | 102 | # DE iteration cost 103 | axes[1].plot(range(1, len(cost_history) + 1), cost_history, marker='o') 104 | axes[1].set_title("DE Iteration Cost") 105 | axes[1].set_xlabel("Iteration") 106 | axes[1].set_ylabel("Cost (Fitness)") 107 | 108 | plt.tight_layout() 109 | plt.show() 110 | 111 | # Step 6: Main Function 112 | def main(): 113 | X, y = load_and_preprocess_data() 114 | n_clusters = 3 115 | n_population = 10 116 | n_iterations = 100 117 | 118 | labels, centers, cost_history = clustering_with_de(X, n_clusters, n_population, n_iterations) 119 | evaluate_clustering(X, labels, centers) 120 | visualize_results(X, labels, centers, cost_history) 121 | 122 | if __name__ == "__main__": 123 | main() 124 | -------------------------------------------------------------------------------- /Problems/Differential Evolution Protein Structure Prediction.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from scipy.spatial.distance import pdist, squareform 4 | from scipy.interpolate import splprep, splev 5 | 6 | # Define the Enhanced Energy Function 7 | def energy_function(positions): 8 | """Calculate the energy with Lennard-Jones potential and harmonic bonds.""" 9 | distances = pdist(positions) # Pairwise distances 10 | distances_matrix = squareform(distances) 11 | 12 | # Lennard-Jones potential for non-adjacent residues 13 | lj_energy = np.sum(4 * ((1 / distances[distances > 0])**12 - (1 / distances[distances > 0])**6)) 14 | 15 | # Harmonic bond potential for adjacent residues 16 | bond_energy = 0.5 * np.sum((distances_matrix[np.arange(len(positions)-1), np.arange(1, len(positions))] - 1)**2) 17 | 18 | return lj_energy + bond_energy 19 | 20 | # Initialize DE Parameters 21 | num_particles = 40 22 | num_dimensions = 3 # 3D space 23 | num_amino_acids = 15 # Number of residues in the protein 24 | num_iterations = 400 25 | 26 | # Differential Evolution hyperparameters 27 | mutation_factor = 0.5 # Controls the step size 28 | 
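# Typical DE settings (the Storn & Price defaults) are F around 0.5 and a high CR around 0.9
# for problems with correlated variables; a common optional robustness tweak is to dither F,
# e.g. mutation_factor = np.random.uniform(0.4, 0.9) per iteration (a hypothetical variant, not used here).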
crossover_probability = 0.9 # Probability of crossover 29 | 30 | # Initialize particle positions 31 | positions = np.random.uniform(-5, 5, (num_particles, num_amino_acids, num_dimensions)) 32 | 33 | # Evaluate initial fitness 34 | fitness_scores = np.array([energy_function(p) for p in positions]) 35 | 36 | # Track convergence 37 | convergence = [] 38 | 39 | # DE Main Loop 40 | for iteration in range(num_iterations): 41 | for i in range(num_particles): 42 | # Mutation: Create a donor vector 43 | indices = np.random.choice(np.delete(np.arange(num_particles), i), 3, replace=False) 44 | a, b, c = positions[indices] 45 | donor_vector = a + mutation_factor * (b - c) 46 | 47 | # Crossover: Create a trial vector 48 | trial_vector = np.copy(positions[i]) 49 | for j in range(num_amino_acids): 50 | if np.random.rand() < crossover_probability: 51 | trial_vector[j] = donor_vector[j] 52 | 53 | # Selection: Compare trial vector with target vector 54 | trial_fitness = energy_function(trial_vector) 55 | if trial_fitness < fitness_scores[i]: 56 | positions[i] = trial_vector 57 | fitness_scores[i] = trial_fitness 58 | 59 | # Track global best 60 | global_best_index = np.argmin(fitness_scores) 61 | global_best_position = positions[global_best_index] 62 | global_best_score = fitness_scores[global_best_index] 63 | 64 | # Track convergence 65 | convergence.append(global_best_score) 66 | print(f"Iteration {iteration + 1}/{num_iterations}, Best Score: {global_best_score:.4f}") 67 | 68 | # Plot the convergence 69 | plt.figure(figsize=(12, 6)) 70 | plt.plot(convergence, marker='o', linewidth=2) 71 | plt.title("Convergence of DE on Enhanced Energy Function") 72 | plt.xlabel("Iteration") 73 | plt.ylabel("Best Energy") 74 | plt.grid() 75 | plt.show() 76 | 77 | # Visualize the final protein structure 78 | from mpl_toolkits.mplot3d import Axes3D 79 | 80 | fig = plt.figure(figsize=(14, 10)) 81 | ax = fig.add_subplot(111, projection='3d') 82 | 83 | # Smooth the backbone with splines 84 | tck, u = splprep([global_best_position[:, 0], global_best_position[:, 1], global_best_position[:, 2]], s=2) 85 | smoothed_coords = splev(np.linspace(0, 1, 100), tck) 86 | 87 | # Plot amino acids 88 | ax.scatter(global_best_position[:, 0], global_best_position[:, 1], global_best_position[:, 2], c='r', s=100, label='Amino Acids') 89 | 90 | # Plot smoothed backbone 91 | ax.plot(smoothed_coords[0], smoothed_coords[1], smoothed_coords[2], c='b', linewidth=2, label='Backbone') 92 | 93 | # Annotate amino acids 94 | for i, (x, y, z) in enumerate(global_best_position): 95 | ax.text(x, y, z, str(i), color='black', fontsize=10) 96 | 97 | ax.set_title("Optimized Protein Structure with Enhanced DE") 98 | ax.set_xlabel("X") 99 | ax.set_ylabel("Y") 100 | ax.set_zlabel("Z") 101 | ax.legend() 102 | plt.show() 103 | 104 | # Print final details 105 | print("Final Optimized Amino Acid Positions:") 106 | print(global_best_position) 107 | print(f"Final Optimized Energy: {global_best_score:.4f}") 108 | -------------------------------------------------------------------------------- /Problems/Firefly Image Segmentation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from skimage import io, color, exposure 4 | from sklearn.metrics import pairwise_distances 5 | from sklearn.preprocessing import MinMaxScaler 6 | from skimage.color import label2rgb 7 | 8 | def cluster_cost(m, X): 9 | """ 10 | Calculate the cost for clustering. 
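    Given candidate centers m (k x 1 for a grayscale image) and the pixel column vector X,
    each pixel is assigned to its nearest center; the within-cluster distance WCD is the sum
    of those nearest-center distances, and the returned dict carries the per-pixel assignment.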
11 | """ 12 | d = pairwise_distances(X, m, metric='euclidean') 13 | dmin = np.min(d, axis=1) 14 | ind = np.argmin(d, axis=1) 15 | WCD = np.sum(dmin) 16 | return WCD, {'d': d, 'dmin': dmin, 'ind': ind, 'WCD': WCD} 17 | 18 | # Load image 19 | img = io.imread('f.jpg') 20 | img = img / 255.0 # Normalize to [0, 1] 21 | gray = color.rgb2gray(img) 22 | gray = exposure.equalize_adapthist(gray) 23 | 24 | # Reshape image to vector 25 | X = gray.reshape(-1, 1) 26 | 27 | # Firefly Algorithm Parameters 28 | k = 10 # Number of clusters 29 | MaxIt = 50 # Maximum Number of Iterations 30 | nPop = 5 # Number of Fireflies 31 | gamma = 1 # Light Absorption Coefficient 32 | beta0 = 2 # Attraction Coefficient Base Value 33 | alpha = 0.2 # Mutation Coefficient 34 | alpha_damp = 0.98 # Mutation Coefficient Damping Ratio 35 | delta = 0.05 * (X.max() - X.min()) # Uniform Mutation Range 36 | m = 2 # Distance exponent 37 | 38 | dmax = np.linalg.norm(X.max() - X.min()) 39 | 40 | # Initialize firefly population 41 | fireflies = [{'Position': np.random.uniform(X.min(), X.max(), (k, 1)), 'Cost': np.inf, 'Out': None} for _ in range(nPop)] 42 | 43 | # Evaluate initial population 44 | BestSol = {'Cost': np.inf} 45 | for firefly in fireflies: 46 | firefly['Cost'], firefly['Out'] = cluster_cost(firefly['Position'], X) 47 | if firefly['Cost'] < BestSol['Cost']: 48 | BestSol = firefly.copy() 49 | 50 | BestCost = [] 51 | 52 | # Firefly Algorithm Main Loop 53 | for it in range(MaxIt): 54 | new_fireflies = [] 55 | for i, firefly_i in enumerate(fireflies): 56 | new_firefly = {'Cost': np.inf} 57 | for j, firefly_j in enumerate(fireflies): 58 | if firefly_j['Cost'] < firefly_i['Cost']: 59 | rij = np.linalg.norm(firefly_i['Position'] - firefly_j['Position']) / dmax 60 | beta = beta0 * np.exp(-gamma * rij**m) 61 | e = delta * np.random.uniform(-1, 1, firefly_i['Position'].shape) 62 | new_position = firefly_i['Position'] + beta * np.random.rand(*firefly_i['Position'].shape) * (firefly_j['Position'] - firefly_i['Position']) + alpha * e 63 | new_position = np.clip(new_position, X.min(), X.max()) 64 | cost, out = cluster_cost(new_position, X) 65 | if cost < new_firefly['Cost']: 66 | new_firefly = {'Position': new_position, 'Cost': cost, 'Out': out} 67 | if cost < BestSol['Cost']: 68 | BestSol = new_firefly.copy() 69 | new_fireflies.append(new_firefly) 70 | 71 | fireflies = sorted(fireflies + new_fireflies, key=lambda x: x['Cost'])[:nPop] 72 | BestCost.append(BestSol['Cost']) 73 | alpha *= alpha_damp 74 | print(f"Iteration {it + 1}: Best Cost = {BestSol['Cost']}") 75 | 76 | # Reshape best solution 77 | FAlbl = BestSol['Out']['ind'] 78 | segmented = label2rgb(FAlbl.reshape(gray.shape)) 79 | 80 | # Plot results 81 | plt.figure() 82 | plt.plot(BestCost, '--k', linewidth=1) 83 | plt.title('FA Train') 84 | plt.xlabel('FA Iteration Number') 85 | plt.ylabel('FA Best Cost Value') 86 | plt.show() 87 | 88 | plt.figure() 89 | plt.subplot(1, 2, 1) 90 | plt.imshow(img) 91 | plt.title('Original') 92 | plt.subplot(1, 2, 2) 93 | plt.imshow(segmented) 94 | plt.title('Segmented Image') 95 | plt.show() 96 | -------------------------------------------------------------------------------- /Problems/Firefly Space-Time Bending.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Objective function 5 | def objective_function(x, start, end, lambda_bend, warp_field): 6 | # Distance term (Geodesic distance using warp field) 7 | distance = np.linalg.norm((x - end) * (1 
+ warp_field)) # Adjusted distance with warp effect 8 | 9 | # Bending cost (simulating space-time distortion effort) 10 | bending_cost = lambda_bend * np.sum((x - start)**2) # Quadratic bending penalty 11 | 12 | # Effort to traverse (simulate time dilation or warp effort) 13 | effort_cost = np.sum(np.abs(x - start) * (1 + warp_field)) # Absolute effort adjusted by warp 14 | 15 | # Energy cost for maintaining the warp field 16 | energy_cost = np.sum(warp_field**2) # Energy required to maintain the warp 17 | 18 | # Combined cost function 19 | return distance + bending_cost + 0.5 * effort_cost + 0.2 * energy_cost 20 | 21 | # Parameters for Firefly Algorithm 22 | num_fireflies = 70 23 | num_dimensions = 2 24 | num_iterations = 40 25 | start = np.array([0, 0]) 26 | end = np.array([10, 10]) 27 | lambda_bend = 0.1 28 | warp_field = np.random.uniform(low=0.1, high=0.5, size=num_dimensions) # Random initial warp field 29 | alpha = 0.2 # Randomness strength 30 | beta0 = 1.0 # Base attractiveness 31 | gamma = 1.0 # Absorption coefficient 32 | 33 | # Initialize fireflies 34 | positions = np.random.uniform(low=-5, high=15, size=(num_fireflies, num_dimensions)) 35 | intensities = np.array([objective_function(p, start, end, lambda_bend, warp_field) for p in positions]) 36 | 37 | # Record the best cost at each iteration for plotting 38 | best_costs = [] 39 | 40 | # Optimization loop 41 | for iteration in range(num_iterations): 42 | for i in range(num_fireflies): 43 | for j in range(num_fireflies): 44 | if intensities[j] < intensities[i]: # Move firefly i towards firefly j 45 | distance = np.linalg.norm(positions[i] - positions[j]) 46 | beta = beta0 * np.exp(-gamma * distance**2) 47 | positions[i] += beta * (positions[j] - positions[i]) + alpha * (np.random.rand(num_dimensions) - 0.5) 48 | 49 | # Update intensity for firefly i 50 | intensities[i] = objective_function(positions[i], start, end, lambda_bend, warp_field) 51 | 52 | # Find the best firefly 53 | best_idx = np.argmin(intensities) 54 | best_costs.append(intensities[best_idx]) 55 | 56 | # Print progress 57 | print(f"Iteration {iteration + 1}: Best Fitness = {intensities[best_idx]}") 58 | 59 | # Final results 60 | best_position = positions[best_idx] 61 | print("\nOptimization Completed!") 62 | print(f"Global Best Position: {best_position}") 63 | print(f"Objective Value at Global Best: {intensities[best_idx]}") 64 | 65 | # Visualization 66 | plt.figure(figsize=(10, 6)) 67 | plt.plot(best_costs, label="Best Cost per Iteration") 68 | plt.xlabel("Iteration") 69 | plt.ylabel("Cost") 70 | plt.title("Convergence of Firefly Algorithm with Space-Time Bending Analogy") 71 | plt.legend() 72 | plt.grid() 73 | plt.show() 74 | 75 | # Plot the final positions of fireflies 76 | plt.figure(figsize=(8, 8)) 77 | plt.scatter(positions[:, 0], positions[:, 1], label="Final Firefly Positions", color="blue") 78 | plt.scatter(best_position[0], best_position[1], label="Global Best Position", color="red", marker="x", s=100) 79 | plt.scatter(end[0], end[1], label="Target Position", color="green", marker="*", s=200) 80 | plt.xlabel("X Coordinate") 81 | plt.ylabel("Y Coordinate") 82 | plt.title("Final Firefly Distribution") 83 | plt.legend() 84 | plt.grid() 85 | plt.show() 86 | -------------------------------------------------------------------------------- /Problems/Genetic Algorithm Evolutionary Art.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Genetic Algorithm 
Parameters 5 | population_size = 30 6 | num_generations = 100 7 | mutation_rate = 0.1 8 | image_resolution = (300, 300) 9 | 10 | # Fitness Function: Enhanced to reward spiral-like patterns and symmetry 11 | def fitness_function(image): 12 | complexity = calculate_complexity(image) 13 | symmetry = calculate_symmetry(image) 14 | contrast = global_contrast_factor(image) 15 | return complexity + symmetry + contrast 16 | 17 | # Calculate Complexity using edge detection (cast to int so uint8 differences cannot wrap around) 18 | def calculate_complexity(image): 19 | gradient_x = np.abs(np.diff(image.astype(int), axis=0)) 20 | gradient_y = np.abs(np.diff(image.astype(int), axis=1)) 21 | complexity = np.sum(gradient_x) + np.sum(gradient_y) 22 | return complexity / image.size 23 | 24 | # Global Contrast Factor (GCF) 25 | def global_contrast_factor(image): 26 | luminance = np.mean(image, axis=2) # Average across RGB channels 27 | contrast = np.std(luminance) # Standard deviation as a simple contrast measure 28 | return contrast 29 | 30 | # Calculate Symmetry (int cast again avoids uint8 wraparound) 31 | def calculate_symmetry(image): 32 | vertical_symmetry = np.sum(np.abs(image.astype(int) - np.flip(image, axis=1))) 33 | horizontal_symmetry = np.sum(np.abs(image.astype(int) - np.flip(image, axis=0))) 34 | total_symmetry = -(vertical_symmetry + horizontal_symmetry) / image.size # Negate to reward symmetry 35 | return total_symmetry 36 | 37 | # Initialize Population with spiral-like patterns 38 | def initialize_population(size, resolution): 39 | population = [] 40 | for _ in range(size): 41 | x = np.linspace(-1.0, 1.0, resolution[0]) 42 | y = np.linspace(-1.0, 1.0, resolution[1]) 43 | x, y = np.meshgrid(x, y) 44 | r = np.sqrt(x**2 + y**2) 45 | theta = np.arctan2(y, x) 46 | spiral = (np.sin(10 * r + 5 * theta + np.random.uniform(0, 2 * np.pi)) * 127 + 128).astype(np.uint8) # random phase so individuals start out distinct 47 | image = np.stack([spiral, spiral, spiral], axis=2) # Grayscale to RGB 48 | population.append(image) 49 | return population 50 | 51 | # Crossover Operation 52 | def crossover(parent1, parent2): 53 | crossover_point = np.random.randint(0, parent1.shape[1]) 54 | child = np.concatenate((parent1[:, :crossover_point], parent2[:, crossover_point:]), axis=1) 55 | return child 56 | 57 | # Mutation Operation (structured fractal adjustments) 58 | def mutate(image, rate): 59 | mutated_image = image.copy() 60 | num_pixels = np.prod(image.shape[:2]) 61 | num_mutations = int(rate * num_pixels) 62 | for _ in range(num_mutations): 63 | x, y = np.random.randint(0, image.shape[0]), np.random.randint(0, image.shape[1]) 64 | mutated_image[x, y] = np.clip(mutated_image[x, y].astype(int) + np.random.randint(-50, 50), 0, 255) 65 | return mutated_image 66 | 67 | # Main GA Loop 68 | population = initialize_population(population_size, image_resolution) 69 | for generation in range(num_generations): 70 | print(f"Generation {generation + 1}: Evaluating fitness") 71 | 72 | # Evaluate Fitness 73 | fitness_scores = [fitness_function(image) for image in population] 74 | best_fitness = max(fitness_scores) 75 | print(f" Best fitness: {best_fitness:.2f}") 76 | 77 | # Select Parents (Roulette Wheel Selection); scores are shifted because the symmetry term can make them negative 78 | shifted_scores = np.array(fitness_scores) - min(fitness_scores) + 1e-9 79 | probabilities = shifted_scores / shifted_scores.sum() 80 | indices = np.arange(len(population)) # Indices for the population 81 | selected_indices = np.random.choice(indices, size=population_size, p=probabilities, replace=True) 82 | parents = [population[i] for i in selected_indices] 83 | 84 | print(" Parents selected") 85 | 86 | # Generate New Population 87 | new_population = [] 88 | for i in range(0, len(parents), 2): 89 | parent1, parent2 = parents[i], parents[(i + 1) % len(parents)] 
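# Each pair is recombined twice below (complementary column-swap crossovers) and mutated,
# which keeps the population size constant; crossover splices the two parent images at a
# random column, and mutate perturbs a mutation_rate fraction of pixels by up to +/-50 levels.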
90 | child1 = mutate(crossover(parent1, parent2), mutation_rate) 91 | child2 = mutate(crossover(parent2, parent1), mutation_rate) 92 | new_population.extend([child1, child2]) 93 | 94 | print(" New population generated") 95 | 96 | # Find the best image in the population that fitness_scores was computed on, then replace the population 97 | best_index = np.argmax(fitness_scores) 98 | best_image = population[best_index] 99 | population = new_population 100 | 101 | # Display the best image 102 | plt.imshow(best_image, cmap='inferno') 103 | plt.title(f"Generation {generation + 1} - Best Fitness: {best_fitness:.2f}") 104 | plt.axis('off') 105 | plt.pause(0.5) 106 | 107 | print("Evolutionary art generation complete!") -------------------------------------------------------------------------------- /Problems/Genetic Algorithm Exoplanetary Adaptation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | # Generate random planet and its stellar system parameters 5 | def generate_planet(): 6 | """Generates random parameters for a hypothetical planet in a stellar system.""" 7 | planet_name = f"Planet_{np.random.randint(1000, 9999)}" 8 | star_name = f"Star_{np.random.randint(1000, 9999)}" 9 | 10 | # Randomize planet parameters 11 | gravity = np.random.uniform(0.1, 3.0) # Gravity in Earth G 12 | atmosphere_composition = { 13 | "O2": np.random.uniform(0.01, 0.5), # Oxygen levels (%) 14 | "CO2": np.random.uniform(0.01, 0.5), # Carbon dioxide levels (%) 15 | "Other Gases": np.random.uniform(0.01, 0.9), # Other gases (%) 16 | } 17 | radiation_level = np.random.uniform(1, 500) # Radiation level (mSv/year) 18 | temperature_range = (np.random.uniform(-100, 0), np.random.uniform(0, 100)) # Min and max temperature (°C) 19 | day_length = np.random.uniform(6, 48) # Day length in hours 20 | 21 | return { 22 | "planet_name": planet_name, 23 | "star_name": star_name, 24 | "gravity": gravity, 25 | "atmosphere_composition": atmosphere_composition, 26 | "radiation_level": radiation_level, 27 | "temperature_range": temperature_range, 28 | "day_length": day_length, 29 | } 30 | 31 | # Objective function to evaluate fitness of a genetic profile 32 | def objective_function(genetic_profile, planet_params): 33 | """Evaluates the fitness of a genetic profile based on planet conditions.""" 34 | # Extract planet parameters 35 | gravity = planet_params["gravity"] 36 | atmosphere = planet_params["atmosphere_composition"] 37 | radiation = planet_params["radiation_level"] 38 | temp_min, temp_max = planet_params["temperature_range"] 39 | day_length = planet_params["day_length"] 40 | 41 | # Genetic traits in the profile 42 | radiation_resistance, bone_density, oxygen_efficiency, temp_adaptability, stress_resilience = genetic_profile 43 | 44 | # Fitness components 45 | fitness_radiation = np.exp(-radiation / radiation_resistance) # Better resistance reduces impact 46 | fitness_gravity = np.exp(-abs(gravity - 1) / bone_density) # Closer to Earth's gravity is ideal 47 | fitness_oxygen = oxygen_efficiency * atmosphere["O2"] # Oxygen utilization adapts to O2 levels 48 | fitness_temperature = np.exp(-abs(temp_min + temp_max) / (2 * temp_adaptability)) # Avg temp adaptation 49 | fitness_stress = stress_resilience / day_length # Better stress handling for long days 50 | 51 | # Combined fitness score (weighted sum) 52 | fitness = (0.25 * fitness_radiation + 53 | 0.2 * fitness_gravity + 54 | 0.25 * fitness_oxygen + 55 | 0.2 * fitness_temperature + 56 | 0.1 * fitness_stress) 57 | return fitness 58 | 59 | # Parameters for Genetic Algorithm 60 | population_size = 150 61 | 
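# The loop below is the classic GA cycle: evaluate fitness, roulette-wheel selection,
# single-point crossover producing two children per pair, and Gaussian mutation applied
# to roughly a mutation_rate fraction of all genes.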
num_generations = 300 62 | num_genes = 5 # Number of genetic traits 63 | mutation_rate = 0.1 64 | 65 | # Initialize population 66 | population = np.random.uniform(0.5, 5.0, size=(population_size, num_genes)) # Random genetic profiles 67 | planet_params = generate_planet() 68 | fitness_history = [] 69 | 70 | # Optimization loop 71 | for generation in range(num_generations): 72 | # Evaluate fitness for each individual 73 | fitness = np.array([objective_function(individual, planet_params) for individual in population]) 74 | fitness_history.append(np.max(fitness)) # Track the best fitness in this generation 75 | 76 | # Print generation progress 77 | best_individual = population[np.argmax(fitness)] 78 | print(f"Generation {generation + 1}: Best Fitness = {np.max(fitness):.4f}") 79 | 80 | # Selection (roulette wheel selection) 81 | probabilities = fitness / fitness.sum() 82 | selected_indices = np.random.choice(np.arange(population_size), size=population_size, p=probabilities) 83 | selected_population = population[selected_indices] 84 | 85 | # Crossover (single-point) 86 | new_population = [] 87 | for i in range(0, population_size, 2): 88 | parent1, parent2 = selected_population[i], selected_population[(i + 1) % population_size] 89 | crossover_point = np.random.randint(1, num_genes) 90 | child1 = np.concatenate([parent1[:crossover_point], parent2[crossover_point:]]) 91 | child2 = np.concatenate([parent2[:crossover_point], parent1[crossover_point:]]) 92 | new_population.extend([child1, child2]) 93 | 94 | # Mutation 95 | new_population = np.array(new_population) 96 | mutation_mask = np.random.rand(population_size, num_genes) < mutation_rate 97 | new_population[mutation_mask] += np.random.normal(0, 0.1, size=mutation_mask.sum()) 98 | 99 | # Update population 100 | population = new_population 101 | 102 | # Final results 103 | final_fitness = np.array([objective_function(individual, planet_params) for individual in population]) 104 | best_individual = population[np.argmax(final_fitness)] 105 | print("\nOptimization Completed!") 106 | print(f"Planet Parameters: {planet_params}") 107 | print(f"Best Genetic Profile: {best_individual}") 108 | print(f"Best Fitness: {np.max(final_fitness):.4f}") 109 | 110 | # Visualization 111 | plt.figure(figsize=(10, 6)) 112 | plt.plot(fitness_history, label="Best Fitness per Generation") 113 | plt.xlabel("Generation") 114 | plt.ylabel("Fitness") 115 | plt.title("Genetic Algorithm Optimization for Human Survival on Exoplanet") 116 | plt.legend() 117 | plt.grid() 118 | plt.show() 119 | -------------------------------------------------------------------------------- /Problems/Grey Wolf Optimizer VAE Optimized (Latent Space).py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from sklearn.datasets import load_iris 4 | from sklearn.model_selection import train_test_split 5 | from sklearn.preprocessing import StandardScaler, OneHotEncoder 6 | from sklearn.metrics import classification_report 7 | from sklearn.ensemble import RandomForestClassifier 8 | 9 | # Load and preprocess the Iris dataset 10 | iris = load_iris() 11 | X = iris.data 12 | y = iris.target 13 | 14 | scaler = StandardScaler() 15 | X = scaler.fit_transform(X) 16 | 17 | encoder = OneHotEncoder(sparse_output=False) 18 | y_encoded = encoder.fit_transform(y.reshape(-1, 1)) 19 | 20 | # Split the data 21 | X_train, X_test, y_train, y_test = train_test_split(X, y_encoded, test_size=0.6, random_state=42) 22 | 23 | # Build a 
Variational Autoencoder (VAE) 24 | class VAE(tf.keras.Model): 25 | def __init__(self, latent_dim): 26 | super(VAE, self).__init__() 27 | self.latent_dim = latent_dim 28 | 29 | # Encoder 30 | self.encoder = tf.keras.Sequential([ 31 | tf.keras.layers.InputLayer(input_shape=(X_train.shape[1],)), 32 | tf.keras.layers.Dense(16, activation="relu"), 33 | tf.keras.layers.Dense(latent_dim * 2) # Mean and LogVar 34 | ]) 35 | 36 | # Decoder 37 | self.decoder = tf.keras.Sequential([ 38 | tf.keras.layers.InputLayer(input_shape=(latent_dim,)), 39 | tf.keras.layers.Dense(16, activation="relu"), 40 | tf.keras.layers.Dense(X_train.shape[1]) 41 | ]) 42 | 43 | def reparameterize(self, mean, logvar): 44 | eps = tf.random.normal(shape=mean.shape) 45 | return eps * tf.exp(logvar * 0.5) + mean 46 | 47 | def call(self, inputs): 48 | x = self.encoder(inputs) 49 | mean, logvar = tf.split(x, num_or_size_splits=2, axis=1) 50 | z = self.reparameterize(mean, logvar) 51 | reconstructed = self.decoder(z) 52 | return reconstructed, mean, logvar 53 | 54 | # Define VAE loss 55 | def vae_loss(data, reconstructed, mean, logvar): 56 | reconstruction_loss = tf.reduce_mean(tf.keras.losses.mse(data, reconstructed)) 57 | kl_divergence = -0.5 * tf.reduce_sum(1 + logvar - tf.square(mean) - tf.exp(logvar)) 58 | return reconstruction_loss + kl_divergence 59 | 60 | # Train VAE 61 | latent_dim = 2 62 | vae = VAE(latent_dim) 63 | optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001) 64 | 65 | @tf.function 66 | def train_step(data): 67 | with tf.GradientTape() as tape: 68 | reconstructed, mean, logvar = vae(data) 69 | loss = vae_loss(data, reconstructed, mean, logvar) 70 | gradients = tape.gradient(loss, vae.trainable_variables) 71 | optimizer.apply_gradients(zip(gradients, vae.trainable_variables)) 72 | return loss 73 | 74 | print("Training VAE...") 75 | for epoch in range(200): 76 | loss = train_step(X_train) 77 | if epoch % 10 == 0: 78 | print(f"Epoch {epoch}: Loss = {loss.numpy():.4f}") 79 | 80 | # Gray Wolf Optimizer (GWO) 81 | class GrayWolfOptimizer: 82 | def __init__(self, latent_dim, n_wolves=30, max_iters=200): 83 | self.latent_dim = latent_dim 84 | self.n_wolves = n_wolves 85 | self.max_iters = max_iters 86 | self.wolves = np.random.uniform(-2, 2, size=(n_wolves, latent_dim)) 87 | 88 | def fitness(self, wolves): 89 | synthetic_data = vae.decoder(tf.convert_to_tensor(wolves, dtype=tf.float32)).numpy() 90 | reconstruction_loss = np.mean((synthetic_data - np.mean(X_train, axis=0))**2, axis=1) # per-wolf error, one value per row 91 | diversity_score = np.std(synthetic_data, axis=1) # per-wolf spread across features 92 | return -reconstruction_loss + diversity_score # One score per wolf: maximize diversity, minimize reconstruction error 93 | 94 | def optimize(self): 95 | for t in range(self.max_iters): 96 | fitness = self.fitness(self.wolves) 97 | sorted_indices = np.argsort(fitness)[::-1] 98 | self.wolves = self.wolves[sorted_indices] 99 | 100 | # Handle edge cases for population size; copy the leaders so the in-place updates below do not shift them 101 | if len(self.wolves) < 3: 102 | alpha = beta = delta = self.wolves[0].copy() 103 | else: 104 | alpha, beta, delta = self.wolves[:3].copy() 105 | 106 | for i in range(len(self.wolves)): 107 | a = 2 - t * (2 / self.max_iters) 108 | r1, r2 = np.random.rand(), np.random.rand() 109 | A1, A2, A3 = 2 * a * r1 - a, 2 * a * r2 - a, 2 * a * np.random.rand() - a 110 | D1, D2, D3 = abs(A1 * alpha - self.wolves[i]), abs(A2 * beta - self.wolves[i]), abs(A3 * delta - self.wolves[i]) 111 | X1, X2, X3 = alpha - A1 * D1, beta - A2 * D2, delta - A3 * D3 112 | self.wolves[i] = (X1 + X2 + X3) / 3 113 | return self.wolves[:min(len(self.wolves), 50)] # Return top 
# Generate synthetic data
print("Optimizing latent space with GWO...")
gwo = GrayWolfOptimizer(latent_dim=latent_dim)
optimized_latents = gwo.optimize()
synthetic_data = vae.decoder(tf.convert_to_tensor(optimized_latents, dtype=tf.float32)).numpy()

# Combine original and synthetic data; labels for the synthetic rows are
# recycled from the training labels (a rough heuristic, kept from the original)
combined_X_train = np.vstack([X_train, synthetic_data])
synthetic_labels = np.tile(np.argmax(y_train[:len(synthetic_data)], axis=1), (len(synthetic_data) // len(y_train) + 1))[:len(synthetic_data)]
combined_y_train = np.hstack([np.argmax(y_train, axis=1), synthetic_labels])

# Train classifier on combined data
clf_combined = RandomForestClassifier(random_state=42)
clf_combined.fit(combined_X_train, combined_y_train)

# Evaluate on test data
y_combined_pred = clf_combined.predict(X_test)

# Print classification report
print("\nClassification Report (Combined Original and Synthetic Data):")
print(classification_report(np.argmax(y_test, axis=1), y_combined_pred))
--------------------------------------------------------------------------------
/Problems/Harmony Search Regression.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_friedman1

# Load a sample regression dataset (Friedman #1 dataset)
X, y = make_friedman1(n_samples=200, n_features=5, noise=0.1, random_state=42)

# Use only the first feature for simplicity (can adjust as needed)
X = X[:, :1]

# Split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Define the Harmony Search algorithm
class HarmonySearch:
    def __init__(self, obj_func, bounds, hms=20, hmcr=0.9, par=0.3, max_iter=200):
        self.obj_func = obj_func
        self.bounds = bounds
        self.hms = hms          # Harmony memory size
        self.hmcr = hmcr        # Harmony memory considering rate
        self.par = par          # Pitch adjustment rate
        self.max_iter = max_iter
        self.harmony_memory = []

    def initialize(self):
        for _ in range(self.hms):
            harmony = [np.random.uniform(low, high) for low, high in self.bounds]
            self.harmony_memory.append(harmony)

    def improvise(self):
        new_harmony = []
        for i, (low, high) in enumerate(self.bounds):
            if np.random.rand() < self.hmcr:
                new_value = np.random.choice([h[i] for h in self.harmony_memory])
                if np.random.rand() < self.par:
                    new_value += np.random.uniform(-1, 1) * (high - low) * 0.01
            else:
                new_value = np.random.uniform(low, high)
            new_harmony.append(np.clip(new_value, low, high))
        return new_harmony

    def optimize(self):
        self.initialize()
        for _ in range(self.max_iter):
            new_harmony = self.improvise()
            new_score = self.obj_func(new_harmony)
            # Score the memory once per iteration instead of twice
            scores = [self.obj_func(h) for h in self.harmony_memory]
            worst_idx = np.argmax(scores)
            if new_score < scores[worst_idx]:
                self.harmony_memory[worst_idx] = new_harmony
        best_idx = np.argmin([self.obj_func(h) for h in self.harmony_memory])
        return self.harmony_memory[best_idx]
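
# Added, hedged demo: the class above is problem-agnostic. A minimal sketch on
# a toy 1-D objective (not part of the original script):
demo_best = HarmonySearch(obj_func=lambda h: (h[0] - 3.0) ** 2,
                          bounds=[(-10.0, 10.0)], max_iter=100).optimize()
print(f"HarmonySearch sanity check, argmin of (x - 3)^2 ~= {demo_best[0]:.3f}")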
# Objective function for regression (minimize MSE)
def objective(params):
    degree = int(params[0])
    coeffs = params[1:degree + 2]  # degree + 1 coefficients: one per power plus the intercept
    poly = PolynomialFeatures(degree=degree)
    X_poly_train = poly.fit_transform(X_train)  # The transform prepends the bias (intercept) column
    y_pred = np.dot(X_poly_train, np.array(coeffs))  # Predict with coefficients
    return mean_squared_error(y_train, y_pred)

# Set bounds for Harmony Search
max_poly_degree = 5  # Maximum degree of the polynomial
bounds = [(1, max_poly_degree)] + [(-10, 10) for _ in range(max_poly_degree + 1)]  # +1 for intercept

# Run Harmony Search
hs = HarmonySearch(obj_func=objective, bounds=bounds, max_iter=200)
best_params = hs.optimize()

# Extract the best degree and coefficients
best_degree = int(best_params[0])
best_coeffs = best_params[1:best_degree + 2]  # Include intercept term

# Use the best polynomial degree and coefficients for plotting
poly = PolynomialFeatures(degree=best_degree)
X_poly_train = poly.fit_transform(X_train)
X_poly_test = poly.transform(X_test)
y_train_pred = np.dot(X_poly_train, np.array(best_coeffs))
y_test_pred = np.dot(X_poly_test, np.array(best_coeffs))

# Calculate Mean Squared Error (MSE)
mse_train = mean_squared_error(y_train, y_train_pred)
mse_test = mean_squared_error(y_test, y_test_pred)
print(f"MSE (Train): {mse_train:.4f}")
print(f"MSE (Test): {mse_test:.4f}")
print(f"Optimized Polynomial Degree: {best_degree}")

# Plot the regression curve
X_range = np.linspace(X.min(), X.max(), 500).reshape(-1, 1)
X_range_poly = poly.transform(X_range)
y_range_pred = np.dot(X_range_poly, np.array(best_coeffs))

plt.scatter(X, y, color='blue', label='Data Samples')
plt.plot(X_range, y_range_pred, color='red', linewidth=2, label=f'Degree {best_degree} Fit')
plt.title('Nonlinear Regression')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()
plt.grid()
plt.show()
--------------------------------------------------------------------------------
/Problems/Particle Swarm Optimization Evolutionary Art.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt

# Particle Swarm Optimization (PSO) Parameters
population_size = 20
num_iterations = 50
inertia = 0.5
c1 = 2.0  # Cognitive parameter
c2 = 2.0  # Social parameter
image_resolution = (300, 300)

# Fitness Function: Enhanced to reward structured patterns
def fitness_function(image):
    complexity = calculate_complexity(image)
    symmetry = calculate_symmetry(image)
    contrast = global_contrast_factor(image)
    color_diversity = calculate_color_diversity(image)
    return complexity + symmetry + contrast + color_diversity

# Calculate Complexity using edge detection
def calculate_complexity(image):
    gradient_x = np.abs(np.diff(image, axis=0))
    gradient_y = np.abs(np.diff(image, axis=1))
    complexity = np.sum(gradient_x) + np.sum(gradient_y)
    return complexity / image.size

# Global Contrast Factor (GCF)
def global_contrast_factor(image):
    luminance = np.mean(image, axis=2)  # Average across RGB channels
    contrast = np.std(luminance)  # Standard deviation as a simple contrast measure
    return contrast
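
# Added note: the four fitness terms live on very different scales (complexity
# is a normalized gradient sum, contrast a luminance standard deviation,
# diversity a small ratio, symmetry a large negative penalty), so a weighted
# sum such as
#   2.0 * complexity + 0.5 * symmetry + 1.0 * contrast + 100.0 * color_diversity
# is a reasonable variant to tune; the plain sum used here is the original choice.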
# Calculate Symmetry
def calculate_symmetry(image):
    vertical_symmetry = np.sum(np.abs(image - np.flip(image, axis=1)))
    horizontal_symmetry = np.sum(np.abs(image - np.flip(image, axis=0)))
    total_symmetry = -(vertical_symmetry + horizontal_symmetry) / image.size  # Negate to reward symmetry
    return total_symmetry

# Calculate Color Diversity
def calculate_color_diversity(image):
    unique_colors = len(np.unique(image.reshape(-1, image.shape[2]), axis=0))
    return unique_colors / (image.shape[0] * image.shape[1])

# Initialize Population with fractal-like patterns and colorful variations
def initialize_population(size, resolution):
    population = []
    for _ in range(size):
        x = np.linspace(-2.0, 2.0, resolution[0])
        y = np.linspace(-2.0, 2.0, resolution[1])
        x, y = np.meshgrid(x, y)
        r = np.sqrt(x**2 + y**2)
        theta = np.arctan2(y, x)
        fractal_r = np.sin(10 * r + 5 * theta) * 127 + 128
        fractal_g = np.cos(10 * r - 5 * theta) * 127 + 128
        fractal_b = (np.sin(10 * theta) + np.cos(10 * r)) * 127 + 128
        # Keep the images as floats (fix: the original cast to uint8, which makes
        # the subtraction/np.diff arithmetic in the fitness terms wrap around)
        image = np.stack([fractal_r, fractal_g, fractal_b], axis=2).astype(np.float64)
        population.append(image)
    return population

# PSO Update Function
def update_particles(positions, velocities, personal_best_positions, global_best_position, inertia, c1, c2):
    for i in range(len(positions)):
        r1, r2 = np.random.random(), np.random.random()
        cognitive_component = c1 * r1 * (personal_best_positions[i] - positions[i])
        social_component = c2 * r2 * (global_best_position - positions[i])
        velocities[i] = inertia * velocities[i] + cognitive_component + social_component
        positions[i] = np.clip(positions[i] + velocities[i], 0, 255)  # Keep positions within valid range

# Main PSO Loop
population = initialize_population(population_size, image_resolution)
velocities = [np.random.uniform(-1, 1, (image_resolution[0], image_resolution[1], 3)) for _ in range(population_size)]
personal_best_positions = population[:]
personal_best_scores = [fitness_function(image) for image in population]
global_best_position = personal_best_positions[np.argmax(personal_best_scores)]
global_best_score = max(personal_best_scores)

for iteration in range(num_iterations):
    print(f"Iteration {iteration + 1}: Evaluating fitness")

    # Evaluate Fitness
    fitness_scores = [fitness_function(image) for image in population]

    for i in range(population_size):
        if fitness_scores[i] > personal_best_scores[i]:
            personal_best_scores[i] = fitness_scores[i]
            personal_best_positions[i] = population[i]

    best_particle_index = np.argmax(personal_best_scores)
    if personal_best_scores[best_particle_index] > global_best_score:
        global_best_score = personal_best_scores[best_particle_index]
        global_best_position = personal_best_positions[best_particle_index]

    print(f"  Best fitness: {global_best_score:.2f}")

    # Update Particles
    update_particles(population, velocities, personal_best_positions, global_best_position, inertia, c1, c2)

    # Display the best image
    plt.imshow(global_best_position.astype(np.uint8))
    plt.title(f"Iteration {iteration + 1} - Best Fitness: {global_best_score:.2f}")
    plt.axis('off')
    plt.pause(0.5)

print("Intelligent art generation with PSO complete!")
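
# Added follow-up: to keep the final artwork after the loop, one could write
# plt.imsave("pso_art_final.png", global_best_position.astype(np.uint8))
# (the filename is illustrative, not part of the original script).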
--------------------------------------------------------------------------------
/Problems/Particle Swarm Optimization Evolved Antenna.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt

# Define the objective function
def objective_function(antenna_points):
    """
    Objective Function for Antenna Design Optimization

    This function evaluates the quality of an antenna design by combining multiple
    factors such as:
    - Total length of the antenna (to keep it compact and efficient).
    - Smoothness, penalizing excessive twists and encouraging smooth transitions.

    Parameters:
    antenna_points (numpy.ndarray): Array of 3D coordinates representing the antenna geometry.

    Returns:
    float: The computed cost for the given antenna design.
    """
    # Compute total length of the antenna
    total_length = np.sum(np.sqrt(np.sum(np.diff(antenna_points, axis=0)**2, axis=1)))

    # Compute smoothness penalty (penalize excessive variation between segment heights)
    smoothness_penalty = np.sum(np.abs(np.diff(antenna_points[:, 2])))

    # Combine metrics into the cost function
    cost = total_length + 0.3 * smoothness_penalty
    return cost

# Function to generate an initial antenna with seven joints
# (Note: this helper is unused below; the PSO loop samples particle vectors
# directly. It is kept for reference.)
def generate_initial_antenna():
    """
    Generate an initial antenna geometry with seven joints.

    Returns:
    numpy.ndarray: Array of 3D coordinates representing the initial antenna geometry.
    """
    joints = 7
    z = np.linspace(0, 10, joints + 1)  # Antenna progresses upward
    x = np.random.uniform(-1, 1, joints + 1)
    y = np.random.uniform(-1, 1, joints + 1)
    return np.column_stack((x, y, z))

# PSO Parameters
num_particles = 30
num_iterations = 200
joints = 7  # Number of joints
dimensions = joints * 3  # 3D coordinates for each joint

# Function to run PSO and return results
def run_pso():
    # Initialize particle positions and velocities
    particles = np.random.uniform(-1, 1, (num_particles, dimensions))
    velocities = np.random.uniform(-0.1, 0.1, (num_particles, dimensions))
    best_particle_positions = particles.copy()
    best_particle_costs = np.array([objective_function(p.reshape(-1, 3)) for p in particles])
    # Copy, so later in-place updates to `particles` cannot mutate the stored best
    global_best_position = particles[np.argmin(best_particle_costs)].copy()
    global_best_cost = np.min(best_particle_costs)

    # PSO Hyperparameters
    w = 0.5   # Inertia weight
    c1 = 1.5  # Cognitive coefficient
    c2 = 1.5  # Social coefficient

    # Track cost over iterations
    cost_history = []

    # PSO Main Loop
    for iteration in range(num_iterations):
        for i, particle in enumerate(particles):
            # Update velocity
            r1, r2 = np.random.random(2)
            velocities[i] = (w * velocities[i] +
                             c1 * r1 * (best_particle_positions[i] - particle) +
                             c2 * r2 * (global_best_position - particle))

            # Update position
            particles[i] += velocities[i]

            # Constrain particles within bounds
            particles[i] = np.clip(particles[i], -1, 1)

            # Evaluate cost
            reshaped_particle = particles[i].reshape(-1, 3)
            cost = objective_function(reshaped_particle)

            # Update personal best
            if cost < best_particle_costs[i]:
                best_particle_costs[i] = cost
                best_particle_positions[i] = particles[i]

            # Update global best (fix: copy, because particles[i] is a view
            # that keeps changing as the swarm moves)
            if cost < global_best_cost:
                global_best_cost = cost
                global_best_position = particles[i].copy()

        cost_history.append(global_best_cost)

    return global_best_position.reshape(-1, 3), cost_history
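
# Added note: each run_pso() call below is an independent restart; the 2x4 grid
# pairs each optimized geometry (top row) with its own convergence curve
# (bottom row), which makes run-to-run variance of the restarts visible.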
# Plot 4 antennas and their costs in a 2x4 layout
# (Fix: start from an empty figure; the original plt.subplots(2, 4, ...) call
# created eight axes that the add_subplot calls below merely covered up.)
fig = plt.figure(figsize=(20, 10))

for i in range(4):
    best_antenna_points, cost_history = run_pso()

    # Plot antenna geometry (3D plot)
    ax = fig.add_subplot(2, 4, i + 1, projection='3d')
    ax.plot(best_antenna_points[:, 0], best_antenna_points[:, 1], best_antenna_points[:, 2], marker='o', linewidth=2)
    ax.set_title(f"Optimized Antenna {i + 1}", fontsize=14)
    ax.set_xlabel("X", fontsize=12)
    ax.set_ylabel("Y", fontsize=12)
    ax.set_zlabel("Z", fontsize=12)
    ax.scatter(best_antenna_points[0, 0], best_antenna_points[0, 1], best_antenna_points[0, 2], color='red', label='Start', s=100)
    ax.scatter(best_antenna_points[-1, 0], best_antenna_points[-1, 1], best_antenna_points[-1, 2], color='green', label='End', s=100)
    ax.legend(fontsize=10)

    # Plot cost history (2D plot)
    ax2 = fig.add_subplot(2, 4, i + 5)
    ax2.plot(range(1, num_iterations + 1), cost_history, marker='o', color='blue', linewidth=2)
    ax2.set_title(f"Cost Over Iterations {i + 1}", fontsize=14)
    ax2.set_xlabel("Iteration", fontsize=12)
    ax2.set_ylabel("Cost", fontsize=12)

plt.tight_layout()
plt.show()
--------------------------------------------------------------------------------
/Problems/Simulated Annealing Quadratic Assignment Problem.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd

# Function to generate a random QAP problem
def generate_qap_problem(size):
    flow_matrix = np.random.randint(1, 100, size=(size, size))
    distance_matrix = np.random.randint(1, 100, size=(size, size))
    return flow_matrix, distance_matrix

# Objective function for QAP
def calculate_cost(permutation, flow_matrix, distance_matrix):
    size = len(permutation)
    cost = 0
    for i in range(size):
        for j in range(size):
            cost += flow_matrix[i][j] * distance_matrix[permutation[i]][permutation[j]]
    return cost

# Simulated Annealing for QAP
def simulated_annealing_qap(flow_matrix, distance_matrix, initial_temp=1000, cooling_rate=0.95, max_iterations=1000):
    size = len(flow_matrix)
    current_permutation = list(range(size))
    random.shuffle(current_permutation)
    current_cost = calculate_cost(current_permutation, flow_matrix, distance_matrix)

    best_permutation = current_permutation.copy()
    best_cost = current_cost

    costs_over_iterations = [current_cost]

    temperature = initial_temp

    for iteration in range(max_iterations):
        # Propose a neighbor by swapping two facilities
        i, j = random.sample(range(size), 2)
        new_permutation = current_permutation.copy()
        new_permutation[i], new_permutation[j] = new_permutation[j], new_permutation[i]

        new_cost = calculate_cost(new_permutation, flow_matrix, distance_matrix)

        delta = new_cost - current_cost
        if delta < 0 or random.uniform(0, 1) < np.exp(-delta / temperature):
            current_permutation = new_permutation
            current_cost = new_cost

        if current_cost < best_cost:
            best_permutation = current_permutation
            best_cost = current_cost

        costs_over_iterations.append(current_cost)
        temperature *= cooling_rate

    return best_permutation, best_cost, costs_over_iterations
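
# Added note: the acceptance test above is the Metropolis criterion: a worse
# swap (delta > 0) is accepted with probability exp(-delta / temperature), so
# under the geometric schedule T <- 0.95 * T uphill moves die out and the
# search gradually turns into pure hill descent.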
# Main execution for a single run
size = 10  # Problem size
flow_matrix, distance_matrix = generate_qap_problem(size)
best_perm, best_cost, costs = simulated_annealing_qap(flow_matrix, distance_matrix)

# Plotting iteration cost progress
plt.figure(figsize=(10, 6))
plt.plot(costs, marker="o", linestyle="--")
plt.title("Cost over Iterations - Simulated Annealing")
plt.xlabel("Iteration")
plt.ylabel("Cost")
plt.grid()
plt.show()

# Displaying the QAP flow matrix
plt.figure(figsize=(8, 6))
plt.imshow(flow_matrix, cmap="Blues", interpolation="nearest")
plt.colorbar()
plt.title("Flow Matrix")
plt.show()

# Displaying results
results = pd.DataFrame({
    "Best Permutation": [best_perm],
    "Best Cost": [best_cost],
    "Iterations": [len(costs)],
})
print("Simulated Annealing QAP Results:")
print(results)

# Save results to a CSV file
results.to_csv("Simulated_Annealing_QAP_Results.csv", index=False)
print("Results saved to 'Simulated_Annealing_QAP_Results.csv'")
--------------------------------------------------------------------------------
/Problems/Stochastic Gradient Descent Resource Allocation.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Function to generate a random but feasible resource allocation problem
def generate_feasible_resource_allocation_problem(num_resources, num_tasks):
    task_demands = np.random.randint(20, 60, size=num_tasks)  # Larger demands
    total_demand = np.sum(task_demands)
    resource_capacities = np.random.randint(50, 100, size=num_resources)  # Generate capacities
    resource_capacities *= (total_demand // np.sum(resource_capacities) + 1)  # Scale capacities to ensure feasibility
    cost_matrix = np.random.randint(10, 50, size=(num_tasks, num_resources))  # Higher cost range
    return resource_capacities, task_demands, cost_matrix

# Objective function: calculate total cost of allocation
def calculate_total_cost(allocation, cost_matrix):
    total_cost = 0
    for task, resource in enumerate(allocation):
        total_cost += cost_matrix[task, resource]
    return total_cost

# Check feasibility of an allocation
def is_feasible(allocation, task_demands, resource_capacities):
    allocated_resources = np.zeros(len(resource_capacities))
    for task, resource in enumerate(allocation):
        allocated_resources[resource] += task_demands[task]
    return np.all(allocated_resources <= resource_capacities)

# Stochastic Gradient Descent (SGD) for Resource Allocation
def stochastic_gradient_descent(task_demands, resource_capacities, cost_matrix, learning_rate=0.1, max_iterations=200):
    num_tasks, num_resources = cost_matrix.shape

    # Initialize allocation at random (one resource per task)
    allocation = np.random.randint(0, num_resources, size=num_tasks)

    def calculate_gradient(allocation):
        # Approximate gradient of the cost function
        gradient = np.zeros_like(allocation, dtype=float)
        for task in range(num_tasks):
            current_resource = allocation[task]
            costs = cost_matrix[task, :]
            gradient[task] = costs[current_resource] - np.min(costs)
        return gradient
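
    # Added note: this "gradient" is only a per-task descent hint (current cost
    # minus the cheapest cost available to that task), so the update below is a
    # rounding heuristic on resource indices rather than true SGD on a
    # differentiable objective.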
    def project_to_feasible(allocation):
        # Project allocation to feasible space (ensure capacity constraints)
        allocated_resources = np.zeros(num_resources)
        for task, resource in enumerate(allocation):
            allocated_resources[resource] += task_demands[task]

        for task, resource in enumerate(allocation):
            if allocated_resources[resource] > resource_capacities[resource]:
                # Reallocate task to a feasible resource
                feasible_resources = [r for r in range(num_resources) if allocated_resources[r] + task_demands[task] <= resource_capacities[r]]
                if feasible_resources:
                    new_resource = np.random.choice(feasible_resources)
                    allocated_resources[resource] -= task_demands[task]
                    allocated_resources[new_resource] += task_demands[task]
                    allocation[task] = new_resource
        return allocation

    cost_progress = []
    for iteration in range(max_iterations):
        # Calculate cost and gradient
        current_cost = calculate_total_cost(allocation, cost_matrix)
        gradient = calculate_gradient(allocation)

        # Update allocation using the SGD-style step
        allocation = allocation - learning_rate * gradient
        allocation = np.round(np.clip(allocation, 0, num_resources - 1)).astype(int)

        # Project back to feasible space
        allocation = project_to_feasible(allocation)

        # Track progress
        current_cost = calculate_total_cost(allocation, cost_matrix)
        cost_progress.append(current_cost)

    # Return the final allocation and cost progress (note: the final iterate,
    # not necessarily the best one seen)
    return allocation, current_cost, cost_progress

# Main pipeline execution
def main_pipeline(num_resources=6, num_tasks=10, learning_rate=0.03, max_iterations=100):
    # Generate a new random problem
    resource_capacities, task_demands, cost_matrix = generate_feasible_resource_allocation_problem(num_resources, num_tasks)

    # Solve using SGD
    best_allocation, best_cost, cost_progress = stochastic_gradient_descent(
        task_demands, resource_capacities, cost_matrix, learning_rate, max_iterations
    )

    # Plotting fitness progress
    plt.figure(figsize=(10, 6))
    plt.plot(cost_progress, marker="o", linestyle="--")
    plt.title("Fitness over Iterations - Stochastic Gradient Descent")
    plt.xlabel("Iteration")
    plt.ylabel("Fitness (Total Cost)")
    plt.grid()
    plt.show()

    # Displaying problem and results
    results = pd.DataFrame({
        "Task": np.arange(1, num_tasks + 1),
        "Allocated Resource": best_allocation,
        "Task Demand": task_demands,
        "Resource Capacity": [resource_capacities[res] for res in best_allocation],
        "Cost": [cost_matrix[task, best_allocation[task]] for task in range(num_tasks)],
    })
    summary = pd.DataFrame({
        "Total Cost": [best_cost],
        "Feasible": [is_feasible(best_allocation, task_demands, resource_capacities)]
    })

    # Save and display results
    results.to_csv("Resource_Allocation_Results.csv", index=False)
    summary.to_csv("Resource_Allocation_Summary.csv", index=False)

    print("Detailed Results:")
    print(results)
    print("\nSummary:")
    print(summary)

# Run the pipeline
main_pipeline()
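
# Added usage note: main_pipeline() runs with its defaults; a larger instance,
# e.g. main_pipeline(num_resources=8, num_tasks=20, learning_rate=0.05), is a
# reasonable experiment (the arguments shown are illustrative).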
--------------------------------------------------------------------------------
/Problems/Whale Optimization Algorithm Hub Location Allocation.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import random
from scipy.spatial import distance

# Whale Optimization Algorithm Parameters
POPULATION_SIZE = 50
MAX_GENERATIONS = 100
NUM_LOCATIONS = 10
NUM_HUBS = 3
MAX_COORDINATE = 100

def create_random_locations(num_locations, max_coordinate):
    return np.random.randint(0, max_coordinate, size=(num_locations, 2))

def calculate_cost(locations, hubs, allocation):
    total_cost = 0
    for i, hub in enumerate(hubs):
        allocated_points = np.where(allocation == i)[0]
        for point in allocated_points:
            total_cost += distance.euclidean(locations[point], hub)
    return total_cost

def initialize_population(locations, num_hubs, size):
    population = []
    for _ in range(size):
        hubs = locations[np.random.choice(len(locations), num_hubs, replace=False)]
        allocation = np.random.randint(0, num_hubs, size=len(locations))
        population.append((hubs, allocation))
    return population

def update_position_whale(hubs, leader_hubs, a):
    new_hubs = []
    for hub, leader_hub in zip(hubs, leader_hubs):
        r = np.random.rand()
        A = 2 * a * r - a
        C = 2 * r
        D = abs(C * leader_hub - hub)
        new_hub = leader_hub - A * D
        new_hubs.append(new_hub)
    return np.array(new_hubs)

def whale_optimization(locations, num_hubs):
    population = initialize_population(locations, num_hubs, POPULATION_SIZE)
    best_solution = None
    best_cost = float('inf')
    costs_over_time = []

    for generation in range(MAX_GENERATIONS):
        a = 2 - generation * (2 / MAX_GENERATIONS)

        for i in range(POPULATION_SIZE):
            hubs, allocation = population[i]
            cost = calculate_cost(locations, hubs, allocation)
            if cost < best_cost:
                best_solution = (hubs, allocation)
                best_cost = cost

        costs_over_time.append(best_cost)

        leader_hubs, _ = best_solution

        for i in range(POPULATION_SIZE):
            hubs, allocation = population[i]
            new_hubs = update_position_whale(hubs, leader_hubs, a)
            new_allocation = np.random.randint(0, num_hubs, size=len(locations))
            population[i] = (new_hubs, new_allocation)

        print(f"Generation {generation + 1}, Best Cost: {best_cost}")

    return best_solution, best_cost, costs_over_time
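
# Added note: new_allocation above is resampled uniformly at random each
# generation; a common refinement is to assign every location to its nearest
# hub instead, e.g. (sketch, not in the original):
# allocation = np.array([np.argmin([distance.euclidean(p, h) for h in hubs]) for p in locations])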
def plot_results(locations, hubs, allocation, title):
    plt.figure(figsize=(8, 6))
    colors = ['red', 'green', 'blue', 'purple', 'orange']  # Add more colors if needed

    for i, hub in enumerate(hubs):
        allocated_points = np.where(allocation == i)[0]
        plt.scatter(locations[allocated_points, 0], locations[allocated_points, 1], color=colors[i % len(colors)], label=f"Hub {i + 1} Allocated Points")
        plt.scatter(hub[0], hub[1], color=colors[i % len(colors)], marker="o", s=200, edgecolor="black", label=f"Hub {i + 1}")

        # Draw lines from hub to allocated points
        for point in allocated_points:
            plt.plot([hub[0], locations[point][0]], [hub[1], locations[point][1]], color=colors[i % len(colors)], linestyle='--', linewidth=1)

    plt.title(title)
    plt.xlabel("X Coordinate")
    plt.ylabel("Y Coordinate")
    plt.legend()
    plt.grid()
    plt.show()

def main():
    locations = create_random_locations(NUM_LOCATIONS, MAX_COORDINATE)
    best_solution, best_cost, costs_over_time = whale_optimization(locations, NUM_HUBS)

    hubs, allocation = best_solution

    print("\nFinal Best Cost:", best_cost)
    print("Best Hub Locations:", hubs)

    plot_results(locations, hubs, allocation, "Final Hub Location Allocation")

    # Plot optimization progress
    plt.figure(figsize=(10, 6))
    plt.plot(costs_over_time, marker='o')
    plt.title("WOA Optimization of Hub Location Allocation")
    plt.xlabel("Generation")
    plt.ylabel("Best Cost")
    plt.grid()
    plt.show()

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/Problems/f.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SeyedMuhammadHosseinMousavi/Optimization-Algorithms-and-Problems/e39c3beeff81574071eec6bcc81a30ff694c8c5c/Problems/f.jpg
--------------------------------------------------------------------------------
/Problems/tst.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SeyedMuhammadHosseinMousavi/Optimization-Algorithms-and-Problems/e39c3beeff81574071eec6bcc81a30ff694c8c5c/Problems/tst.jpg
--------------------------------------------------------------------------------
/f.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SeyedMuhammadHosseinMousavi/Optimization-Algorithms-and-Problems/e39c3beeff81574071eec6bcc81a30ff694c8c5c/f.jpg
--------------------------------------------------------------------------------
/tst.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SeyedMuhammadHosseinMousavi/Optimization-Algorithms-and-Problems/e39c3beeff81574071eec6bcc81a30ff694c8c5c/tst.jpg
--------------------------------------------------------------------------------