├── LICENSE
├── README.md
└── parPSO.py

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 Varsha Verma

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# PSO-parallel-mpi4py
This repository contains code for parallelizing the Particle Swarm Optimization (PSO) algorithm across multiple nodes with MPI (via the mpi4py Python package), using a master-slave model.

The objective function minimized by the PSO algorithm is the Alpine 1 function, which is made artificially expensive by introducing a pause in the objective function (`time.sleep(0.01)` in `costFunction`).

## Setup
In the master-slave model of parallelization there are two types of nodes: a single master node and any number of slave nodes. Consequently, there are two sets of instructions, one executed on the master node and the other on the slave nodes.
As can be seen in the code, the process with rank 0 runs the master instructions, while processes with any other rank run the slave instructions. Note that the code file should be saved at the same location with the same name on every node. For instance, I saved it as `parPSO.py` in the Documents directory on all the nodes.

## Running Instructions

Execute the following command on the terminal of the master node:

`mpiexec -f machinefile -n 3 python Documents/parPSO.py`

The above command runs the parallelized PSO on 3 nodes (1 master, 2 slaves).
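With MPICH-style `mpiexec` (the `-f` flag used above), the `machinefile` is typically a plain-text list of hosts, one per line. A minimal sketch for the 3-node run above might look like the following; the hostnames are placeholders for your own machines:

```
master-node
slave-node-1
slave-node-2
```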
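For reference, the Alpine 1 function implemented in `costFunction` of `parPSO.py` is, for a `d`-dimensional position vector (here `d = 20` and each coordinate is bounded to `[-10, 10]`):

```latex
f(x) = \sum_{i=1}^{d} \left| x_i \sin(x_i) + 0.1\,x_i \right|
```

Its global minimum is f(0) = 0 at the origin, so the best cost printed by the master should approach 0 as the swarm converges.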
--------------------------------------------------------------------------------

/parPSO.py:
--------------------------------------------------------------------------------
import random
from mpi4py import MPI
from collections import deque
import time
import math

# ARTIFICIALLY EXPENSIVE ALPINE 1 OBJECTIVE FUNCTION
def costFunction(x):
    a = 0
    time.sleep(0.01)  # artificial delay to make the evaluation expensive
    for i in x:
        a += abs((math.sin(i) * i) + (0.1 * i))
    return a


class Particle:
    def __init__(self, bounds):
        self.position = []
        self.velocity = []
        self.best_pos_in = []              # personal best position
        self.best_cost_in = float('inf')   # personal best cost
        self.cost = float('inf')

        # random initial velocity and random initial position inside the bounds
        for i in range(0, num_dimensions):
            self.velocity.append(random.uniform(-1, 1))
            self.position.append(random.uniform(bounds[0], bounds[1]))

    def update_velocity(self, best_pos_g):
        # v = w*v + c1*r1*(personal_best - x) + c2*r2*(global_best - x)
        w = 0.5   # inertia weight
        c1 = 2    # cognitive coefficient
        c2 = 2    # social coefficient

        for i in range(0, num_dimensions):
            r1 = random.random()
            r2 = random.random()

            vel_cognitive = c1 * r1 * (self.best_pos_in[i] - self.position[i])
            vel_social = c2 * r2 * (best_pos_g[i] - self.position[i])
            self.velocity[i] = w * self.velocity[i] + vel_social + vel_cognitive

    def update_position(self, bounds):
        for i in range(0, num_dimensions):
            self.position[i] += self.velocity[i]

            # clamp the position to the search bounds
            if self.position[i] < bounds[0]:
                self.position[i] = bounds[0]
            if self.position[i] > bounds[1]:
                self.position[i] = bounds[1]


class PSO:
    def __init__(self, num_d, bounds, num_particles, num_iter):
        global num_dimensions
        num_dimensions = num_d

        best_cost_g = float('inf')   # global best cost
        best_pos_g = []              # global best position

        swarm = []
        for i in range(0, num_particles):
            swarm.append(Particle(bounds))

        for i in range(num_iter):

            # queue of particle indices that still need to be evaluated
            evalQueue = deque(range(num_particles))

            # POP AND SEND ONE PARTICLE TO EACH SLAVE NODE
            for dest_rank in range(1, size):
                p = evalQueue.popleft()
                obj_comm = (p, swarm[p].position)
                comm.send(obj_comm, dest=dest_rank)

            idle = 0
            # RECEIVE RESULTS AND KEEP FEEDING SLAVES UNTIL THE QUEUE IS EMPTY
            while True:
                obj_recv = comm.recv(source=MPI.ANY_SOURCE, status=status)
                id_recv = obj_recv[0]
                f_recv = obj_recv[1]
                src_rank = status.Get_source()

                # update the particle's personal best
                swarm[id_recv].cost = f_recv
                if f_recv < swarm[id_recv].best_cost_in:
                    swarm[id_recv].best_pos_in = list(swarm[id_recv].position)
                    swarm[id_recv].best_cost_in = float(f_recv)

                # update the global best
                if f_recv < best_cost_g:
                    best_cost_g = float(f_recv)
                    best_pos_g = list(swarm[id_recv].position)

                # send the next queued particle to the slave that just finished,
                # or count it as idle if nothing is left this iteration
                if len(evalQueue) != 0:
                    j = evalQueue.popleft()
                    obj_comm = (j, swarm[j].position)
                    comm.send(obj_comm, dest=src_rank)
                else:
                    idle += 1

                if idle == size - 1:
                    break

            # all costs for this iteration are in: move the swarm
            for j in range(0, num_particles):
                swarm[j].update_velocity(best_pos_g)
                swarm[j].update_position(bounds)

        # tell every slave to terminate (tag 200 is the shutdown signal)
        for k in range(1, size):
            comm.send(0, dest=k, tag=200)
        print('Best position : ')
        print(best_pos_g)
        print('Best cost : ')
        print(best_cost_g)


comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
status = MPI.Status()

# MASTER NODE: run the PSO driver and time it
if rank == 0:
    start_time = time.time()
    num_d = 20
    bounds = (-10, 10)
    PSO(num_d, bounds, num_particles=15, num_iter=400)
    print("time taken:")
    print(time.time() - start_time)
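# Message protocol between master and slaves: the master sends a
# (particle_index, position) tuple, the slave replies with
# (particle_index, cost), and a message carrying tag 200 tells the slave to stop.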
# SLAVE NODES: evaluate positions sent by the master until told to stop
else:
    while True:
        obj_recv = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        tag = status.Get_tag()
        if tag == 200:
            break

        f = costFunction(obj_recv[1])
        obj_sent = (obj_recv[0], f)
        comm.send(obj_sent, dest=0)
--------------------------------------------------------------------------------