├── .gitignore
├── max_reward_large.png
├── max_reward_small.png
├── max_time_large.png
├── max_time_medium.png
├── max_time_small.png
├── mean_time_large.png
├── mean_time_medium.png
├── mean_time_small.png
├── max_reward_medium.png
├── mean_reward_large.png
├── mean_reward_medium.png
├── mean_reward_small.png
├── task_pic
│   ├── large
│   │   └── ACO-1-1.png
│   ├── medium
│   │   └── ACO-1-1.png
│   └── small
│       └── ACO-1-1.png
├── readme.md
├── aco.py
├── small_size_result.csv
├── large_size_result.csv
├── medium_size_result.csv
├── ga.py
├── evaluate.py
└── pso.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
--------------------------------------------------------------------------------
/max_reward_large.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/max_reward_large.png
--------------------------------------------------------------------------------
/max_reward_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/max_reward_small.png
--------------------------------------------------------------------------------
/max_time_large.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/max_time_large.png
--------------------------------------------------------------------------------
/max_time_medium.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/max_time_medium.png
--------------------------------------------------------------------------------
/max_time_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/max_time_small.png
--------------------------------------------------------------------------------
/mean_time_large.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/mean_time_large.png
--------------------------------------------------------------------------------
/mean_time_medium.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/mean_time_medium.png
--------------------------------------------------------------------------------
/mean_time_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/mean_time_small.png
--------------------------------------------------------------------------------
/max_reward_medium.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/max_reward_medium.png
--------------------------------------------------------------------------------
/mean_reward_large.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/mean_reward_large.png
--------------------------------------------------------------------------------
/mean_reward_medium.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/mean_reward_medium.png
--------------------------------------------------------------------------------
/mean_reward_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/mean_reward_small.png
--------------------------------------------------------------------------------
/task_pic/large/ACO-1-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/task_pic/large/ACO-1-1.png
--------------------------------------------------------------------------------
/task_pic/medium/ACO-1-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/task_pic/medium/ACO-1-1.png
--------------------------------------------------------------------------------
/task_pic/small/ACO-1-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robin-shaun/Multi-UAV-Task-Assignment-Benchmark/HEAD/task_pic/small/ACO-1-1.png
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # Multi-UAV Task Assignment Benchmark
2 | ## A benchmark for testing multi-UAV task assignment algorithms
3 |
4 | ## Introduction
5 | A benchmark for multi-UAV task assignment is presented in order to evaluate different algorithms. A class of multi-UAV task assignment problems is modeled as an extended Team Orienteering Problem. Three intelligent algorithms, i.e., Genetic Algorithm (GA), Ant Colony Optimization (ACO) and Particle Swarm Optimization (PSO), are implemented to solve the problem. A series of experiments with different settings are conducted to evaluate the three algorithms. The modeled problem and the evaluation results constitute a benchmark, which can be used to evaluate other algorithms for multi-UAV task assignment problems.
6 |
7 | Note that the three algorithms run on three separate CPU cores (one per algorithm), which means that there is no parallel optimization within any single algorithm in this benchmark.
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | Please refer to the paper for more details.
16 |
17 | K. Xiao, J. Lu, Y. Nie, L. Ma, X. Wang and G. Wang, "A Benchmark for Multi-UAV Task Assignment of an Extended Team Orienteering Problem," 2022 China Automation Congress (CAC), Xiamen, China, 2022, pp. 6966-6970, doi: 10.1109/CAC57257.2022.10054991.
18 |
19 | ArXiv preprint **[arXiv:2009.00363](https://arxiv.org/abs/2009.00363)**
20 |
21 |
22 | ## Usage
23 |
24 | ### 1. Algorithm input and output
25 |
26 | Algorithm input includes the vehicle number (scalar), the speeds of the vehicles (an array with one entry per vehicle), the target number (scalar $n$), the targets (an $(n+1)\times4$ array whose first row is the depot and whose columns are x position, y position, reward, and time consumption to finish the mission), and the time limit (scalar). The code below is the initialization of the class GA in `ga.py`.
27 |
28 | ```python
29 | def __init__(self, vehicle_num, vehicles_speed, target_num, targets, time_lim)
30 | ```
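
For illustration, these inputs can be built directly with NumPy in the shapes described above. The toy numbers below (2 vehicles, 3 targets) are invented for this example and are not taken from the benchmark.

```python
import numpy as np
from ga import GA

vehicle_num = 2
vehicles_speed = np.array([10, 15])   # one speed per vehicle
target_num = 3
# (target_num+1) x 4 array: row 0 is the depot; columns are x, y, reward, time consumption
targets = np.array([
    [  0,   0, 0,  0],   # depot
    [120,  40, 5, 10],
    [-60,  90, 8, 20],
    [ 30, -80, 3, 15],
])
time_lim = 300.0

ga = GA(vehicle_num, vehicles_speed, target_num, targets, time_lim)
```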
31 |
32 | The algorithm class should provide a method called `run()` that returns the task assignment plan (a list of lists, e.g. [[28, 19, 11], [25, 22, 7, 16, 17, 23], [21, 26, 12, 9, 6, 3], [5, 15, 1], [18, 20, 29]], where each sublist is the visiting order of one vehicle) and the computation time (scalar). A minimal skeleton of such a class is sketched below.
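
As a reference, a minimal skeleton for plugging a custom solver into the benchmark might look like the following. The class name `MyAlgorithm` and the empty placeholder plan are purely illustrative; a real solver would build routes that respect `time_lim`.

```python
import time


class MyAlgorithm():
    """Illustrative skeleton that follows the benchmark interface."""

    def __init__(self, vehicle_num, vehicles_speed, target_num, targets, time_lim):
        self.vehicle_num = vehicle_num
        self.vehicles_speed = vehicles_speed
        self.target_num = target_num
        self.targets = targets          # row 0 is the depot
        self.time_lim = time_lim

    def run(self):
        start = time.time()
        # Placeholder: assign no targets to any vehicle.
        task_assignment = [[] for _ in range(self.vehicle_num)]
        return task_assignment, time.time() - start
```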
33 |
34 | ### 2. Evaluate
35 |
36 | You can replace any of the algorithms below with another algorithm in `evaluate.py` and then run `python evaluate.py`. If you don't want to evaluate all three algorithms together, modify the code accordingly (this is straightforward). A sketch of such a replacement follows the snippet below.
37 |
38 | ```python
39 | ga = GA(vehicle_num,env.vehicles_speed,target_num,env.targets,env.time_lim)
40 | aco = ACO(vehicle_num,target_num,env.vehicles_speed,env.targets,env.time_lim)
41 | pso = PSO(vehicle_num,target_num ,env.targets,env.vehicles_speed,env.time_lim)
42 | ga_result=p.apply_async(ga.run)
43 | aco_result=p.apply_async(aco.run)
44 | pso_result=p.apply_async(pso.run)
45 | p.close()
46 | p.join()
47 | ga_task_assignment = ga_result.get()[0]
48 | env.run(ga_task_assignment,'GA',i+1,j+1)
49 | re_ga[i].append((env.total_reward,ga_result.get()[1]))
50 | env.reset()
51 | aco_task_assignment = aco_result.get()[0]
52 | env.run(aco_task_assignment,'ACO',i+1,j+1)
53 | re_aco[i].append((env.total_reward,aco_result.get()[1]))
54 | env.reset()
55 | pso_task_assignment = pso_result.get()[0]
56 | env.run(pso_task_assignment,'PSO',i+1,j+1)
57 | re_pso[i].append((env.total_reward,pso_result.get()[1]))
58 | ```
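
For example, swapping GA for the hypothetical `MyAlgorithm` from the skeleton above would only change the construction and result-handling lines inside this loop (after importing your class at the top of `evaluate.py`, and defining a result list such as `re_my` like `re_ga`). This is a sketch, not code from the repository.

```python
my_algo = MyAlgorithm(vehicle_num, env.vehicles_speed, target_num, env.targets, env.time_lim)
my_result = p.apply_async(my_algo.run)
p.close()
p.join()
my_task_assignment = my_result.get()[0]
env.run(my_task_assignment, 'MyAlgorithm', i+1, j+1)
re_my[i].append((env.total_reward, my_result.get()[1]))
env.reset()
```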
59 |
60 | ### 3. About reinforcement learning
61 |
62 | In `Env()` in `evaluate.py`, the `step` function is intended for reinforcement learning. Because this part is still under development, we cannot supply a demo. If your algorithm is based on reinforcement learning, you can try to train it with `Env()`; a rough interaction-loop sketch is given below. Pull requests and issues are welcome.
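
Assuming that `time_cost` and `repeat_cost` are supplied to `Env` (they default to `None`), a random-policy interaction loop could look like the following. The parameter values are arbitrary and the loop only illustrates how `step` is driven; it is not a working RL demo.

```python
import random
from evaluate import Env

# Arbitrary illustrative settings; time_cost and repeat_cost must be set for step() to work.
env = Env(vehicle_num=5, target_num=30, map_size=5e3,
          visualized=False, time_cost=0.1, repeat_cost=10)

while not env.end:
    # One target index per vehicle (index 0 is the depot).
    action = [random.randint(0, 30) for _ in range(5)]
    env.step(action)
print("episode reward:", env.total_reward)
```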
63 |
64 | ## Enhancement
65 |
66 | This [repository](https://github.com/dietmarwo/Multi-UAV-Task-Assignment-Benchmark) provides substantial enhancements and can be used for higher performance. Thanks to [dietmarwo](https://github.com/dietmarwo) for the nice work.
67 |
68 | 1) GA uses [numba](https://numba.pydata.org/) for a dramatic speedup. Parameters are adapted so that the
69 | execution time remains the same: popsize 50 -> 300, iterations 500 -> 6000.
70 | For this reason GA now performs much better than the original version.
71 |
72 | 2) Experiments are configured so that wall time for the small size is balanced. This means:
73 | increased effort for GA, decreased effort for ACO. For medium / large
74 | problem sizes you can see which algorithms scale badly, i.e., whose execution time grows
75 | superlinearly with the problem size. Avoid these for large problems.
76 |
77 | 3) Adds a standard continuous optimization algorithm: [BiteOpt](https://github.com/avaneev/biteopt)
78 | from Aleksey Vaneev, using the same fitness function as GA.py.
79 | BiteOpt is the only algorithm included which works well with a large problem size.
80 | It is by far the simplest implementation: only the fitness function needs
81 | to be coded, since we can apply the continuous optimization library
82 | [fcmaes](https://github.com/dietmarwo/fast-cma-es). Execute `pip install fcmaes` to use it.
83 |
84 | 4) Uses NestablePool to enable BiteOpt multiprocessing: Many BiteOpt optimization runs
85 | are performed in parallel and the best result is returned. Set workers=1
86 | if you want to test BiteOpt single threaded.
87 |
88 | 5) All results are created using an AMD 5950X 16-core processor
89 | utilizing all cores: 29 parallel BiteOpt threads, while the other 3 algorithms remain single-threaded.
90 |
91 | 6) Added test_bite.py where you can monitor the progress of BiteOpt applied to the problem.
92 |
93 | 7) Added test_mode.py where you can monitor the progress of fcmaes-MODE applied to the problem and compare it
94 | to BiteOpt for the same instance. fcmaes-MODE is a multi-objective optimizer applied to a
95 | multi-objective variant of the problem.
96 | Objectives are: reward (to be maximized), maximal time (to be minimized), energy (to be minimized).
97 | The maximal time constraint from the single objective case is still valid.
98 | Energy consumption is approximated by `sum(dt*v*v)`; a small sketch of this energy proxy is given below.
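
As a rough illustration of that energy proxy, assuming constant speed per vehicle and the same targets layout as in this benchmark (row 0 is the depot, first two columns are x and y), the term `sum(dt*v*v)` could be computed per route as follows. The helper below is not part of either repository.

```python
import numpy as np

def route_energy(route, targets, speed):
    """Approximate energy of one vehicle route as sum(dt * v * v),
    where dt is the travel time of each leg at constant speed v."""
    energy = 0.0
    pos = targets[0, :2]              # start at the depot (row 0)
    for k in route:                   # route: list of target indices, e.g. [5, 12, 3]
        dt = np.linalg.norm(targets[k, :2] - pos) / speed
        energy += dt * speed * speed  # dt * v^2, i.e. leg distance * v
        pos = targets[k, :2]
    return energy
```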
99 |
100 |
101 |
102 |
103 |
--------------------------------------------------------------------------------
/aco.py:
--------------------------------------------------------------------------------
1 | import random
2 | import numpy as np
3 | import math
4 | import time
5 | import os
6 |
7 | class ACO():
8 | def __init__(self, vehicle_num, target_num,vehicle_speed, target, time_lim):
9 | self.num_type_ant = vehicle_num
10 | self.num_city = target_num+1 #number of cities
11 | self.group = 200
12 | self.num_ant = self.group*self.num_type_ant #number of ants
13 | self.ant_vel = vehicle_speed
14 | self.cut_time = time_lim
15 | self.oneee = np.zeros((4,1))
16 | self.target = target
17 | self.alpha = 1 #pheromone
18 | self.beta = 2
19 | self.k1 = 0.03
20 | self.iter_max = 150
21 | #matrix of the distances between cities
22 | def distance_matrix(self):
23 | dis_mat = []
24 | for i in range(self.num_city):
25 | dis_mat_each = []
26 | for j in range(self.num_city):
27 | dis = math.sqrt(pow(self.target[i][0]-self.target[j][0],2)+pow(self.target[i][1]-self.target[j][1],2))
28 | dis_mat_each.append(dis)
29 | dis_mat.append(dis_mat_each)
30 | return dis_mat
31 | def run(self):
32 | print("ACO start, pid: %s" % os.getpid())
33 | start_time = time.time()
34 | #distances of nodes
35 | dis_list = self.distance_matrix()
36 | dis_mat = np.array(dis_list)
37 | value_init = self.target[:,2].transpose()
38 | delay_init = self.target[:,3].transpose()
39 | pheromone_mat = np.ones((self.num_type_ant,self.num_city,self.num_city))
40 |         # best path found so far for each vehicle (ant type)
41 | path_new = [[0]for i in range (self.num_type_ant)]
42 | count_iter = 0
43 | while count_iter < self.iter_max:
44 | path_sum = np.zeros((self.num_ant,1))
45 | time_sum = np.zeros((self.num_ant,1))
46 | value_sum = np.zeros((self.num_ant,1))
47 | path_mat=[[0]for i in range (self.num_ant)]
48 | value = np.zeros((self.group,1))
49 | atten = np.ones((self.num_type_ant,1)) * 0.2
50 | for ant in range(self.num_ant):
51 | ant_type = ant % self.num_type_ant
52 | visit = 0
53 | if ant_type == 0:
54 | unvisit_list=list(range(1,self.num_city))#have not visit
55 | for j in range(1,self.num_city):
56 | #choice of next city
57 | trans_list=[]
58 | tran_sum=0
59 | trans=0
60 | #if len(unvisit_list)==0:
61 | #print('len(unvisit_list)==0')
62 | for k in range(len(unvisit_list)): # to decide which node to visit
63 | trans +=np.power(pheromone_mat[ant_type][visit][unvisit_list[k]],self.alpha)*np.power(value_init[unvisit_list[k]]*self.ant_vel[ant_type]/(dis_mat[visit][unvisit_list[k]]*delay_init[unvisit_list[k]]),self.beta)
64 | #trans +=np.power(pheromone_mat[ant_type][unvisit_list[k]],self.alpha)*np.power(0.05*value_init[unvisit_list[k]],self.beta)
65 | trans_list.append(trans)
66 | tran_sum = trans
67 | rand = random.uniform(0,tran_sum)
68 | for t in range(len(trans_list)):
69 | if(rand <= trans_list[t]):
70 | visit_next = unvisit_list[t]
71 | break
72 | else:
73 | continue
74 | path_mat[ant].append(visit_next)
75 | path_sum[ant] += dis_mat[path_mat[ant][j-1]][path_mat[ant][j]]
76 | time_sum[ant] += path_sum[ant] / self.ant_vel[ant_type] + delay_init[visit_next]
77 | if time_sum[ant] > self.cut_time:
78 | time_sum[ant]-=path_sum[ant] / self.ant_vel[ant_type] + delay_init[visit_next]
79 | path_mat[ant].pop()
80 | break
81 | value_sum[ant] += value_init[visit_next]
82 | unvisit_list.remove(visit_next)#update
83 | visit = visit_next
84 | if (ant_type) == self.num_type_ant-1:
85 | small_group = int(ant/self.num_type_ant)
86 | for k in range (self.num_type_ant):
87 | value[small_group]+= value_sum[ant-k]
88 | #iteration
89 | if count_iter == 0:
90 | value_new = max(value)
91 | value = value.tolist()
92 | for k in range (0,self.num_type_ant):
93 | path_new[k] = path_mat[value.index(value_new)*self.num_type_ant+k]
94 | path_new[k].remove(0)
95 | else:
96 | if max(value) > value_new:
97 | value_new = max(value)
98 | value = value.tolist()
99 | for k in range (0,self.num_type_ant):
100 | path_new[k] = path_mat[value.index(value_new)*self.num_type_ant+k]
101 | path_new[k].remove(0)
102 |
103 | #update pheromone
104 | pheromone_change = np.zeros((self.num_type_ant,self.num_city,self.num_city))
105 | for i in range(self.num_ant):
106 | length = len(path_mat[i])
107 | m = i%self.num_type_ant
108 | n = int(i/self.num_type_ant)
109 | for j in range(length-1):
110 | pheromone_change[m][path_mat[i][j]][path_mat[i][j+1]]+= value_init[path_mat[i][j+1]]*self.ant_vel[m]/(dis_mat[path_mat[i][j]][path_mat[i][j+1]]*delay_init[path_mat[i][j+1]])
111 | atten[m] += (value_sum[i]/(np.power((value_new-value[n]),4)+1))/self.group
112 |
113 | for k in range (self.num_type_ant):
114 | pheromone_mat[k]=(1-atten[k])*pheromone_mat[k]+pheromone_change[k]
115 | count_iter += 1
116 |
117 | print("ACO result:", path_new)
118 | end_time = time.time()
119 | print("ACO time:", end_time - start_time)
120 | return path_new, end_time - start_time
121 |
122 |
--------------------------------------------------------------------------------
/small_size_result.csv:
--------------------------------------------------------------------------------
1 | ,aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time
2 | 0,89,88.16137266159058,119,12.685494184494019,107,13.414397716522217
3 | 1,88,91.05341100692749,106,9.964252710342407,107,16.068947315216064
4 | 2,89,91.91174960136414,109,14.250640153884888,107,16.815466165542603
5 | 3,89,92.22553205490112,110,21.893151998519897,107,17.22126531600952
6 | 4,89,92.13908076286316,101,10.614777565002441,107,16.337905406951904
7 | 5,89,92.50885510444641,121,15.014703512191772,107,17.107726097106934
8 | 6,90,92.41064667701721,109,17.080315113067627,107,17.380303859710693
9 | 7,89,92.84331011772156,119,20.32515835762024,107,16.93877387046814
10 | 8,88,92.46119904518127,114,19.172377824783325,107,16.968574047088623
11 | 9,90,94.00208711624146,127,21.729254484176636,107,17.066615104675293
12 | 10,96,89.81863832473755,117,22.284271717071533,107,17.06852388381958
13 | 11,99,88.96507263183594,113,19.54985523223877,107,17.087736129760742
14 | 12,96,88.38604950904846,109,14.674241304397583,107,16.747975826263428
15 | 13,97,89.2066752910614,110,11.188013553619385,107,16.291066884994507
16 | 14,96,89.45146775245667,115,18.54390525817871,107,17.09095311164856
17 | 15,97,91.43690013885498,118,34.3544921875,107,17.089507579803467
18 | 16,101,87.81503558158875,116,16.678542375564575,107,17.032145738601685
19 | 17,95,87.75125932693481,116,13.018948793411255,107,16.583208084106445
20 | 18,97,87.48667025566101,103,11.756951570510864,107,16.502718687057495
21 | 19,96,89.22999024391174,117,23.240997076034546,107,17.057995319366455
22 | 20,96,90.63653469085693,113,20.940587043762207,102,17.042691230773926
23 | 21,91,90.38828539848328,104,17.667251586914062,102,17.154610633850098
24 | 22,87,90.0098135471344,98,17.66662073135376,102,16.96206521987915
25 | 23,87,89.37335777282715,97,12.019228219985962,102,16.48302674293518
26 | 24,85,90.64067506790161,114,23.917268753051758,102,17.08442449569702
27 | 25,90,91.06858992576599,108,20.25761079788208,102,17.10637593269348
28 | 26,87,89.10331749916077,103,10.527708768844604,102,16.272404670715332
29 | 27,86,90.21255588531494,111,17.25125241279602,102,17.043729782104492
30 | 28,88,90.96662783622742,111,24.268309354782104,102,17.005598783493042
31 | 29,88,90.03387355804443,109,13.798240184783936,102,16.55376434326172
32 | 30,67,85.81624722480774,75,19.326607704162598,72,17.03825044631958
33 | 31,67,85.53707933425903,80,22.211409330368042,72,17.13922429084778
34 | 32,67,85.78650307655334,79,19.998062133789062,72,17.11928629875183
35 | 33,67,84.82013392448425,81,11.410378456115723,72,16.370493173599243
36 | 34,67,86.44931316375732,79,28.120026111602783,72,17.011059761047363
37 | 35,67,84.43776869773865,78,14.097299098968506,72,16.78164315223694
38 | 36,67,84.38073658943176,75,10.875871896743774,72,16.290704488754272
39 | 37,67,85.27193593978882,77,18.085707426071167,72,17.11751627922058
40 | 38,67,86.35783362388611,77,20.6546049118042,72,17.204835653305054
41 | 39,67,84.17668747901917,70,9.783939123153687,72,16.234663009643555
42 | 40,90,81.11320877075195,109,25.354978561401367,94,17.01652693748474
43 | 41,92,81.4474401473999,115,25.54797601699829,94,17.27031111717224
44 | 42,90,82.45976829528809,105,21.565988302230835,94,17.12051248550415
45 | 43,86,81.89453530311584,116,25.38959002494812,94,17.148070096969604
46 | 44,90,80.1114649772644,108,12.730968475341797,94,16.551698684692383
47 | 45,88,80.46094584465027,112,16.600263595581055,94,17.025696277618408
48 | 46,92,80.10696768760681,108,10.806996822357178,94,16.190462350845337
49 | 47,94,82.78089380264282,106,23.92259168624878,94,17.245839834213257
50 | 48,87,82.29063534736633,110,17.997102975845337,94,17.09163522720337
51 | 49,94,79.32447719573975,106,10.254770755767822,94,16.159439086914062
52 | 50,63,81.13122916221619,85,19.498087406158447,80,17.062880754470825
53 | 51,64,78.95552730560303,73,11.968117952346802,80,16.42030930519104
54 | 52,64,80.55506777763367,83,19.04611301422119,80,17.044256925582886
55 | 53,64,80.00666880607605,79,11.748615503311157,80,16.27301836013794
56 | 54,66,80.77283143997192,88,11.66093134880066,80,16.391525506973267
57 | 55,64,82.00660061836243,88,24.136553049087524,80,17.06270742416382
58 | 56,66,81.55947065353394,85,16.429919242858887,80,16.999276161193848
59 | 57,63,81.114262342453,87,22.340787172317505,80,17.043132543563843
60 | 58,63,81.04138517379761,78,18.89263892173767,80,17.08504867553711
61 | 59,64,80.52739334106445,93,13.616387367248535,80,16.57269597053528
62 | 60,82,86.7540352344513,99,13.066121578216553,94,16.544466018676758
63 | 61,81,87.89562153816223,104,25.433122873306274,94,17.053762197494507
64 | 62,82,86.26548409461975,99,12.918514966964722,94,16.66313886642456
65 | 63,83,87.24347186088562,106,21.98454189300537,94,17.103214263916016
66 | 64,82,86.60797476768494,98,12.473426580429077,94,16.57561159133911
67 | 65,84,87.11871242523193,103,10.223820686340332,94,16.257148027420044
68 | 66,81,88.71003365516663,94,24.597357988357544,94,17.143748998641968
69 | 67,82,86.51209807395935,96,14.048967838287354,94,16.72418999671936
70 | 68,81,86.93004393577576,91,17.69091010093689,94,17.053284645080566
71 | 69,81,87.69031119346619,89,13.707175254821777,94,16.6680109500885
72 | 70,79,85.87792015075684,86,9.261616945266724,91,16.168455839157104
73 | 71,79,88.14101362228394,96,23.5689058303833,91,17.132070302963257
74 | 72,79,88.25272393226624,96,26.78891372680664,91,17.096915006637573
75 | 73,79,89.53712511062622,96,28.207839250564575,91,17.064720630645752
76 | 74,79,87.57802844047546,97,14.012932777404785,91,16.740302085876465
77 | 75,79,87.26567029953003,86,12.413164615631104,91,16.392486333847046
78 | 76,79,86.66206288337708,89,11.749331951141357,91,16.469265460968018
79 | 77,79,87.8359739780426,85,17.515364170074463,91,17.102705717086792
80 | 78,80,88.0450918674469,94,23.927947282791138,91,17.082552194595337
81 | 79,79,87.65122652053833,93,17.091141939163208,91,17.03953456878662
82 | 80,74,84.19559216499329,91,26.049842834472656,83,16.9984188079834
83 | 81,75,82.51725816726685,85,16.961402416229248,83,17.110384464263916
84 | 82,75,82.8316662311554,89,23.545618057250977,83,17.114362478256226
85 | 83,73,82.98265886306763,84,23.422379732131958,83,17.07512879371643
86 | 84,73,81.70284986495972,92,18.57567572593689,83,17.09586262702942
87 | 85,76,83.29867267608643,90,24.692944288253784,83,17.056284427642822
88 | 86,76,81.04190707206726,93,11.550926208496094,83,16.41378617286682
89 | 87,74,82.97272729873657,94,18.214133262634277,83,17.02610754966736
90 | 88,72,81.47475123405457,86,10.687882900238037,83,16.277140855789185
91 | 89,73,82.54533529281616,89,17.926280975341797,83,17.130127668380737
92 | 90,88,89.74022126197815,128,20.152071475982666,107,17.193649530410767
93 | 91,88,88.83557057380676,115,14.007786512374878,107,16.752809762954712
94 | 92,89,89.99075937271118,120,21.46599245071411,107,17.087929248809814
95 | 93,89,90.57273888587952,116,22.19508957862854,107,17.231003284454346
96 | 94,91,89.41200828552246,109,13.885975360870361,107,16.87487244606018
97 | 95,88,89.83750343322754,115,20.845364570617676,107,17.157454013824463
98 | 96,91,91.00219821929932,124,27.728176832199097,107,17.07404375076294
99 | 97,93,91.0686559677124,117,25.557191371917725,107,17.23515510559082
100 | 98,87,90.42128872871399,112,15.197402715682983,107,16.944777965545654
101 | 99,90,89.05649876594543,100,14.382555484771729,107,16.822237491607666
102 |
--------------------------------------------------------------------------------
/large_size_result.csv:
--------------------------------------------------------------------------------
1 | ,aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time
2 | 0,296,908.7814054489136,247,55.92802929878235,263,475.8543794155121
3 | 1,291,915.7366240024567,259,66.25442147254944,263,472.1400876045227
4 | 2,292,917.8597526550293,254,74.90953588485718,263,474.662939786911
5 | 3,289,929.387636423111,260,103.34872436523438,263,478.1414213180542
6 | 4,289,923.429899930954,251,60.22564744949341,263,468.2993767261505
7 | 5,288,921.4861361980438,256,101.24155569076538,263,480.2911822795868
8 | 6,293,899.2834107875824,255,80.45500588417053,263,467.8873429298401
9 | 7,289,920.9990880489349,258,104.65078663825989,263,473.22990322113037
10 | 8,289,915.464262008667,252,55.428141355514526,263,470.7831304073334
11 | 9,292,908.4859659671783,242,54.579482316970825,263,468.63810992240906
12 | 10,273,951.7187411785126,249,53.21723532676697,267,471.1878535747528
13 | 11,273,967.2745745182037,265,127.98299217224121,267,489.7042224407196
14 | 12,270,974.3173124790192,260,195.45258331298828,267,495.3276345729828
15 | 13,275,963.2233729362488,246,50.30011963844299,267,470.9858467578888
16 | 14,271,962.010968208313,248,73.91375541687012,267,471.8012545108795
17 | 15,271,962.2612085342407,252,77.44514584541321,267,481.26786375045776
18 | 16,267,945.5351057052612,245,58.64225435256958,267,477.9602701663971
19 | 17,277,948.2806112766266,255,67.99747490882874,267,468.89854645729065
20 | 18,271,967.6417164802551,252,129.3072385787964,267,478.41221261024475
21 | 19,274,963.3537228107452,253,133.60208249092102,267,479.5689525604248
22 | 20,290,966.2115695476532,250,122.1526083946228,260,489.93380999565125
23 | 21,287,943.0081570148468,250,62.05885910987854,260,477.671777009964
24 | 22,290,947.1903564929962,254,59.37166452407837,260,467.77480578422546
25 | 23,286,959.2566442489624,276,91.52135014533997,260,477.249960899353
26 | 24,288,954.6075274944305,268,48.26884913444519,260,464.37573313713074
27 | 25,284,936.1187009811401,273,57.24583983421326,260,477.16314125061035
28 | 26,286,954.3773159980774,259,82.08687591552734,260,475.90100502967834
29 | 27,289,949.4377288818359,254,53.36060166358948,260,470.7586085796356
30 | 28,290,952.0964720249176,273,65.8470516204834,260,474.4114272594452
31 | 29,290,944.5154075622559,275,43.74000549316406,260,468.4188332557678
32 | 30,327,994.9844787120819,294,81.43704128265381,301,474.0977404117584
33 | 31,323,1018.6526775360107,273,114.55374956130981,301,498.5897653102875
34 | 32,321,1009.1453545093536,285,94.25002026557922,301,486.3076343536377
35 | 33,327,1019.1480383872986,278,55.407536029815674,301,488.1904435157776
36 | 34,325,1007.914253950119,293,83.80115604400635,301,491.72301745414734
37 | 35,325,1024.5869517326355,282,148.9419755935669,301,497.10984230041504
38 | 36,323,1020.457249879837,295,108.69291090965271,301,496.7013669013977
39 | 37,326,1013.691241979599,271,73.25992369651794,301,494.8483748435974
40 | 38,323,1020.1873610019684,278,48.434046030044556,301,490.01100039482117
41 | 39,325,1021.3731291294098,292,115.58995175361633,301,488.44017720222473
42 | 40,278,976.9366610050201,264,138.9213318824768,273,499.79298758506775
43 | 41,275,965.3231558799744,262,145.9869430065155,273,498.0558907985687
44 | 42,280,962.862530708313,271,91.95361614227295,273,489.8705041408539
45 | 43,279,959.0885939598083,236,54.67619323730469,273,486.26035809516907
46 | 44,276,973.558468580246,255,105.7680230140686,273,491.402090549469
47 | 45,279,967.0545673370361,248,83.93505239486694,273,485.08744978904724
48 | 46,276,957.583824634552,239,93.60761904716492,273,494.0147354602814
49 | 47,275,965.79727602005,264,104.30339407920837,273,489.8355438709259
50 | 48,280,971.2357912063599,247,81.43815469741821,273,485.04322052001953
51 | 49,278,973.0727701187134,254,91.01016688346863,273,489.8650426864624
52 | 50,291,952.5716059207916,250,101.36161661148071,275,494.92200326919556
53 | 51,294,946.210232257843,254,66.17070937156677,275,481.7474868297577
54 | 52,294,946.330258846283,256,88.53258848190308,275,489.36784863471985
55 | 53,294,940.2625517845154,248,63.820109605789185,275,485.3837275505066
56 | 54,291,951.8322811126709,256,68.07165431976318,275,488.7123284339905
57 | 55,299,958.7923038005829,265,68.89578294754028,275,491.05396008491516
58 | 56,296,951.3731620311737,245,73.54164481163025,275,486.9369945526123
59 | 57,291,958.3713037967682,258,89.91634559631348,275,491.3002550601959
60 | 58,290,945.0339353084564,246,56.91977286338806,275,481.18742632865906
61 | 59,291,947.7742516994476,261,71.559574842453,275,481.191358089447
62 | 60,305,981.3974587917328,291,92.71349763870239,269,488.7560610771179
63 | 61,304,957.5966999530792,275,82.32165241241455,269,491.32160925865173
64 | 62,307,968.3465480804443,266,79.71064329147339,269,492.24424958229065
65 | 63,309,978.8897063732147,269,78.40533828735352,269,490.47363781929016
66 | 64,305,976.8462386131287,263,96.9474766254425,269,497.1219482421875
67 | 65,310,973.8594441413879,257,72.08272051811218,269,493.34489607810974
68 | 66,306,964.727823972702,276,96.79535627365112,269,496.8235650062561
69 | 67,309,980.2682957649231,271,110.70756220817566,269,489.6940174102783
70 | 68,304,985.0895121097565,266,146.9937801361084,269,510.74099040031433
71 | 69,304,970.6574778556824,259,80.50431251525879,269,480.4831213951111
72 | 70,331,1002.6550228595734,276,71.99679160118103,282,486.46082282066345
73 | 71,330,1068.6351492404938,297,123.51885652542114,282,506.458402633667
74 | 72,332,1023.2077965736389,285,78.46024966239929,282,492.73976039886475
75 | 73,331,1017.5172808170319,280,48.53227877616882,282,495.07808446884155
76 | 74,332,1011.5874664783478,309,67.04316973686218,282,494.5928440093994
77 | 75,330,1028.534333705902,274,72.79688119888306,282,492.562472820282
78 | 76,332,996.4580583572388,310,69.33413505554199,282,489.1693527698517
79 | 77,332,1006.4915940761566,293,63.24198341369629,282,486.631254196167
80 | 78,332,1007.7951982021332,278,54.046175479888916,282,487.0864179134369
81 | 79,327,1008.2880766391754,306,101.87895131111145,282,491.83132791519165
82 | 80,342,1024.3724205493927,311,84.77551889419556,307,490.72554993629456
83 | 81,346,1037.724690914154,321,93.26548743247986,307,497.81773042678833
84 | 82,345,1045.6583635807037,300,86.76090788841248,307,484.0392985343933
85 | 83,344,1043.5157623291016,310,91.16247344017029,307,490.33659172058105
86 | 84,344,1042.54065823555,322,59.16938662528992,307,484.3666524887085
87 | 85,344,1039.1224558353424,301,86.92423415184021,307,493.7003331184387
88 | 86,340,1043.6719121932983,320,77.55270719528198,307,483.9702203273773
89 | 87,344,1042.995453596115,306,104.2813949584961,307,494.0151512622833
90 | 88,347,1051.3341484069824,321,152.23194694519043,307,496.49487829208374
91 | 89,349,1040.0653417110443,349,86.07370352745056,307,498.4700348377228
92 | 90,327,981.0026612281799,277,82.25102162361145,310,486.8569030761719
93 | 91,327,981.1793773174286,291,83.74289011955261,310,487.12745547294617
94 | 92,327,976.752777338028,306,72.23682999610901,310,489.5701353549957
95 | 93,326,980.6637990474701,300,54.929299116134644,310,483.11501002311707
96 | 94,322,969.3974039554596,284,70.095294713974,310,484.54920268058777
97 | 95,324,970.2287967205048,277,90.13793587684631,310,491.29523491859436
98 | 96,323,978.8446981906891,279,76.42075634002686,310,490.77434039115906
99 | 97,325,968.8402066230774,275,64.63985276222229,310,498.89036893844604
100 | 98,324,979.2253255844116,286,160.1146640777588,310,496.08253359794617
101 | 99,325,982.7923681735992,275,45.25343370437622,310,482.38406586647034
102 |
--------------------------------------------------------------------------------
/medium_size_result.csv:
--------------------------------------------------------------------------------
1 | ,aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time
2 | 0,216,448.3831191062927,206,49.305721282958984,193,148.66212391853333
3 | 1,215,450.0390589237213,201,47.92679977416992,193,148.18085885047913
4 | 2,215,446.2680039405823,197,44.56689190864563,193,147.05394744873047
5 | 3,214,439.6638705730438,206,30.478964805603027,193,145.65527486801147
6 | 4,216,446.25123286247253,213,49.142014026641846,193,146.94575667381287
7 | 5,215,449.3586504459381,191,35.995574951171875,193,147.08190488815308
8 | 6,215,444.68290305137634,199,47.1325957775116,193,149.1070830821991
9 | 7,215,440.68888783454895,191,40.503169775009155,193,146.32232356071472
10 | 8,216,444.11085629463196,207,30.60081696510315,193,145.07146883010864
11 | 9,216,442.4932496547699,203,41.649266719818115,193,148.24117517471313
12 | 10,181,394.1740880012512,182,41.235496282577515,179,145.92154598236084
13 | 11,179,398.2101173400879,182,58.84644627571106,179,149.12411260604858
14 | 12,182,396.67690896987915,185,28.317508935928345,179,146.20850467681885
15 | 13,181,402.9303925037384,178,50.20690608024597,179,150.27634143829346
16 | 14,183,399.2702867984772,198,61.198126792907715,179,150.24325561523438
17 | 15,181,392.7786009311676,193,27.70851445198059,179,145.60873198509216
18 | 16,186,394.2873070240021,187,40.75690317153931,179,146.5531108379364
19 | 17,182,400.4948661327362,189,68.48983502388,179,151.8164780139923
20 | 18,181,396.15389823913574,176,36.01111102104187,179,146.96473789215088
21 | 19,183,407.0688331127167,177,74.86308264732361,179,152.32446765899658
22 | 20,257,535.1680011749268,242,45.81318497657776,257,162.57035994529724
23 | 21,248,477.0249879360199,257,59.37494111061096,257,151.88625025749207
24 | 22,248,481.47851395606995,248,73.29448699951172,257,153.8765308856964
25 | 23,248,476.1490092277527,251,46.956031799316406,257,150.61592316627502
26 | 24,248,472.28077578544617,245,35.464972496032715,257,147.88373970985413
27 | 25,247,465.2167932987213,248,33.261672258377075,257,147.64871048927307
28 | 26,248,465.4674003124237,243,46.813090562820435,257,149.02625226974487
29 | 27,253,475.7856402397156,253,62.99959635734558,257,152.69138717651367
30 | 28,248,471.5851991176605,249,62.63740587234497,257,151.3696005344391
31 | 29,248,468.71898126602173,242,40.11980128288269,257,149.03825902938843
32 | 30,196,404.4883544445038,190,26.050060272216797,194,144.73951077461243
33 | 31,200,409.98085474967957,196,49.781771659851074,194,148.48935222625732
34 | 32,197,411.36246395111084,197,45.22318196296692,194,149.27837538719177
35 | 33,197,413.2939670085907,198,49.13954973220825,194,149.69017028808594
36 | 34,200,408.55559182167053,208,41.79141283035278,194,149.21917819976807
37 | 35,196,409.9758207798004,189,27.64315676689148,194,147.62873792648315
38 | 36,197,408.1155550479889,194,47.21198105812073,194,147.90398573875427
39 | 37,199,412.9177339076996,217,75.3260293006897,194,152.389484167099
40 | 38,195,413.65162658691406,197,56.222227573394775,194,148.9140920639038
41 | 39,196,406.7831847667694,196,33.56608486175537,194,146.55531525611877
42 | 40,162,376.0339472293854,166,32.595866680145264,170,144.9636528491974
43 | 41,162,377.5476269721985,183,36.15443444252014,170,147.16523098945618
44 | 42,162,380.8216028213501,169,73.12481117248535,170,151.93030548095703
45 | 43,163,377.19195222854614,167,26.03180742263794,170,146.38755416870117
46 | 44,162,375.4290704727173,165,31.497214555740356,170,145.87686395645142
47 | 45,162,381.92375802993774,177,65.6746084690094,170,151.02717685699463
48 | 46,162,377.6920804977417,167,41.89390730857849,170,149.11691117286682
49 | 47,162,379.4388999938965,168,47.881861448287964,170,146.81237983703613
50 | 48,163,386.09958243370056,176,90.63225603103638,170,154.9767725467682
51 | 49,162,376.02294278144836,178,49.68327236175537,170,147.34447741508484
52 | 50,163,395.1006145477295,170,60.02331042289734,166,149.5303943157196
53 | 51,164,398.0407438278198,173,58.35295605659485,166,148.20625829696655
54 | 52,161,387.7398009300232,160,36.16767406463623,166,146.93824124336243
55 | 53,160,397.3074097633362,181,87.42074513435364,166,151.60932040214539
56 | 54,161,394.0010869503021,161,35.39233636856079,166,145.9512619972229
57 | 55,168,397.33205008506775,180,93.66990065574646,166,153.4657781124115
58 | 56,165,391.67463517189026,177,34.15181350708008,166,146.88838911056519
59 | 57,160,394.11439633369446,179,75.18911981582642,166,150.38912177085876
60 | 58,163,391.3193950653076,169,50.267693281173706,166,146.59030938148499
61 | 59,161,389.8388180732727,169,43.60353207588196,166,146.73621797561646
62 | 60,214,458.1954791545868,224,53.538702964782715,213,148.77755308151245
63 | 61,216,457.9216077327728,217,74.3746817111969,213,172.45012044906616
64 | 62,210,419.3990144729614,198,35.978588342666626,213,148.06180047988892
65 | 63,219,429.85972452163696,218,87.86180424690247,213,153.94083642959595
66 | 64,214,424.77522015571594,209,65.07919359207153,213,151.17670392990112
67 | 65,218,418.9645538330078,201,33.109193086624146,213,146.59753608703613
68 | 66,213,419.9968156814575,211,60.90353274345398,213,151.33484530448914
69 | 67,213,419.75107407569885,219,41.44067192077637,213,148.6664433479309
70 | 68,215,418.3867540359497,215,53.756091594696045,213,149.20613074302673
71 | 69,217,426.778751373291,207,55.369181394577026,213,150.33477354049683
72 | 70,199,410.5372402667999,215,60.27366662025452,208,149.8002016544342
73 | 71,198,409.2937562465668,204,59.25821495056152,208,151.39743947982788
74 | 72,196,409.5259928703308,215,35.602235317230225,208,145.2328040599823
75 | 73,201,406.344420671463,223,48.74324607849121,208,148.4908196926117
76 | 74,199,408.3418302536011,220,70.71654105186462,208,151.31224751472473
77 | 75,198,404.72881293296814,194,29.9748272895813,208,145.87402033805847
78 | 76,200,407.07944345474243,199,32.814154624938965,208,148.0822048187256
79 | 77,198,410.07060742378235,221,83.80286979675293,208,152.27385711669922
80 | 78,198,403.81956219673157,205,46.65635085105896,208,149.30409383773804
81 | 79,197,410.29728984832764,225,53.520286560058594,208,148.7795009613037
82 | 80,152,380.2354018688202,157,35.35539889335632,169,147.90625596046448
83 | 81,150,384.5501811504364,159,48.30092000961304,169,149.15482306480408
84 | 82,151,376.554048538208,157,25.061031818389893,169,146.199316740036
85 | 83,152,379.9583477973938,160,32.2420814037323,169,147.88309359550476
86 | 84,154,381.87709069252014,159,46.78597569465637,169,148.42677807807922
87 | 85,150,384.82411456108093,176,71.57334589958191,169,152.0538854598999
88 | 86,154,380.4787724018097,153,32.610721588134766,169,145.2583565711975
89 | 87,154,381.19555377960205,160,35.47093892097473,169,146.1291468143463
90 | 88,154,375.84812235832214,158,32.85135459899902,169,146.2271864414215
91 | 89,150,377.0024824142456,172,31.29044246673584,169,146.2692093849182
92 | 90,188,408.0042040348053,172,33.111896276474,172,146.74985241889954
93 | 91,188,415.29174041748047,183,46.34089708328247,172,148.31623196601868
94 | 92,186,408.48268270492554,171,65.71342968940735,172,151.4460847377777
95 | 93,187,408.3678331375122,173,50.35272192955017,172,149.03712511062622
96 | 94,188,410.88290190696716,177,33.343971252441406,172,147.3882360458374
97 | 95,187,414.69072556495667,181,56.616501331329346,172,149.22642374038696
98 | 96,185,413.3330612182617,178,45.60746383666992,172,148.60242438316345
99 | 97,187,406.33209466934204,181,42.515186071395874,172,146.75966262817383
100 | 98,187,405.92414903640747,183,27.310251474380493,172,145.60900115966797
101 | 99,186,415.5781035423279,190,51.28600215911865,172,146.98746609687805
102 |
--------------------------------------------------------------------------------
/ga.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 | import time
4 | import os
5 |
6 |
7 | class GA():
8 | def __init__(self, vehicle_num, vehicles_speed, target_num, targets, time_lim):
9 |         # vehicles_speed and targets are numpy ndarrays
10 | self.vehicle_num = vehicle_num
11 | self.vehicles_speed = vehicles_speed
12 | self.target_num = target_num
13 | self.targets = targets
14 | self.time_lim = time_lim
15 | self.map = np.zeros(shape=(target_num+1, target_num+1), dtype=float)
16 | self.pop_size = 50
17 | self.p_cross = 0.6
18 | self.p_mutate = 0.005
19 | for i in range(target_num+1):
20 | self.map[i, i] = 0
21 | for j in range(i):
22 | self.map[j, i] = self.map[i, j] = np.linalg.norm(
23 | targets[i, :2]-targets[j, :2])
24 | self.pop = np.zeros(
25 | shape=(self.pop_size, vehicle_num-1+target_num-1), dtype=np.int32)
26 | self.ff = np.zeros(self.pop_size, dtype=float)
27 | for i in range(self.pop_size):
28 | for j in range(vehicle_num-1):
29 | self.pop[i, j] = random.randint(0, target_num)
30 | for j in range(target_num-1):
31 | self.pop[i, vehicle_num+j -
32 | 1] = random.randint(0, target_num-j-1)
33 | self.ff[i] = self.fitness(self.pop[i, :])
34 | self.tmp_pop = np.array([])
35 | self.tmp_ff = np.array([])
36 | self.tmp_size = 0
37 |
38 | def fitness(self, gene):
39 | ins = np.zeros(self.target_num+1, dtype=np.int32)
40 | seq = np.zeros(self.target_num, dtype=np.int32)
41 | ins[self.target_num] = 1
42 | for i in range(self.vehicle_num-1):
43 | ins[gene[i]] += 1
44 | rest = np.array(range(1, self.target_num+1))
45 | for i in range(self.target_num-1):
46 | seq[i] = rest[gene[i+self.vehicle_num-1]]
47 | rest = np.delete(rest, gene[i+self.vehicle_num-1])
48 | seq[self.target_num-1] = rest[0]
49 | i = 0 # index of vehicle
50 | pre = 0 # index of last target
51 | post = 0 # index of ins/seq
52 | t = 0
53 | reward = 0
54 | while i < self.vehicle_num:
55 | if ins[post] > 0:
56 | i += 1
57 | ins[post] -= 1
58 | pre = 0
59 | t = 0
60 | else:
61 | t += self.targets[pre, 3]
62 | past = self.map[pre, seq[post]]/self.vehicles_speed[i]
63 | t += past
64 | if t < self.time_lim:
65 | reward += self.targets[seq[post], 2]
66 | pre = seq[post]
67 | post += 1
68 | return reward
69 |
70 | def selection(self):
71 | roll = np.zeros(self.tmp_size, dtype=float)
72 | roll[0] = self.tmp_ff[0]
73 | for i in range(1, self.tmp_size):
74 | roll[i] = roll[i-1]+self.tmp_ff[i]
75 | for i in range(self.pop_size):
76 | xx = random.uniform(0, roll[self.tmp_size-1])
77 | j = 0
78 | while xx > roll[j]:
79 | j += 1
80 | self.pop[i, :] = self.tmp_pop[j, :]
81 | self.ff[i] = self.tmp_ff[j]
82 |
83 | def mutation(self):
84 | for i in range(self.tmp_size):
85 | flag = False
86 | for j in range(self.vehicle_num-1):
87 | if random.random() < self.p_mutate:
88 | self.tmp_pop[i, j] = random.randint(0, self.target_num)
89 | flag = True
90 | for j in range(self.target_num-1):
91 | if random.random() < self.p_mutate:
92 | self.tmp_pop[i, self.vehicle_num+j -
93 | 1] = random.randint(0, self.target_num-j-1)
94 | flag = True
95 | if flag:
96 | self.tmp_ff[i] = self.fitness(self.tmp_pop[i, :])
97 |
98 | def crossover(self):
99 | new_pop = []
100 | new_ff = []
101 | new_size = 0
102 | for i in range(0, self.pop_size, 2):
103 | if random.random() < self.p_cross:
104 | x1 = random.randint(0, self.vehicle_num-2)
105 | x2 = random.randint(0, self.target_num-2)+self.vehicle_num
106 |                 g1 = self.pop[i, :].copy()  # copy so crossover does not modify the parents in place
107 |                 g2 = self.pop[i+1, :].copy()
108 | g1[x1:x2] = self.pop[i+1, x1:x2]
109 | g2[x1:x2] = self.pop[i, x1:x2]
110 | new_pop.append(g1)
111 | new_pop.append(g2)
112 | new_ff.append(self.fitness(g1))
113 | new_ff.append(self.fitness(g2))
114 | new_size += 2
115 | self.tmp_size = self.pop_size+new_size
116 | self.tmp_pop = np.zeros(
117 | shape=(self.tmp_size, self.vehicle_num-1+self.target_num-1), dtype=np.int32)
118 | self.tmp_pop[0:self.pop_size, :] = self.pop
119 | self.tmp_pop[self.pop_size:self.tmp_size, :] = np.array(new_pop)
120 | self.tmp_ff = np.zeros(self.tmp_size, dtype=float)
121 | self.tmp_ff[0:self.pop_size] = self.ff
122 | self.tmp_ff[self.pop_size:self.tmp_size] = np.array(new_ff)
123 |
124 | def run(self):
125 | print("GA start, pid: %s" % os.getpid())
126 | start_time = time.time()
127 | cut = 0
128 | count = 0
129 | while count < 500:
130 | self.crossover()
131 | self.mutation()
132 | self.selection()
133 | new_cut = self.tmp_ff.max()
134 | if cut < new_cut:
135 | cut = new_cut
136 | count = 0
137 | gene = self.tmp_pop[np.argmax(self.tmp_ff)]
138 | else:
139 | count += 1
140 |
141 | ins = np.zeros(self.target_num+1, dtype=np.int32)
142 | seq = np.zeros(self.target_num, dtype=np.int32)
143 | ins[self.target_num] = 1
144 | for i in range(self.vehicle_num-1):
145 | ins[gene[i]] += 1
146 | rest = np.array(range(1, self.target_num+1))
147 | for i in range(self.target_num-1):
148 | seq[i] = rest[gene[i+self.vehicle_num-1]]
149 | rest = np.delete(rest, gene[i+self.vehicle_num-1])
150 | seq[self.target_num-1] = rest[0]
151 | task_assignment = [[] for i in range(self.vehicle_num)]
152 | i = 0 # index of vehicle
153 | pre = 0 # index of last target
154 | post = 0 # index of ins/seq
155 | t = 0
156 | reward = 0
157 | while i < self.vehicle_num:
158 | if ins[post] > 0:
159 | i += 1
160 | ins[post] -= 1
161 | pre = 0
162 | t = 0
163 | else:
164 | t += self.targets[pre, 3]
165 | past = self.map[pre, seq[post]]/self.vehicles_speed[i]
166 | t += past
167 | if t < self.time_lim:
168 | task_assignment[i].append(seq[post])
169 | reward += self.targets[seq[post], 2]
170 | pre = seq[post]
171 | post += 1
172 | print("GA result:", task_assignment)
173 | end_time = time.time()
174 | print("GA time:", end_time - start_time)
175 | return task_assignment, end_time - start_time
176 |
177 |
--------------------------------------------------------------------------------
/evaluate.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import random
4 | import pandas as pd
5 | import copy
6 | from multiprocessing import Pool
7 | from ga import GA
8 | from aco import ACO
9 | from pso import PSO
10 |
11 | class Env():
12 | def __init__(self, vehicle_num, target_num, map_size, visualized=True, time_cost=None, repeat_cost=None):
13 | self.vehicles_position = np.zeros(vehicle_num,dtype=np.int32)
14 | self.vehicles_speed = np.zeros(vehicle_num,dtype=np.int32)
15 | self.targets = np.zeros(shape=(target_num+1,4),dtype=np.int32)
16 | if vehicle_num==5:
17 | self.size='small'
18 | if vehicle_num==10:
19 | self.size='medium'
20 | if vehicle_num==15:
21 | self.size='large'
22 | self.map_size = map_size
23 | self.speed_range = [10, 15, 30]
24 | #self.time_lim = 1e6
25 | self.time_lim = self.map_size / self.speed_range[1]
26 | self.vehicles_lefttime = np.ones(vehicle_num,dtype=np.float32) * self.time_lim
27 | self.distant_mat = np.zeros((target_num+1,target_num+1),dtype=np.float32)
28 | self.total_reward = 0
29 | self.reward = 0
30 | self.visualized = visualized
31 | self.time = 0
32 | self.time_cost = time_cost
33 | self.repeat_cost = repeat_cost
34 | self.end = False
35 | self.assignment = [[] for i in range(vehicle_num)]
36 | self.task_generator()
37 |
38 | def task_generator(self):
39 | for i in range(self.vehicles_speed.shape[0]):
40 | choose = random.randint(0,2)
41 | self.vehicles_speed[i] = self.speed_range[choose]
42 | for i in range(self.targets.shape[0]-1):
43 | self.targets[i+1,0] = random.randint(1,self.map_size) - 0.5*self.map_size # x position
44 | self.targets[i+1,1] = random.randint(1,self.map_size) - 0.5*self.map_size # y position
45 | self.targets[i+1,2] = random.randint(1,10) # reward
46 | self.targets[i+1,3] = random.randint(5,30) # time consumption to finish the mission
47 | for i in range(self.targets.shape[0]):
48 | for j in range(self.targets.shape[0]):
49 | self.distant_mat[i,j] = np.linalg.norm(self.targets[i,:2]-self.targets[j,:2])
50 | self.targets_value = copy.deepcopy((self.targets[:,2]))
51 |
52 | def step(self, action):
53 | count = 0
54 | for j in range(len(action)):
55 | k = action[j]
56 | delta_time = self.distant_mat[self.vehicles_position[j],k] / self.vehicles_speed[j] + self.targets[k,3]
57 | self.vehicles_lefttime[j] = self.vehicles_lefttime[j] - delta_time
58 | if self.vehicles_lefttime[j] < 0:
59 | count = count + 1
60 | continue
61 | else:
62 | if k == 0:
63 | self.reward = - self.repeat_cost
64 | else:
65 | self.reward = self.targets[k,2] - delta_time * self.time_cost + self.targets[k,2]
66 | if self.targets[k,2] == 0:
67 | self.reward = self.reward - self.repeat_cost
68 | self.vehicles_position[j] = k
69 | self.targets[k,2] = 0
70 | self.total_reward = self.total_reward + self.reward
71 | self.assignment[j].append(action)
72 | if count == len(action):
73 | self.end = True
74 |
75 | def run(self, assignment, algorithm, play, rond):
76 | self.assignment = assignment
77 | self.algorithm = algorithm
78 | self.play = play
79 | self.rond = rond
80 | self.get_total_reward()
81 | if self.visualized:
82 | self.visualize()
83 |
84 | def reset(self):
85 | self.vehicles_position = np.zeros(self.vehicles_position.shape[0],dtype=np.int32)
86 | self.vehicles_lefttime = np.ones(self.vehicles_position.shape[0],dtype=np.float32) * self.time_lim
87 | self.targets[:,2] = self.targets_value
88 | self.total_reward = 0
89 | self.reward = 0
90 | self.end = False
91 |
92 | def get_total_reward(self):
93 | for i in range(len(self.assignment)):
94 | speed = self.vehicles_speed[i]
95 | for j in range(len(self.assignment[i])):
96 | position = self.targets[self.assignment[i][j],:4]
97 | self.total_reward = self.total_reward + position[2]
98 | if j == 0:
99 | self.vehicles_lefttime[i] = self.vehicles_lefttime[i] - np.linalg.norm(position[:2]) / speed - position[3]
100 | else:
101 | self.vehicles_lefttime[i] = self.vehicles_lefttime[i] - np.linalg.norm(position[:2]-position_last[:2]) / speed - position[3]
102 | position_last = position
103 |                 if self.vehicles_lefttime[i] < 0:  # time limit exceeded, plan is infeasible
104 | self.end = True
105 | break
106 | if self.end:
107 | self.total_reward = 0
108 | break
109 |
110 | def visualize(self):
111 |         if self.assignment is None:
112 | plt.scatter(x=0,y=0,s=200,c='k')
113 | plt.scatter(x=self.targets[1:,0],y=self.targets[1:,1],s=self.targets[1:,2]*10,c='r')
114 | plt.title('Target distribution')
115 | plt.savefig('task_pic/'+self.size+'/'+self.algorithm+ "-%d-%d.png" % (self.play,self.rond))
116 | plt.cla()
117 | else:
118 | plt.title('Task assignment by '+self.algorithm +', total reward : '+str(self.total_reward))
119 | plt.scatter(x=0,y=0,s=200,c='k')
120 | plt.scatter(x=self.targets[1:,0],y=self.targets[1:,1],s=self.targets[1:,2]*10,c='r')
121 | for i in range(len(self.assignment)):
122 | trajectory = np.array([[0,0,20]])
123 | for j in range(len(self.assignment[i])):
124 | position = self.targets[self.assignment[i][j],:3]
125 | trajectory = np.insert(trajectory,j+1,values=position,axis=0)
126 | plt.scatter(x=trajectory[1:,0],y=trajectory[1:,1],s=trajectory[1:,2]*10,c='b')
127 | plt.plot(trajectory[:,0], trajectory[:,1])
128 | plt.savefig('task_pic/'+self.size+'/'+self.algorithm+ "-%d-%d.png" % (self.play,self.rond))
129 | plt.cla()
130 |
131 | def evaluate(vehicle_num, target_num, map_size):
132 | if vehicle_num==5:
133 | size='small'
134 | if vehicle_num==10:
135 | size='medium'
136 | if vehicle_num==15:
137 | size='large'
138 | re_ga=[[] for i in range(10)]
139 | re_aco=[[] for i in range(10)]
140 | re_pso=[[] for i in range(10)]
141 | for i in range(10):
142 | env = Env(vehicle_num,target_num,map_size,visualized=True)
143 | for j in range(10):
144 | p=Pool(3)
145 | ga = GA(vehicle_num,env.vehicles_speed,target_num,env.targets,env.time_lim)
146 | aco = ACO(vehicle_num,target_num,env.vehicles_speed,env.targets,env.time_lim)
147 | pso = PSO(vehicle_num,target_num ,env.targets,env.vehicles_speed,env.time_lim)
148 | ga_result=p.apply_async(ga.run)
149 | aco_result=p.apply_async(aco.run)
150 | pso_result=p.apply_async(pso.run)
151 | p.close()
152 | p.join()
153 |             ga_task_assignment = ga_result.get()[0]
154 |             env.run(ga_task_assignment,'GA',i+1,j+1)
155 | re_ga[i].append((env.total_reward,ga_result.get()[1]))
156 | env.reset()
157 |             aco_task_assignment = aco_result.get()[0]
158 |             env.run(aco_task_assignment,'ACO',i+1,j+1)
159 | re_aco[i].append((env.total_reward,aco_result.get()[1]))
160 | env.reset()
161 |             pso_task_assignment = pso_result.get()[0]
162 |             env.run(pso_task_assignment,'PSO',i+1,j+1)
163 | re_pso[i].append((env.total_reward,pso_result.get()[1]))
164 | env.reset()
165 | x_index=np.arange(10)
166 | ymax11=[]
167 | ymax12=[]
168 | ymax21=[]
169 | ymax22=[]
170 | ymax31=[]
171 | ymax32=[]
172 | ymean11=[]
173 | ymean12=[]
174 | ymean21=[]
175 | ymean22=[]
176 | ymean31=[]
177 | ymean32=[]
178 | for i in range(10):
179 | tmp1=[re_ga[i][j][0] for j in range(10)]
180 | tmp2=[re_ga[i][j][1] for j in range(10)]
181 | ymax11.append(np.amax(tmp1))
182 | ymax12.append(np.amax(tmp2))
183 | ymean11.append(np.mean(tmp1))
184 | ymean12.append(np.mean(tmp2))
185 | tmp1=[re_aco[i][j][0] for j in range(10)]
186 | tmp2=[re_aco[i][j][1] for j in range(10)]
187 | ymax21.append(np.amax(tmp1))
188 | ymax22.append(np.amax(tmp2))
189 | ymean21.append(np.mean(tmp1))
190 | ymean22.append(np.mean(tmp2))
191 | tmp1=[re_pso[i][j][0] for j in range(10)]
192 | tmp2=[re_pso[i][j][1] for j in range(10)]
193 | ymax31.append(np.amax(tmp1))
194 | ymax32.append(np.amax(tmp2))
195 | ymean31.append(np.mean(tmp1))
196 | ymean32.append(np.mean(tmp2))
197 | rects1=plt.bar(x_index,ymax11,width=0.1,color='b',label='ga_max_reward')
198 | rects2=plt.bar(x_index+0.1,ymax21,width=0.1,color='r',label='aco_max_reward')
199 | rects3=plt.bar(x_index+0.2,ymax31,width=0.1,color='g',label='pso_max_reward')
200 | plt.xticks(x_index+0.1,x_index)
201 | plt.legend()
202 | plt.title('max_reward_for_'+size+'_size')
203 | plt.savefig('max_reward_'+size+'.png')
204 | plt.cla()
205 |
206 | rects1=plt.bar(x_index,ymax12,width=0.1,color='b',label='ga_max_time')
207 | rects2=plt.bar(x_index+0.1,ymax22,width=0.1,color='r',label='aco_max_time')
208 | rects3=plt.bar(x_index+0.2,ymax32,width=0.1,color='g',label='pso_max_time')
209 | plt.xticks(x_index+0.1,x_index)
210 | plt.legend()
211 | plt.title('max_time_for_'+size+'_size')
212 | plt.savefig('max_time_'+size+'.png')
213 | plt.cla()
214 |
215 | rects1=plt.bar(x_index,ymean11,width=0.1,color='b',label='ga_mean_reward')
216 | rects2=plt.bar(x_index+0.1,ymean21,width=0.1,color='r',label='aco_mean_reward')
217 | rects3=plt.bar(x_index+0.2,ymean31,width=0.1,color='g',label='pso_mean_reward')
218 | plt.xticks(x_index+0.1,x_index)
219 | plt.legend()
220 | plt.title('mean_reward_for_'+size+'_size')
221 | plt.savefig('mean_reward_'+size+'.png')
222 | plt.cla()
223 |
224 | rects1=plt.bar(x_index,ymean12,width=0.1,color='b',label='ga_mean_time')
225 | rects2=plt.bar(x_index+0.1,ymean22,width=0.1,color='r',label='aco_mean_time')
226 | rects3=plt.bar(x_index+0.2,ymean32,width=0.1,color='g',label='pso_mean_time')
227 | plt.xticks(x_index+0.1,x_index)
228 | plt.legend()
229 | plt.title('mean_time_for_'+size+'_size')
230 | plt.savefig('mean_time_'+size+'.png')
231 | plt.cla()
232 |
233 | t_ga=[]
234 | r_ga=[]
235 | t_aco=[]
236 | r_aco=[]
237 | t_pso=[]
238 | r_pso=[]
239 | for i in range(10):
240 | for j in range(10):
241 | t_ga.append(re_ga[i][j][1])
242 | r_ga.append(re_ga[i][j][0])
243 | t_aco.append(re_aco[i][j][1])
244 | r_aco.append(re_aco[i][j][0])
245 | t_pso.append(re_pso[i][j][1])
246 | r_pso.append(re_pso[i][j][0])
247 | dataframe = pd.DataFrame({'ga_time':t_ga,'ga_reward':r_ga,'aco_time':t_aco,'aco_reward':r_aco,'pso_time':t_pso,'pso_reward':r_pso})
248 | dataframe.to_csv(size+'_size_result.csv',sep=',')
249 |
250 |
251 | if __name__=='__main__':
252 | # small scale
253 | evaluate(5,30,5e3)
254 | # medium scale
255 | evaluate(10,60,1e4)
256 | # large scale
257 | evaluate(15,90,1.5e4)
258 |
--------------------------------------------------------------------------------
/pso.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import numpy as np
3 | import random
4 | import math
5 | import cmath
6 | import time
7 | import os
8 | # ----------------------Optimization scheme----------------------------------
9 | # Optimization ideas:
10 | # 1. Increase the convergence factor k;
11 | # 2. Dynamic change of inertia factor W;
12 | # 3. Using PSO local search algorithm(Ring method)
13 | # 4. The probability of position variation is added
14 | # ----------------------Set PSO Parameter---------------------------------
15 |
16 |
17 | class PSO():
18 | def __init__(self, uav_num, target_num, targets, vehicles_speed, time_lim):
19 | self.uav_num = uav_num
20 | self.dim = target_num
21 | self.targets = targets
22 | self.vehicles_speed = vehicles_speed
23 | self.time_all = time_lim
24 | self.pN = 2*(self.uav_num+self.dim) # Number of particles
25 | self.max_iter = 0 # Number of iterations
26 | # Target distance list (dim+1)*(dim+1)
27 | self.Distance = np.zeros((target_num+1, target_num+1))
28 | self.Value = np.zeros(target_num+1) # Value list of targets 1*dim+1
29 | self.Stay_time = []
30 | # UAV flight speed matrix
31 | self.w = 0.8
32 | self.c1 = 2
33 | self.c2 = 2
34 | self.r1 = 0.6
35 | self.r2 = 0.3
36 | self.k = 0 # Convergence factor
37 | self.wini = 0.9
38 | self.wend = 0.4
39 |
40 | self.X = np.zeros((self.pN, self.dim+self.uav_num-1)
41 | ) # Position of all particles
42 | self.V = np.zeros((self.pN, self.dim+self.uav_num-1)
43 | ) # Velocity of all particles
44 | # The historical optimal position of each individual
45 | self.pbest = np.zeros((self.pN, self.dim+self.uav_num-1))
46 | self.gbest = np.zeros((1, self.dim+self.uav_num-1))
47 | # Global optimal position
48 | self.gbest_ring = np.zeros((self.pN, self.dim+self.uav_num-1))
49 | # Historical optimal fitness of each individual
50 | self.p_fit = np.zeros(self.pN)
51 | self.fit = 0 # Global optimal fitness
52 | self.ring = []
53 | self.ring_fit = np.zeros(self.pN)
54 | # variation parameter
55 | self.p1 = 0.4 # Probability of mutation
56 | self.p2 = 0.5 # Proportion of individuals with variation in population
57 | self.p3 = 0.5 # Proportion of locations where variation occurs
58 | self.TEST = []
59 | self.test_num = 0
60 | self.uav_best = []
61 |
62 | self.time_out = np.zeros(self.uav_num)
63 |
64 | self.cal_time = 0
65 | # ------------------Get Initial parameter------------------
66 |
67 | def fun_get_initial_parameter(self):
68 | self.max_iter = 40*(self.uav_num+self.dim)
69 | if self.max_iter > 4100:
70 | self.max_iter = 4100
71 |
72 |         # Get Stay_time array, Distance matrix and Value array
73 | Targets = self.targets
74 | self.Stay_time = Targets[:, 3]
75 | self.Distance = np.zeros((self.dim+1, self.dim+1))
76 | self.Value = np.zeros(self.dim+1)
77 | for i in range(self.dim+1):
78 | self.Value[i] = Targets[i, 2]
79 | for j in range(i):
80 | self.Distance[i][j] = (
81 | Targets[i, 0]-Targets[j, 0])*(Targets[i, 0]-Targets[j, 0])
82 | self.Distance[i][j] = self.Distance[i][j] + \
83 | (Targets[i, 1]-Targets[j, 1])*(Targets[i, 1]-Targets[j, 1])
84 | self.Distance[i][j] = math.sqrt(self.Distance[i][j])
85 | self.Distance[j][i] = self.Distance[i][j]
86 | # ------------------Transfer_Function---------------------
87 |
88 | def fun_Transfer(self, X):
89 | # Converting continuous sequence X into discrete sequence X_path
90 | X1 = X[0:self.dim]
91 | X_path = []
92 | l1 = len(X1)
93 | for i in range(l1):
94 | m = X1[i]*(self.dim-i)
95 | m = math.floor(m)
96 | X_path.append(m)
97 |         # Convert the continuous separator part of X into the discrete separator sequence X_rank
98 | X2 = X[self.dim:]
99 | l1 = len(X2)
100 | X_rank = []
101 | for i in range(l1):
102 |
103 | m = X2[i]*(self.dim+1)
104 |
105 | m1 = math.floor(m)
106 | X_rank.append(m1)
107 |         # Sort the separator indices and pad with the route endpoints 0 and dim
108 | c = sorted(X_rank)
109 | l1 = len(c)
110 | Rank = []
111 | Rank.append(0)
112 | for i in range(l1):
113 | Rank.append(c[i])
114 | Rank.append(self.dim)
115 | # Get Separate_Arrary
116 | Sep = []
117 | for i in range(l1+1):
118 | sep = Rank[i+1]-Rank[i]
119 | Sep.append(sep)
120 | return X_path, Sep
121 |
122 | # -------------------Obtain the Real Flight Path Sequence of Particles--------------------------
123 | def position(self, X):
124 | Position_All = list(range(1, self.dim+1))
125 | X2 = []
126 | for i in range(self.dim):
127 | m1 = X[i]
128 | m1 = int(m1)
129 | X2.append(Position_All[m1])
130 | del Position_All[m1]
131 | return X2
132 | # ---------------------Fitness_Computing Function-----------------------------
133 |
134 | def function(self, X):
135 | X_path, Sep = self.fun_Transfer(X)
136 |
137 | # Obtain the Real Flight Path Sequence of Particles
138 | X = self.position(X_path)
139 | # Get the search sequence of each UAV
140 | UAV = []
141 | l = 0
142 | for i in range(self.uav_num):
143 | UAV.append([])
144 | k = Sep[i]
145 | for j in range(k):
146 | UAV[i].append(X[l])
147 | l = l+1
148 |
149 | # Calculate Fitness
150 | fitness = 0
151 | for i in range(self.uav_num):
152 | k = Sep[i]
153 | t = 0
154 | for j in range(k):
155 | m1 = UAV[i][j]
156 |
157 | if j == 0:
158 | t = t+self.Distance[0, m1] / \
159 | self.vehicles_speed[i]+self.Stay_time[m1]
160 | else:
161 | m1 = UAV[i][j]
162 | m2 = UAV[i][j-1]
163 | t = t+self.Distance[m1][m2] / \
164 | self.vehicles_speed[i]+self.Stay_time[m1]
165 | if t <= self.time_all:
166 | fitness = fitness+self.Value[m1]
167 | return fitness
168 | # ----------------------------variation-------------------------------------------
169 |
170 | def variation_fun(self):
171 |         p1 = np.random.uniform(0, 1)  # Random draw compared with self.p1 (mutation probability)
172 | if p1 < self.p1:
173 | for i in range(self.pN):
174 |                 # Random draw compared with self.p2 (fraction of individuals that mutate)
175 | p2 = np.random.uniform(0, 1)
176 | if p2 < self.p2:
177 |                     # Number of positions to mutate in this individual
178 | m = int(self.p3*(self.dim+self.uav_num-1))
179 | for j in range(m):
180 | replace_position = math.floor(
181 | np.random.uniform(0, 1)*(self.dim+self.uav_num-1))
182 | replace_value = np.random.uniform(0, 1)
183 | self.X[i][replace_position] = replace_value
184 | # Update pbest & gbest
185 | for i in range(self.pN):
186 | temp = self.function(self.X[i])
187 | self.ring_fit[i] = temp
188 | if temp > self.p_fit[i]:
189 | self.p_fit[i] = temp
190 | self.pbest[i] = self.X[i]
191 | # Update gbest
192 | if self.p_fit[i] > self.fit:
193 |                     self.gbest = self.X[i].copy()  # copy so gbest is not a view of particle i's row
194 | self.fit = self.p_fit[i]
195 |
196 | # ---------------------Population Initialization----------------------------------
197 |
198 | def init_Population(self):
199 | # Initialization of position(X), speed(V), history optimal(pbest) and global optimal(gbest)
200 | for i in range(self.pN):
201 | x = np.random.uniform(0, 1, self.dim+self.uav_num-1)
202 | self.X[i, :] = x
203 | v = np.random.uniform(0, 0.4, self.dim+self.uav_num-1)
204 | self.V[i, :] = v
205 | self.pbest[i] = self.X[i]
206 |
207 | tmp = self.function(self.X[i])
208 | self.p_fit[i] = tmp
209 | if tmp > self.fit:
210 | self.fit = tmp
211 |                 self.gbest = self.X[i].copy()  # copy so gbest is not a view of particle i's row
212 |         # Calculate Clerc's constriction factor: k = 2 / |2 - phi - sqrt(phi^2 - 4*phi)|, with phi = c1 + c2
213 | phi = self.c1+self.c2
214 | k = abs(phi*phi-4*phi)
215 | k = cmath.sqrt(k)
216 | k = abs(2-phi-k)
217 | k = 2/k
218 | self.k = k
219 | # Initialize ring_matrix
220 | for i in range(self.pN):
221 | self.ring.append([])
222 | self.ring[i].append(i)
223 | # Initialize test_set
224 | self.TEST = np.zeros((self.test_num, self.dim+self.uav_num-1))
225 | for i in range(self.test_num):
226 | test = np.random.uniform(0, 1, self.dim+self.uav_num-1)
227 | self.TEST[i, :] = test
228 |
229 | # ----------------------Update Particle Position----------------------------------
230 |
231 | def iterator(self):
232 | fitness = []
233 | fitness_old = 0
234 | k = 0
235 | for t in range(self.max_iter):
236 |             w = (self.wini-self.wend)*(self.max_iter-t)/self.max_iter+self.wend  # linearly decrease inertia weight from wini to wend
237 | self.w = w
238 | # Variation
239 | self.variation_fun()
240 | l1 = len(self.ring[0])
241 | # Local PSO algorithm
242 |             # Update the ring neighborhood array
243 | if l1 < self.pN:
244 | if not(t % 2):
245 | k = k+1
246 | for i in range(self.pN):
247 | m1 = i-k
248 | if m1 < 0:
249 | m1 = self.pN+m1
250 | m2 = i+k
251 | if m2 > self.pN-1:
252 | m2 = m2-self.pN
253 | self.ring[i].append(m1)
254 | self.ring[i].append(m2)
255 | # Update gbest_ring
256 | l_ring = len(self.ring[0])
257 | for i in range(self.pN):
258 | fitness1 = 0
259 | for j in range(l_ring):
260 | m1 = self.ring[i][j]
261 | fitness2 = self.ring_fit[m1]
262 | if fitness2 > fitness1:
263 | self.gbest_ring[i] = self.X[m1]
264 | fitness1 = fitness2
265 | # Update velocity
266 | for i in range(self.pN):
267 | self.V[i] = self.k*(self.w * self.V[i] + self.c1 * self.r1 * (self.pbest[i] - self.X[i])) + \
268 | self.c2 * self.r2 * (self.gbest_ring[i] - self.X[i])
269 | # Update position
270 | self.X[i] = self.X[i] + self.V[i]
271 |
272 | # Global PSO algorithm
273 | else:
274 | # Update velocity
275 | for i in range(self.pN):
276 | self.V[i] = self.k*(self.w * self.V[i] + self.c1 * self.r1 * (self.pbest[i] - self.X[i])) + \
277 | self.c2 * self.r2 * (self.gbest - self.X[i])
278 | # Update position
279 | self.X[i] = self.X[i] + self.V[i]
280 |
281 | # Set position boundary
282 | for i in range(self.pN):
283 | for j in range(self.dim+self.uav_num-1):
284 | if self.X[i][j] >= 1:
285 | self.X[i][j] = 0.999
286 | if self.X[i][j] < 0:
287 | self.X[i][j] = 0
288 | # Update pbest & gbest
289 | for i in range(self.pN):
290 | temp = self.function(self.X[i])
291 | self.ring_fit[i] = temp
292 | if temp > self.p_fit[i]:
293 | self.p_fit[i] = temp
294 | self.pbest[i] = self.X[i]
295 | # Update gbest
296 | if self.p_fit[i] > self.fit:
297 |                         self.gbest = self.X[i].copy()  # copy so gbest is not a view of particle i's row
298 | self.fit = self.p_fit[i]
299 | self.uav_best = self.fun_Data()
300 |
301 |             # Record the best fitness found so far
302 | fitness.append(self.fit)
303 | if self.fit == fitness_old:
304 | continue
305 | else:
306 | fitness_old = self.fit
307 | return fitness
308 |
309 | # ---------------------Data_Processing Function---------------------------
310 | def fun_Data(self):
311 | X_path, Sep = self.fun_Transfer(self.gbest)
312 | # Obtain the Real Flight Path Sequence of Particles
313 | X = self.position(X_path)
314 | # Get the search sequence of each UAV
315 | UAV = []
316 | l = 0
317 | for i in range(self.uav_num):
318 | UAV.append([])
319 | k = Sep[i]
320 | for j in range(k):
321 | UAV[i].append(X[l])
322 | l = l+1
323 | # Calculate UAV_Out
324 | UAV_Out = []
325 | for i in range(self.uav_num):
326 | k = Sep[i]
327 | t = 0
328 | UAV_Out.append([])
329 | for j in range(k):
330 | m1 = UAV[i][j]
331 | if j == 0:
332 | t = t+self.Distance[0, m1] / \
333 | self.vehicles_speed[i]+self.Stay_time[m1]
334 | else:
335 | m2 = UAV[i][j-1]
336 | t = t+self.Distance[m2][m1] / \
337 | self.vehicles_speed[i]+self.Stay_time[m1]
338 | if t <= self.time_all:
339 | UAV_Out[i].append(m1)
340 | self.time_out[i] = t
341 | return UAV_Out
342 | # ---------------------TEST Function------------------------------
343 |
344 | def fun_TEST(self):
345 | Test_Value = []
346 | for i in range(self.test_num):
347 | Test_Value.append(self.function(self.TEST[i]))
348 | return Test_Value
349 | # ---------------------Main----------------------------------------
350 |
351 | def run(self):
352 | print("PSO start, pid: %s" % os.getpid())
353 | start_time = time.time()
354 | self.fun_get_initial_parameter()
355 | self.init_Population()
356 | fitness = self.iterator()
357 | end_time = time.time()
358 | #self.cal_time = end_time - start_time
359 | #self.task_assignment = self.uav_best
360 | print("PSO result:", self.uav_best)
361 | print("PSO time:", end_time - start_time)
362 | return self.uav_best, end_time - start_time
363 |
364 |
365 |
--------------------------------------------------------------------------------
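For completeness, here is a minimal, self-contained sketch of how the `PSO` class above can be driven on a toy instance. The coordinates, rewards, stay times and UAV speeds below are made up for illustration only; the structural assumptions are the ones visible in the code itself: `targets` is a `(target_num + 1) x 4` array of `[x, y, value, stay_time]` whose row 0 is the depot, and `vehicles_speed` has one entry per UAV.

```python
# Toy usage sketch for pso.PSO (illustrative numbers, not the benchmark setup).
import numpy as np
from pso import PSO

uav_num, target_num = 3, 15
rng = np.random.default_rng(0)

# targets: (target_num + 1) x 4 array of [x, y, value, stay_time]; row 0 is the depot.
targets = np.zeros((target_num + 1, 4))
targets[1:, 0:2] = rng.uniform(0, 5e3, size=(target_num, 2))  # target positions
targets[1:, 2] = rng.uniform(1, 10, size=target_num)          # target rewards
targets[1:, 3] = rng.uniform(1, 5, size=target_num)           # stay times

vehicles_speed = np.array([10.0, 12.0, 15.0])  # one speed per UAV
time_lim = 1e3                                  # mission time budget

pso_solver = PSO(uav_num, target_num, targets, vehicles_speed, time_lim)
assignment, elapsed = pso_solver.run()
print('targets per UAV:', assignment)
print('best reward:', pso_solver.fit)
```

Note that `run()` measures wall-clock time itself, so the returned `elapsed` already covers parameter setup, population initialization and the full iteration loop.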