├── .idea
├── DDPG-master.iml
├── inspectionProfiles
│ └── profiles_settings.xml
├── misc.xml
├── modules.xml
└── workspace.xml
├── Config
├── config_1.json
└── config_2.json
├── LICENSE
├── README.md
├── Result
├── DCORA.xlsx
├── DDPG.xlsx
├── DDPG_500.csv
├── DDPG_500_QoC.csv
├── DQN.xlsx
├── DQN_500.csv
├── DQN_500_QoC.csv
└── Episode.fig
├── __pycache__
├── agent.cpython-37.pyc
├── dotdic.cpython-37.pyc
└── scenario.cpython-37.pyc
├── agent.py
├── dotdic.py
├── main.py
└── scenario.py
/.idea/DDPG-master.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 | 1605491503145
64 |
65 |
66 | 1605491503145
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/Config/config_1.json:
--------------------------------------------------------------------------------
1 | {
2 | "nMBS": 2,
3 | "nPBS": 8,
4 | "nFBS": 16,
5 | "rMBS": 500,
6 | "rPBS": 100,
7 | "rFBS": 30,
8 | "BW": 180000,
9 | "nChannel": 20,
10 | "N0": -174,
11 | "profit": 0.5,
12 | "power_cost": 0.0005,
13 | "action_cost": 0.001,
14 | "negative_cost": -1.0,
15 | "QoS_thr": 5
16 | }
--------------------------------------------------------------------------------
/Config/config_2.json:
--------------------------------------------------------------------------------
1 | {
2 | "nagents": 30,
3 | "capacity": 500,
4 | "learningrate": 0.01,
5 | "momentum": 0.005,
6 | "eps_max": 0.9,
7 | "eps_min": 0.0,
8 | "eps_decay": 200,
9 | "eps_increment": 0.003,
10 | "batch_size": 8,
11 | "gamma": 0.9,
12 | "nepisodes": 500,
13 | "nsteps": 500,
14 | "nupdate": 50,
15 | "cuda":0
16 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Liangyu Chu
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # joint-computation-offloading-and-resource-allocation
2 | joint computation offloading and resource allocation in Internet of Vehicle
3 | Simulation from an degree paper of BUPT
4 |
--------------------------------------------------------------------------------
/Result/DCORA.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LoveBUPT2018/joint-computation-offloading-and-resource-allocation/13e68b71c8e9ae7347a82294a355266c3ce28a81/Result/DCORA.xlsx
--------------------------------------------------------------------------------
/Result/DDPG.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LoveBUPT2018/joint-computation-offloading-and-resource-allocation/13e68b71c8e9ae7347a82294a355266c3ce28a81/Result/DDPG.xlsx
--------------------------------------------------------------------------------
/Result/DDPG_500.csv:
--------------------------------------------------------------------------------
1 | This includes the running steps:
2 | 271
3 | 118
4 | 84
5 | 53
6 | 25
7 | 202
8 | 39
9 | 72
10 | 112
11 | 4
12 | 143
13 | 61
14 | 38
15 | 62
16 | 39
17 | 65
18 | 43
19 | 52
20 | 52
21 | 36
22 | 91
23 | 41
24 | 17
25 | 27
26 | 50
27 | 22
28 | 102
29 | 28
30 | 77
31 | 97
32 | 91
33 | 40
34 | 69
35 | 17
36 | 32
37 | 85
38 | 69
39 | 35
40 | 54
41 | 20
42 | 35
43 | 59
44 | 13
45 | 7
46 | 7
47 | 33
48 | 46
49 | 28
50 | 99
51 | 31
52 | 14
53 | 8
54 | 42
55 | 16
56 | 17
57 | 9
58 | 52
59 | 111
60 | 7
61 | 58
62 | 27
63 | 5
64 | 78
65 | 64
66 | 36
67 | 24
68 | 16
69 | 30
70 | 2
71 | 36
72 | 14
73 | 32
74 | 55
75 | 5
76 | 29
77 | 55
78 | 14
79 | 175
80 | 13
81 | 30
82 | 5
83 | 133
84 | 11
85 | 1
86 | 5
87 | 57
88 | 64
89 | 3
90 | 17
91 | 13
92 | 12
93 | 16
94 | 19
95 | 10
96 | 70
97 | 20
98 | 3
99 | 52
100 | 1
101 | 117
102 | 38
103 | 22
104 | 13
105 | 3
106 | 32
107 | 45
108 | 5
109 | 10
110 | 16
111 | 15
112 | 1
113 | 0
114 | 16
115 | 14
116 | 17
117 | 7
118 | 5
119 | 51
120 | 1
121 | 28
122 | 16
123 | 20
124 | 6
125 | 4
126 | 8
127 | 22
128 | 36
129 | 6
130 | 17
131 | 5
132 | 6
133 | 3
134 | 36
135 | 3
136 | 8
137 | 47
138 | 2
139 | 5
140 | 33
141 | 19
142 | 2
143 | 36
144 | 120
145 | 9
146 | 2
147 | 4
148 | 90
149 | 14
150 | 14
151 | 29
152 | 10
153 | 53
154 | 9
155 | 2
156 | 32
157 | 7
158 | 26
159 | 3
160 | 16
161 | 2
162 | 20
163 | 4
164 | 4
165 | 41
166 | 3
167 | 31
168 | 5
169 | 56
170 | 4
171 | 24
172 | 28
173 | 5
174 | 3
175 | 150
176 | 3
177 | 2
178 | 19
179 | 11
180 | 20
181 | 31
182 | 17
183 | 23
184 | 3
185 | 84
186 | 18
187 | 15
188 | 45
189 | 21
190 | 66
191 | 17
192 | 11
193 | 81
194 | 1
195 | 27
196 | 17
197 | 4
198 | 7
199 | 0
200 | 2
201 | 10
202 | 57
203 | 10
204 | 12
205 | 4
206 | 16
207 | 2
208 | 3
209 | 8
210 | 2
211 | 1
212 | 15
213 | 5
214 | 55
215 | 24
216 | 0
217 | 31
218 | 3
219 | 6
220 | 1
221 | 16
222 | 36
223 | 21
224 | 40
225 | 20
226 | 10
227 | 17
228 | 0
229 | 9
230 | 45
231 | 53
232 | 48
233 | 34
234 | 14
235 | 34
236 | 3
237 | 102
238 | 22
239 | 9
240 | 44
241 | 3
242 | 23
243 | 5
244 | 4
245 | 10
246 | 3
247 | 10
248 | 6
249 | 7
250 | 1
251 | 1
252 | 19
253 | 27
254 | 2
255 | 9
256 | 3
257 | 17
258 | 10
259 | 2
260 | 11
261 | 1
262 | 49
263 | 67
264 | 2
265 | 1
266 | 6
267 | 5
268 | 12
269 | 19
270 | 24
271 | 7
272 | 3
273 | 9
274 | 9
275 | 4
276 | 3
277 | 8
278 | 1
279 | 11
280 | 10
281 | 7
282 | 14
283 | 15
284 | 43
285 | 2
286 | 2
287 | 41
288 | 8
289 | 15
290 | 123
291 | 14
292 | 1
293 | 19
294 | 68
295 | 5
296 | 13
297 | 12
298 | 1
299 | 4
300 | 2
301 | 7
302 | 22
303 | 18
304 | 23
305 | 1
306 | 25
307 | 7
308 | 1
309 | 28
310 | 11
311 | 15
312 | 4
313 | 37
314 | 2
315 | 1
316 | 19
317 | 7
318 | 3
319 | 12
320 | 7
321 | 1
322 | 40
323 | 38
324 | 17
325 | 10
326 | 17
327 | 1
328 | 5
329 | 30
330 | 10
331 | 4
332 | 2
333 | 4
334 | 20
335 | 4
336 | 33
337 | 3
338 | 26
339 | 2
340 | 21
341 | 28
342 | 52
343 | 17
344 | 3
345 | 28
346 | 1
347 | 11
348 | 41
349 | 2
350 | 9
351 | 1
352 | 3
353 | 2
354 | 13
355 | 47
356 | 40
357 | 3
358 | 9
359 | 13
360 | 6
361 | 3
362 | 2
363 | 37
364 | 18
365 | 13
366 | 14
367 | 6
368 | 30
369 | 1
370 | 15
371 | 7
372 | 25
373 | 39
374 | 113
375 | 102
376 | 49
377 | 18
378 | 31
379 | 4
380 | 27
381 | 3
382 | 10
383 | 34
384 | 32
385 | 14
386 | 3
387 | 103
388 | 7
389 | 15
390 | 3
391 | 3
392 | 1
393 | 11
394 | 9
395 | 16
396 | 8
397 | 22
398 | 24
399 | 1
400 | 12
401 | 23
402 | 10
403 | 43
404 | 5
405 | 2
406 | 4
407 | 2
408 | 5
409 | 18
410 | 13
411 | 8
412 | 1
413 | 25
414 | 28
415 | 13
416 | 23
417 | 26
418 | 2
419 | 13
420 | 36
421 | 42
422 | 1
423 | 39
424 | 3
425 | 58
426 | 17
427 | 11
428 | 1
429 | 10
430 | 1
431 | 18
432 | 10
433 | 31
434 | 2
435 | 14
436 | 5
437 | 63
438 | 47
439 | 28
440 | 24
441 | 39
442 | 27
443 | 18
444 | 13
445 | 60
446 | 1
447 | 5
448 | 53
449 | 2
450 | 1
451 | 24
452 | 48
453 | 12
454 | 2
455 | 1
456 | 1
457 | 11
458 | 4
459 | 6
460 | 14
461 | 2
462 | 1
463 | 19
464 | 11
465 | 41
466 | 3
467 | 6
468 | 11
469 | 2
470 | 5
471 | 2
472 | 13
473 | 30
474 | 2
475 | 3
476 | 15
477 | 32
478 | 3
479 | 2
480 | 43
481 | 23
482 | 84
483 | 11
484 | 2
485 | 3
486 | 19
487 | 15
488 | 8
489 | 46
490 | 24
491 | 12
492 | 24
493 | 10
494 | 29
495 | 18
496 | 3
497 | 14
498 | 14
499 | 24
500 | 1
501 | 2
502 |
--------------------------------------------------------------------------------
/Result/DDPG_500_QoC.csv:
--------------------------------------------------------------------------------
1 | This includes the running steps:
2 | 500
3 | 500
4 | 218
5 | 500
6 | 500
7 | 62
8 | 437
9 | 500
10 | 68
11 | 500
12 | 467
13 | 349
14 | 150
15 | 218
16 | 49
17 | 303
18 | 382
19 | 145
20 | 199
21 | 500
22 | 152
23 | 231
24 | 33
25 | 62
26 | 455
27 | 451
28 | 62
29 | 396
30 | 500
31 | 73
32 | 102
33 | 74
34 | 72
35 | 202
36 | 13
37 | 217
38 | 64
39 | 11
40 | 104
41 | 223
42 | 306
43 | 49
44 | 37
45 | 114
46 | 283
47 | 15
48 | 496
49 | 246
50 | 96
51 | 9
52 | 96
53 | 219
54 | 6
55 | 378
56 | 238
57 | 18
58 | 43
59 | 19
60 | 22
61 | 45
62 | 84
63 | 161
64 | 84
65 | 267
66 | 9
67 | 111
68 | 6
69 | 40
70 | 21
71 | 114
72 | 136
73 | 7
74 | 43
75 | 113
76 | 342
77 | 94
78 | 50
79 | 29
80 | 187
81 | 63
82 | 216
83 | 12
84 | 21
85 | 13
86 | 52
87 | 341
88 | 17
89 | 201
90 | 348
91 | 35
92 | 5
93 | 5
94 | 73
95 | 69
96 | 285
97 | 56
98 | 6
99 | 236
100 | 92
101 | 500
102 | 6
103 | 29
104 | 54
105 | 34
106 | 158
107 | 6
108 | 37
109 | 6
110 | 33
111 | 175
112 | 8
113 | 4
114 | 15
115 | 88
116 | 241
117 | 68
118 | 5
119 | 131
120 | 88
121 | 15
122 | 36
123 | 126
124 | 310
125 | 15
126 | 11
127 | 10
128 | 121
129 | 128
130 | 75
131 | 5
132 | 52
133 | 48
134 | 35
135 | 63
136 | 380
137 | 35
138 | 8
139 | 282
140 | 87
141 | 76
142 | 93
143 | 36
144 | 25
145 | 102
146 | 6
147 | 55
148 | 107
149 | 93
150 | 45
151 | 90
152 | 247
153 | 45
154 | 19
155 | 164
156 | 5
157 | 30
158 | 10
159 | 215
160 | 203
161 | 55
162 | 81
163 | 58
164 | 54
165 | 60
166 | 78
167 | 146
168 | 2
169 | 35
170 | 263
171 | 105
172 | 69
173 | 67
174 | 76
175 | 11
176 | 2
177 | 20
178 | 26
179 | 224
180 | 47
181 | 104
182 | 30
183 | 41
184 | 97
185 | 100
186 | 89
187 | 74
188 | 18
189 | 2
190 | 29
191 | 11
192 | 34
193 | 55
194 | 40
195 | 59
196 | 221
197 | 454
198 | 48
199 | 10
200 | 40
201 | 79
202 | 42
203 | 34
204 | 83
205 | 69
206 | 20
207 | 108
208 | 125
209 | 22
210 | 75
211 | 252
212 | 93
213 | 263
214 | 6
215 | 10
216 | 133
217 | 23
218 | 61
219 | 210
220 | 102
221 | 19
222 | 16
223 | 64
224 | 96
225 | 11
226 | 4
227 | 85
228 | 6
229 | 77
230 | 41
231 | 12
232 | 51
233 | 32
234 | 248
235 | 17
236 | 53
237 | 118
238 | 53
239 | 68
240 | 23
241 | 56
242 | 187
243 | 22
244 | 2
245 | 4
246 | 399
247 | 11
248 | 22
249 | 140
250 | 45
251 | 23
252 | 18
253 | 51
254 | 38
255 | 4
256 | 9
257 | 3
258 | 18
259 | 106
260 | 23
261 | 62
262 | 96
263 | 255
264 | 44
265 | 14
266 | 46
267 | 101
268 | 32
269 | 15
270 | 32
271 | 21
272 | 111
273 | 8
274 | 19
275 | 42
276 | 138
277 | 22
278 | 46
279 | 62
280 | 11
281 | 43
282 | 86
283 | 317
284 | 252
285 | 27
286 | 19
287 | 66
288 | 170
289 | 2
290 | 151
291 | 98
292 | 77
293 | 117
294 | 28
295 | 5
296 | 27
297 | 241
298 | 24
299 | 56
300 | 7
301 | 66
302 | 213
303 | 105
304 | 1
305 | 28
306 | 38
307 | 233
308 | 23
309 | 25
310 | 16
311 | 138
312 | 58
313 | 11
314 | 57
315 | 86
316 | 8
317 | 6
318 | 2
319 | 37
320 | 79
321 | 104
322 | 53
323 | 6
324 | 22
325 | 34
326 | 30
327 | 42
328 | 98
329 | 4
330 | 7
331 | 70
332 | 15
333 | 80
334 | 24
335 | 2
336 | 44
337 | 96
338 | 9
339 | 139
340 | 118
341 | 9
342 | 68
343 | 133
344 | 18
345 | 3
346 | 27
347 | 27
348 | 44
349 | 62
350 | 31
351 | 1
352 | 216
353 | 227
354 | 92
355 | 35
356 | 35
357 | 44
358 | 8
359 | 177
360 | 24
361 | 12
362 | 3
363 | 2
364 | 13
365 | 1
366 | 6
367 | 100
368 | 28
369 | 32
370 | 112
371 | 127
372 | 83
373 | 6
374 | 215
375 | 18
376 | 213
377 | 79
378 | 51
379 | 8
380 | 120
381 | 46
382 | 67
383 | 5
384 | 16
385 | 73
386 | 1
387 | 25
388 | 119
389 | 20
390 | 87
391 | 19
392 | 17
393 | 37
394 | 27
395 | 61
396 | 40
397 | 8
398 | 68
399 | 68
400 | 124
401 | 42
402 | 107
403 | 201
404 | 54
405 | 32
406 | 91
407 | 6
408 | 2
409 | 95
410 | 94
411 | 15
412 | 142
413 | 92
414 | 282
415 | 285
416 | 44
417 | 38
418 | 19
419 | 41
420 | 6
421 | 1
422 | 73
423 | 34
424 | 181
425 | 13
426 | 39
427 | 209
428 | 59
429 | 267
430 | 5
431 | 8
432 | 172
433 | 31
434 | 36
435 | 69
436 | 47
437 | 145
438 | 11
439 | 147
440 | 82
441 | 32
442 | 3
443 | 90
444 | 86
445 | 6
446 | 14
447 | 35
448 | 202
449 | 143
450 | 63
451 | 26
452 | 12
453 | 42
454 | 4
455 | 50
456 | 60
457 | 140
458 | 2
459 | 73
460 | 44
461 | 85
462 | 5
463 | 7
464 | 164
465 | 89
466 | 3
467 | 27
468 | 74
469 | 26
470 | 168
471 | 38
472 | 30
473 | 10
474 | 84
475 | 19
476 | 8
477 | 29
478 | 108
479 | 25
480 | 4
481 | 78
482 | 65
483 | 52
484 | 16
485 | 1
486 | 127
487 | 54
488 | 31
489 | 84
490 | 5
491 | 1
492 | 33
493 | 188
494 | 117
495 | 40
496 | 69
497 | 153
498 | 56
499 | 3
500 | 39
501 | 55
502 |
--------------------------------------------------------------------------------
/Result/DQN.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LoveBUPT2018/joint-computation-offloading-and-resource-allocation/13e68b71c8e9ae7347a82294a355266c3ce28a81/Result/DQN.xlsx
--------------------------------------------------------------------------------
/Result/DQN_500.csv:
--------------------------------------------------------------------------------
1 | This includes the running steps:
2 | 500
3 | 500
4 | 500
5 | 500
6 | 500
7 | 500
8 | 500
9 | 500
10 | 500
11 | 500
12 | 300
13 | 500
14 | 500
15 | 500
16 | 286
17 | 284
18 | 500
19 | 19
20 | 488
21 | 483
22 | 67
23 | 500
24 | 37
25 | 195
26 | 87
27 | 36
28 | 104
29 | 124
30 | 160
31 | 105
32 | 118
33 | 145
34 | 131
35 | 126
36 | 145
37 | 34
38 | 288
39 | 96
40 | 100
41 | 159
42 | 11
43 | 280
44 | 120
45 | 288
46 | 67
47 | 395
48 | 142
49 | 35
50 | 106
51 | 37
52 | 69
53 | 66
54 | 54
55 | 100
56 | 94
57 | 14
58 | 113
59 | 86
60 | 378
61 | 146
62 | 34
63 | 16
64 | 103
65 | 42
66 | 19
67 | 80
68 | 45
69 | 99
70 | 60
71 | 32
72 | 45
73 | 128
74 | 27
75 | 71
76 | 139
77 | 104
78 | 269
79 | 151
80 | 205
81 | 19
82 | 109
83 | 62
84 | 119
85 | 21
86 | 236
87 | 71
88 | 7
89 | 18
90 | 130
91 | 138
92 | 115
93 | 51
94 | 40
95 | 110
96 | 62
97 | 28
98 | 55
99 | 168
100 | 126
101 | 7
102 | 44
103 | 230
104 | 179
105 | 46
106 | 46
107 | 155
108 | 6
109 | 22
110 | 83
111 | 32
112 | 7
113 | 14
114 | 33
115 | 275
116 | 42
117 | 63
118 | 11
119 | 125
120 | 102
121 | 126
122 | 209
123 | 31
124 | 16
125 | 19
126 | 188
127 | 127
128 | 36
129 | 264
130 | 210
131 | 93
132 | 3
133 | 22
134 | 58
135 | 12
136 | 62
137 | 22
138 | 9
139 | 20
140 | 156
141 | 53
142 | 48
143 | 25
144 | 36
145 | 22
146 | 31
147 | 74
148 | 218
149 | 19
150 | 172
151 | 38
152 | 106
153 | 21
154 | 52
155 | 158
156 | 36
157 | 39
158 | 118
159 | 56
160 | 134
161 | 189
162 | 24
163 | 19
164 | 24
165 | 19
166 | 23
167 | 58
168 | 9
169 | 46
170 | 29
171 | 199
172 | 14
173 | 3
174 | 29
175 | 11
176 | 181
177 | 4
178 | 9
179 | 4
180 | 92
181 | 212
182 | 6
183 | 44
184 | 36
185 | 52
186 | 62
187 | 4
188 | 103
189 | 3
190 | 12
191 | 3
192 | 29
193 | 4
194 | 109
195 | 146
196 | 4
197 | 65
198 | 32
199 | 188
200 | 14
201 | 152
202 | 37
203 | 3
204 | 25
205 | 169
206 | 55
207 | 100
208 | 85
209 | 3
210 | 170
211 | 140
212 | 17
213 | 60
214 | 16
215 | 9
216 | 55
217 | 44
218 | 6
219 | 59
220 | 24
221 | 55
222 | 86
223 | 108
224 | 14
225 | 6
226 | 49
227 | 30
228 | 10
229 | 266
230 | 19
231 | 52
232 | 46
233 | 120
234 | 54
235 | 115
236 | 11
237 | 5
238 | 9
239 | 57
240 | 312
241 | 114
242 | 27
243 | 4
244 | 5
245 | 3
246 | 55
247 | 9
248 | 32
249 | 16
250 | 127
251 | 14
252 | 81
253 | 84
254 | 11
255 | 86
256 | 21
257 | 5
258 | 42
259 | 124
260 | 43
261 | 125
262 | 111
263 | 149
264 | 17
265 | 5
266 | 76
267 | 111
268 | 64
269 | 54
270 | 106
271 | 34
272 | 6
273 | 69
274 | 22
275 | 105
276 | 45
277 | 23
278 | 20
279 | 13
280 | 199
281 | 49
282 | 106
283 | 87
284 | 6
285 | 14
286 | 174
287 | 50
288 | 8
289 | 29
290 | 15
291 | 153
292 | 17
293 | 17
294 | 62
295 | 160
296 | 31
297 | 48
298 | 35
299 | 1
300 | 77
301 | 75
302 | 8
303 | 30
304 | 259
305 | 28
306 | 36
307 | 77
308 | 43
309 | 15
310 | 72
311 | 24
312 | 92
313 | 83
314 | 1
315 | 50
316 | 123
317 | 74
318 | 31
319 | 7
320 | 3
321 | 148
322 | 19
323 | 17
324 | 18
325 | 3
326 | 10
327 | 144
328 | 7
329 | 26
330 | 14
331 | 44
332 | 34
333 | 2
334 | 142
335 | 3
336 | 14
337 | 55
338 | 3
339 | 14
340 | 132
341 | 19
342 | 98
343 | 10
344 | 49
345 | 34
346 | 73
347 | 60
348 | 20
349 | 11
350 | 134
351 | 17
352 | 7
353 | 9
354 | 19
355 | 22
356 | 27
357 | 2
358 | 4
359 | 38
360 | 59
361 | 34
362 | 6
363 | 69
364 | 4
365 | 1
366 | 294
367 | 123
368 | 59
369 | 31
370 | 32
371 | 53
372 | 142
373 | 30
374 | 43
375 | 146
376 | 107
377 | 2
378 | 22
379 | 15
380 | 60
381 | 6
382 | 15
383 | 52
384 | 105
385 | 15
386 | 68
387 | 128
388 | 32
389 | 9
390 | 107
391 | 78
392 | 57
393 | 29
394 | 63
395 | 15
396 | 89
397 | 13
398 | 14
399 | 121
400 | 87
401 | 29
402 | 4
403 | 29
404 | 24
405 | 14
406 | 38
407 | 62
408 | 12
409 | 33
410 | 110
411 | 31
412 | 61
413 | 7
414 | 55
415 | 126
416 | 17
417 | 32
418 | 6
419 | 45
420 | 9
421 | 8
422 | 195
423 | 9
424 | 18
425 | 7
426 | 64
427 | 21
428 | 90
429 | 20
430 | 170
431 | 38
432 | 3
433 | 6
434 | 37
435 | 48
436 | 48
437 | 10
438 | 102
439 | 79
440 | 33
441 | 1
442 | 210
443 | 19
444 | 72
445 | 11
446 | 27
447 | 44
448 | 65
449 | 25
450 | 24
451 | 10
452 | 83
453 | 45
454 | 6
455 | 91
456 | 7
457 | 3
458 | 147
459 | 136
460 | 102
461 | 36
462 | 13
463 | 8
464 | 3
465 | 15
466 | 12
467 | 29
468 | 64
469 | 55
470 | 60
471 | 10
472 | 33
473 | 6
474 | 7
475 | 3
476 | 24
477 | 1
478 | 8
479 | 41
480 | 123
481 | 37
482 | 46
483 | 21
484 | 33
485 | 62
486 | 26
487 | 19
488 | 37
489 | 14
490 | 33
491 | 63
492 | 64
493 | 37
494 | 34
495 | 10
496 | 69
497 | 87
498 | 18
499 | 12
500 | 92
501 | 78
502 |
--------------------------------------------------------------------------------
/Result/DQN_500_QoC.csv:
--------------------------------------------------------------------------------
1 | This includes the running steps:
2 | 32
3 | 167
4 | 94
5 | 75
6 | 170
7 | 186
8 | 52
9 | 141
10 | 4
11 | 60
12 | 102
13 | 74
14 | 25
15 | 239
16 | 33
17 | 275
18 | 283
19 | 230
20 | 500
21 | 0
22 | 61
23 | 44
24 | 45
25 | 54
26 | 60
27 | 280
28 | 173
29 | 476
30 | 126
31 | 153
32 | 19
33 | 17
34 | 160
35 | 29
36 | 254
37 | 42
38 | 443
39 | 13
40 | 23
41 | 500
42 | 471
43 | 500
44 | 70
45 | 8
46 | 27
47 | 240
48 | 248
49 | 49
50 | 402
51 | 500
52 | 112
53 | 35
54 | 117
55 | 51
56 | 206
57 | 1
58 | 83
59 | 44
60 | 56
61 | 2
62 | 161
63 | 11
64 | 85
65 | 125
66 | 19
67 | 207
68 | 342
69 | 22
70 | 95
71 | 53
72 | 73
73 | 79
74 | 20
75 | 17
76 | 56
77 | 32
78 | 67
79 | 112
80 | 206
81 | 48
82 | 122
83 | 8
84 | 3
85 | 4
86 | 27
87 | 64
88 | 30
89 | 214
90 | 28
91 | 38
92 | 133
93 | 27
94 | 26
95 | 83
96 | 12
97 | 5
98 | 59
99 | 40
100 | 53
101 | 11
102 | 38
103 | 14
104 | 82
105 | 52
106 | 45
107 | 62
108 | 5
109 | 3
110 | 36
111 | 5
112 | 9
113 | 25
114 | 40
115 | 33
116 | 20
117 | 4
118 | 20
119 | 83
120 | 14
121 | 45
122 | 39
123 | 21
124 | 3
125 | 76
126 | 25
127 | 85
128 | 20
129 | 24
130 | 10
131 | 4
132 | 68
133 | 3
134 | 1
135 | 3
136 | 57
137 | 11
138 | 2
139 | 23
140 | 20
141 | 5
142 | 33
143 | 81
144 | 103
145 | 28
146 | 13
147 | 52
148 | 13
149 | 17
150 | 43
151 | 14
152 | 14
153 | 48
154 | 83
155 | 7
156 | 10
157 | 39
158 | 49
159 | 35
160 | 52
161 | 7
162 | 63
163 | 23
164 | 6
165 | 3
166 | 16
167 | 5
168 | 4
169 | 0
170 | 17
171 | 45
172 | 5
173 | 12
174 | 26
175 | 3
176 | 60
177 | 7
178 | 2
179 | 76
180 | 3
181 | 3
182 | 18
183 | 2
184 | 62
185 | 21
186 | 121
187 | 17
188 | 6
189 | 25
190 | 78
191 | 8
192 | 5
193 | 7
194 | 85
195 | 14
196 | 2
197 | 5
198 | 27
199 | 14
200 | 39
201 | 66
202 | 7
203 | 15
204 | 10
205 | 4
206 | 8
207 | 16
208 | 8
209 | 11
210 | 27
211 | 29
212 | 43
213 | 14
214 | 3
215 | 9
216 | 2
217 | 30
218 | 4
219 | 2
220 | 16
221 | 11
222 | 11
223 | 6
224 | 37
225 | 59
226 | 7
227 | 30
228 | 18
229 | 11
230 | 21
231 | 12
232 | 7
233 | 2
234 | 2
235 | 7
236 | 1
237 | 17
238 | 54
239 | 51
240 | 19
241 | 2
242 | 0
243 | 19
244 | 77
245 | 19
246 | 32
247 | 8
248 | 41
249 | 3
250 | 15
251 | 1
252 | 20
253 | 40
254 | 4
255 | 68
256 | 41
257 | 42
258 | 5
259 | 12
260 | 3
261 | 4
262 | 15
263 | 13
264 | 36
265 | 24
266 | 74
267 | 3
268 | 20
269 | 5
270 | 35
271 | 3
272 | 3
273 | 3
274 | 19
275 | 4
276 | 7
277 | 7
278 | 53
279 | 3
280 | 32
281 | 19
282 | 3
283 | 24
284 | 20
285 | 64
286 | 12
287 | 9
288 | 25
289 | 21
290 | 5
291 | 8
292 | 19
293 | 4
294 | 9
295 | 27
296 | 41
297 | 3
298 | 48
299 | 17
300 | 47
301 | 5
302 | 1
303 | 9
304 | 8
305 | 54
306 | 79
307 | 9
308 | 1
309 | 35
310 | 6
311 | 30
312 | 1
313 | 8
314 | 15
315 | 2
316 | 19
317 | 3
318 | 12
319 | 8
320 | 1
321 | 16
322 | 2
323 | 22
324 | 9
325 | 1
326 | 38
327 | 16
328 | 8
329 | 12
330 | 9
331 | 28
332 | 5
333 | 1
334 | 1
335 | 19
336 | 50
337 | 18
338 | 12
339 | 26
340 | 4
341 | 2
342 | 1
343 | 49
344 | 3
345 | 18
346 | 7
347 | 18
348 | 18
349 | 12
350 | 2
351 | 1
352 | 1
353 | 30
354 | 13
355 | 2
356 | 24
357 | 13
358 | 5
359 | 30
360 | 53
361 | 12
362 | 72
363 | 21
364 | 1
365 | 5
366 | 3
367 | 20
368 | 7
369 | 45
370 | 3
371 | 5
372 | 4
373 | 3
374 | 1
375 | 8
376 | 30
377 | 18
378 | 26
379 | 5
380 | 3
381 | 35
382 | 9
383 | 54
384 | 12
385 | 1
386 | 20
387 | 8
388 | 35
389 | 3
390 | 11
391 | 7
392 | 4
393 | 2
394 | 13
395 | 9
396 | 18
397 | 1
398 | 12
399 | 6
400 | 13
401 | 1
402 | 25
403 | 23
404 | 15
405 | 43
406 | 41
407 | 10
408 | 1
409 | 18
410 | 17
411 | 2
412 | 37
413 | 22
414 | 55
415 | 1
416 | 28
417 | 60
418 | 28
419 | 2
420 | 18
421 | 3
422 | 19
423 | 1
424 | 3
425 | 32
426 | 19
427 | 28
428 | 58
429 | 4
430 | 1
431 | 22
432 | 74
433 | 1
434 | 2
435 | 2
436 | 13
437 | 10
438 | 2
439 | 7
440 | 25
441 | 6
442 | 4
443 | 25
444 | 25
445 | 7
446 | 10
447 | 1
448 | 1
449 | 14
450 | 2
451 | 3
452 | 3
453 | 1
454 | 10
455 | 7
456 | 27
457 | 43
458 | 25
459 | 19
460 | 28
461 | 7
462 | 44
463 | 4
464 | 7
465 | 2
466 | 16
467 | 6
468 | 16
469 | 12
470 | 1
471 | 38
472 | 53
473 | 52
474 | 2
475 | 9
476 | 4
477 | 0
478 | 19
479 | 1
480 | 1
481 | 8
482 | 76
483 | 48
484 | 2
485 | 17
486 | 6
487 | 10
488 | 6
489 | 23
490 | 17
491 | 47
492 | 38
493 | 16
494 | 39
495 | 7
496 | 30
497 | 26
498 | 3
499 | 8
500 | 16
501 | 9
502 |
--------------------------------------------------------------------------------
/Result/Episode.fig:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LoveBUPT2018/joint-computation-offloading-and-resource-allocation/13e68b71c8e9ae7347a82294a355266c3ce28a81/Result/Episode.fig
--------------------------------------------------------------------------------
/__pycache__/agent.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LoveBUPT2018/joint-computation-offloading-and-resource-allocation/13e68b71c8e9ae7347a82294a355266c3ce28a81/__pycache__/agent.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/dotdic.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LoveBUPT2018/joint-computation-offloading-and-resource-allocation/13e68b71c8e9ae7347a82294a355266c3ce28a81/__pycache__/dotdic.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/scenario.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LoveBUPT2018/joint-computation-offloading-and-resource-allocation/13e68b71c8e9ae7347a82294a355266c3ce28a81/__pycache__/scenario.cpython-37.pyc
--------------------------------------------------------------------------------
/agent.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Author:fenghao
4 | Date:2020.2.14
5 | Location:BUPT
6 | """
7 |
8 | import copy
9 | import numpy as np
10 | from numpy import pi
11 | from collections import namedtuple
12 | from random import random, uniform, choice, randrange, sample
13 | import torch
14 | from torch import optim
15 | import torch.nn as nn
16 | from torch.autograd import Variable
17 | from torch.nn import functional as F
18 | from scenario import Scenario, MEC
19 |
20 | Transition = namedtuple('Transition',
21 | ('state', 'action', 'next_state', 'reward')) # Define a transition tuple
22 |
class ReplayMemory(object):  # Circular experience-replay buffer
    """Fixed-capacity store of Transition tuples with uniform sampling."""

    def __init__(self, capacity):
        """Create an empty buffer holding at most `capacity` transitions."""
        self.capacity = capacity   # maximum number of stored transitions
        self.memory = []           # backing list; grows until it reaches capacity
        self.position = 0          # write cursor: next slot to overwrite

    def Push(self, *args):
        """Store one transition, overwriting the oldest entry once full."""
        entry = Transition(*args)
        if len(self.memory) == self.capacity:
            # Ring is full: overwrite in place at the write cursor.
            self.memory[self.position] = entry
        else:
            # Still growing: the cursor always equals len(self.memory) here,
            # so appending is equivalent to assigning at the cursor.
            self.memory.append(entry)
        self.position = (self.position + 1) % self.capacity

    def Sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)
41 |
42 |
class DNN(nn.Module):  # Define a deep neural network
    """Fully connected Q-network mapping a state vector to one Q-value per action.

    Input width is opt.nagents; output width is one value per joint
    (MEC, resource-block) choice, i.e. MEC_Number() * sce.nRB.
    """

    def __init__(self, opt, sce, scenario):
        super(DNN, self).__init__()
        # Attribute names are kept stable so state_dict keys match checkpoints
        # exchanged via load_state_dict (policy <-> target networks).
        n_actions = scenario.MEC_Number() * sce.nRB
        self.input_layer = nn.Linear(opt.nagents, 64)
        self.middle1_layer = nn.Linear(64, 32)
        self.middle2_layer = nn.Linear(32, 32)
        self.output_layer = nn.Linear(32, n_actions)

    def forward(self, state):
        """Run the state through three ReLU hidden layers and a linear head."""
        hidden = state
        for layer in (self.input_layer, self.middle1_layer, self.middle2_layer):
            hidden = F.relu(layer(hidden))
        return self.output_layer(hidden)
58 |
59 |
class Agent:  # Define the agent (UE)
    """One learning user equipment (UE).

    Each agent owns a private replay memory, a policy DNN and a target DNN,
    and learns a Q-function over L*K discrete actions, where L is the number
    of MECs in the scenario and K = sce.nRB resource blocks. Action index a
    encodes the joint choice (MEC a // K, RB a % K).
    """

    def __init__(self, opt, sce, scenario, index, device):  # Initialize the agent (UE)
        # opt: training hyper-parameters (capacity, learningrate, momentum, batch_size, gamma, ...)
        # sce: radio-scenario parameters (nRB, BW, N0, QoS_thr, negative_cost, rMMEC, ...)
        # scenario: environment object providing MEC objects, counts and locations
        # index: this agent's identifier; device: torch device (stored but not used below)
        self.opt = opt
        self.sce = sce
        self.id = index
        self.device = device
        self.location = self.Set_Location(scenario)
        self.memory = ReplayMemory(opt.capacity)
        self.model_policy = DNN(opt, sce, scenario)
        self.model_target = DNN(opt, sce, scenario)
        # The target network starts as an exact copy of the policy network and
        # is only refreshed explicitly via Target_Update().
        self.model_target.load_state_dict(self.model_policy.state_dict())
        self.model_target.eval()
        self.optimizer = optim.RMSprop(params=self.model_policy.parameters(), lr=opt.learningrate, momentum=opt.momentum)

    def Set_Location(self, scenario):  # Initialize the location of the agent
        """Place the agent at a random point around a randomly chosen macro MEC.

        The radius (not the area) is sampled uniformly in [0, rMMEC), so
        positions cluster toward the chosen MEC's centre.
        """
        Loc_MMEC, _ , _ = scenario.MEC_Location()
        Loc_agent = np.zeros(2)
        LocM = choice(Loc_MMEC)    # pick one macro-MEC site at random
        r = self.sce.rMMEC*random()    # radial distance within coverage radius rMMEC
        theta = uniform(-pi,pi)    # uniform bearing
        Loc_agent[0] = LocM[0] + r*np.cos(theta)
        Loc_agent[1] = LocM[1] + r*np.sin(theta)
        return Loc_agent

    def Get_Location(self):
        """Return this agent's fixed 2-D coordinates (numpy array of shape (2,))."""
        return self.location

    def Select_Action(self, state, scenario, eps_threshold):  # Select action for a user based on the network state
        """Select one of the L*K joint (MEC, RB) actions for the given state.

        NOTE(review): the greedy branch is taken with probability
        eps_threshold and a uniformly random action otherwise — i.e.
        eps_threshold here is the probability of *exploiting*. Confirm this
        matches the epsilon schedule built from opt.eps_* in the caller.
        """
        L = scenario.MEC_Number()  # The total number of MECs
        K = self.sce.nRB  # The total number of RBs
        sample = random()  # NOTE: shadows random.sample imported at module level (harmless here)
        if sample < eps_threshold:  # epsilon-greeedy policy
            with torch.no_grad():
                Q_value = self.model_policy(state)  # Get the Q_value from DNN
                action = Q_value.max(0)[1].view(1,1)  # argmax over the flat action vector
        else:
            action = torch.tensor([[randrange(L*K)]], dtype=torch.long)  # uniform random exploration
        return action

    def Get_Reward(self, action, action_i, state, scenario):  # Get reward for the state-action pair
        """Return (QoS, reward) for this agent's choice given all agents' actions.

        action: per-agent action indices for every agent (indexed 0..opt.nagents-1)
        action_i: this agent's own action index in [0, L*K)
        QoS is 1 iff the resulting SINR meets the threshold sce.QoS_thr (dB);
        reward is 1 on success and sce.negative_cost otherwise.
        """
        MEC = scenario.Get_MECs()
        L = scenario.MEC_Number()  # The total number of MECs
        K = self.sce.nRB  # The total number of RBs

        MEC_selected = action_i // K
        Ch_selected = action_i % K  # Translate to the selected MEC and RB based on the selected action index
        Loc_diff = MEC[MEC_selected].Get_Location() - self.location
        distance = np.sqrt((Loc_diff[0]**2 + Loc_diff[1]**2))  # Calculate the distance between MEC and UE
        Rx_power = MEC[MEC_selected].Receive_Power(distance)  # Calculate the received power

        if Rx_power == 0.0:
            reward = self.sce.negative_cost  # Out of range of the selected MEC, thus obtain a negative reward
            QoS = 0  # Definitely, QoS cannot be satisfied
        else:  # If inside the coverage, then we will calculate the reward value
            Interference = 0.0
            for i in range(self.opt.nagents):  # Obtain interference on the same RB
                MEC_select_i = action[i] // K
                Ch_select_i = action[i] % K  # The choice of other users
                if Ch_select_i == Ch_selected:  # Calculate the interference on the same RB
                    Loc_diff_i = MEC[MEC_select_i].Get_Location() - self.location
                    distance_i = np.sqrt((Loc_diff_i[0]**2 + Loc_diff_i[1]**2))
                    Rx_power_i = MEC[MEC_select_i].Receive_Power(distance_i)
                    Interference += Rx_power_i  # Sum all the interference (own signal included; removed below)
            Interference -= Rx_power  # Remove the received power from interference
            Noise = 10**((self.sce.N0)/10)*self.sce.BW  # N0 (presumably dBm/Hz) -> linear, times bandwidth; units must match Receive_Power — TODO confirm
            SINR = Rx_power/(Interference + Noise)  # Calculate the SINR (linear scale)
            if SINR >= 10**(self.sce.QoS_thr/10):  # QoS threshold given in dB
                QoS = 1
                reward = 1
            else:
                QoS = 0
                reward = self.sce.negative_cost
        """Rate = self.sce.BW * np.log2(1 + SINR) / (10**6) # Calculate the rate of UE
        profit = self.sce.profit * Rate
        Tx_power_dBm = MEC[MEC_selected].Transmit_Power_dBm() # Calculate the transmit power of the selected MEC
        cost = self.sce.power_cost * Tx_power_dBm + self.sce.action_cost # Calculate the total cost
        reward = profit - cost """
        reward = torch.tensor([reward])
        return QoS, reward

    def Save_Transition(self, state, action, next_state, reward, scenario):  # Store a transition
        """Wrap one (s, a, s', r) step in tensors and push it into replay memory."""
        L = scenario.MEC_Number()  # The total number of MECs  — NOTE(review): L and K are unused in this method
        K = self.sce.nRB  # The total number of RBs
        action = torch.tensor([[action]])
        reward = torch.tensor([reward])
        state = state.unsqueeze(0)  # add a batch dimension so torch.cat works in Optimize_Model
        next_state = next_state.unsqueeze(0)
        self.memory.Push(state, action, next_state, reward)

    def Target_Update(self):  # Update the parameters of the target network
        """Hard-copy the policy network's weights into the target network."""
        self.model_target.load_state_dict(self.model_policy.state_dict())

    def Optimize_Model(self):
        """Run one Double-DQN gradient step on a minibatch from replay memory.

        The policy network selects the argmax next action and the target
        network evaluates it; gradients are clamped element-wise to [-1, 1].
        """
        if len(self.memory) < self.opt.batch_size:
            return  # not enough experience collected yet
        transitions = self.memory.Sample(self.opt.batch_size)
        batch = Transition(*zip(*transitions))  # transpose: list of Transitions -> Transition of batches
        # NOTE(review): non_final_mask is only used by the commented-out variant
        # below; the active path treats every next_state as non-final.
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                            batch.next_state)), dtype=torch.bool)
        non_final_next_states = torch.cat([s for s in batch.next_state
                                            if s is not None])
        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)
        state_action_values = self.model_policy(state_batch).gather(1, action_batch)  # Q(s, a) of the taken actions
        next_state_values = torch.zeros(self.opt.batch_size)  # NOTE(review): immediately overwritten below

        # Double-DQN target: policy net picks a' = argmax_a Q_policy(s', a),
        # target net supplies Q_target(s', a').
        # NOTE(review): unlike the commented variant, this target is not
        # .detach()-ed before the loss — confirm gradients through the target
        # path are intended (only policy parameters are optimized).
        next_action_batch = torch.unsqueeze(self.model_policy(non_final_next_states).max(1)[1], 1)
        next_state_values = self.model_target(non_final_next_states).gather(1, next_action_batch)
        expected_state_action_values = (next_state_values * self.opt.gamma) + reward_batch.unsqueeze(1)
        # Compute Huber loss
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)  # DDPG
        """
        next_state_values[non_final_mask] = self.model_target(non_final_next_states).max(1)[0].detach() # DDPG
        expected_state_action_values = (next_state_values * self.opt.gamma) + reward_batch
        # Compute Huber loss
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
        """
        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.model_policy.parameters():
            param.grad.data.clamp_(-1, 1)  # element-wise gradient clipping
        self.optimizer.step()
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
--------------------------------------------------------------------------------
/dotdic.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import copy
3 |
class DotDic(dict):
    """A dict whose keys can also be read and written as attributes.

    Missing attribute lookups return None (dict.get semantics) rather than
    raising AttributeError.
    """

    def __getattr__(self, name):
        # Same semantics as dict.get: None for absent keys.
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    def __deepcopy__(self, memo=None):
        # Deep-copy the underlying mapping, then rewrap it as a DotDic.
        plain = copy.deepcopy(dict(self), memo=memo)
        return DotDic(plain)
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 | import copy, json, argparse
5 | import torch
6 | from scenario import Scenario
7 | from agent import Agent
8 | from dotdic import DotDic
9 |
10 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
11 |
def create_agents(opt, sce, scenario, device):
    """Build one Agent (each with its own network) per configured agent index."""
    return [Agent(opt, sce, scenario, index=idx, device=device)
            for idx in range(opt.nagents)]
17 |
def run_episodes(opt, sce, agents, scenario):
    """Train all agents for opt.nepisodes episodes of at most opt.nsteps steps.

    Each step every agent selects an action (epsilon-greedy), obtains its QoS
    flag and reward, stores the transition and trains.  An episode terminates
    early once every agent satisfies its QoS requirement.  The step count of
    each episode is appended to DDPG.csv.
    """
    nepisode = 0
    action = torch.zeros(opt.nagents, dtype=int)
    reward = torch.zeros(opt.nagents)
    QoS = torch.zeros(opt.nagents)
    state_target = torch.ones(opt.nagents)  # The QoS requirement
    # Context manager guarantees the log file is closed even on an exception.
    with open("DDPG.csv", "w+") as f:
        f.write("This includes the running steps:\n")
        while nepisode < opt.nepisodes:
            state = torch.zeros(opt.nagents)       # Reset the state
            next_state = torch.zeros(opt.nagents)  # Reset the next_state
            nstep = 0
            while nstep < opt.nsteps:
                # Linearly increasing epsilon, capped at eps_max.
                eps_threshold = opt.eps_min + opt.eps_increment * nstep * (nepisode + 1)
                if eps_threshold > opt.eps_max:
                    eps_threshold = opt.eps_max
                for i in range(opt.nagents):
                    action[i] = agents[i].Select_Action(state, scenario, eps_threshold)  # Select action
                for i in range(opt.nagents):
                    QoS[i], reward[i] = agents[i].Get_Reward(action, action[i], state, scenario)  # Obtain reward and next state
                    next_state[i] = QoS[i]
                for i in range(opt.nagents):
                    agents[i].Save_Transition(state, action[i], next_state, reward[i], scenario)  # Save the state transition
                    agents[i].Optimize_Model()  # Train the model
                    if nstep % opt.nupdate == 0:  # Periodically refresh the target network
                        agents[i].Target_Update()
                state = copy.deepcopy(next_state)  # State transits
                if torch.all(state.eq(state_target)):  # Stop early once QoS is satisfied
                    break
                nstep += 1
            print('Episode Number:', nepisode, 'Training Step:', nstep)
            f.write("%i \n" % nstep)
            nepisode += 1
56 |
def run_trial(opt, sce):
    """Execute one independent trial: build the environment, the agents, and train."""
    scenario = Scenario(sce)
    run_episodes(opt, sce, create_agents(opt, sce, scenario, device), scenario)
61 |
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-c1', '--config_path1', type=str, help='path to existing scenarios file')
    parser.add_argument('-c2', '--config_path2', type=str, help='path to existing options file')
    parser.add_argument('-n', '--ntrials', type=int, default=1, help='number of trials to run')
    args = parser.parse_args()
    # Load the scenario and option configuration files; the context managers
    # make sure the file handles are closed after reading.
    with open(args.config_path1, 'r') as cfg:
        sce = DotDic(json.load(cfg))
    with open(args.config_path2, 'r') as cfg:
        opt = DotDic(json.load(cfg))
    for i in range(args.ntrials):
        # Deep-copy the configs so one trial cannot mutate the next one's settings.
        run_trial(copy.deepcopy(opt), copy.deepcopy(sce))
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
--------------------------------------------------------------------------------
/scenario.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Thu Jan 30 11:51:49 2020
4 |
5 | @author: liangyu
6 |
7 | Create the network simulation scenario
8 | """
9 |
10 | import numpy as np
11 | from numpy import pi
12 | from random import random, uniform, choice
13 |
class MEC:  # Define the MEC
    """One MEC node (macro / pico / femto) with a fixed location and coverage radius."""

    # Fixed downlink transmit power per MEC tier, in dBm
    # (no power allocation is modeled).
    _TX_POWER_DBM = {"MMEC": 40, "PMEC": 30, "FMEC": 20}

    def __init__(self, sce, MEC_index, MEC_type, MEC_Loc, MEC_Radius):
        self.sce = sce
        self.id = MEC_index
        self.MECtype = MEC_type       # one of "MMEC", "PMEC", "FMEC"
        self.MEC_Loc = MEC_Loc        # (x, y) coordinates
        self.MEC_Radius = MEC_Radius  # coverage radius

    def reset(self):  # Reset the RB status
        self.Ch_State = np.zeros(self.sce.nRB)

    def Get_Location(self):
        return self.MEC_Loc

    def Transmit_Power_dBm(self):
        """Return this MEC tier's transmit power in dBm.

        Raises ValueError for an unknown MEC type instead of the silent
        UnboundLocalError the previous if/elif chain produced.
        """
        try:
            return self._TX_POWER_DBM[self.MECtype]
        except KeyError:
            raise ValueError("Unknown MEC type: %r" % (self.MECtype,))

    def Receive_Power(self, d):
        """Return the received power in mW at distance d, or 0.0 outside coverage.

        Path-loss models: 34 + 40*log10(d) dB for macro/pico MECs,
        37 + 30*log10(d) dB for femto MECs.
        """
        if d > self.MEC_Radius:
            return 0.0  # out of coverage: no received power
        Tx_Power_dBm = self.Transmit_Power_dBm()
        if self.MECtype in ("MMEC", "PMEC"):
            loss = 34 + 40 * np.log10(d)
        else:  # FMEC (unknown types already rejected above)
            loss = 37 + 30 * np.log10(d)
        Rx_power_dBm = Tx_Power_dBm - loss  # received power in dBm
        return 10 ** (Rx_power_dBm / 10)    # dBm -> mW
50 |
51 |
class Scenario:  # Define the network scenario
    """The simulated network: a set of macro, pico and femto MECs."""

    def __init__(self, sce):  # Initialize the scenario we simulate
        self.sce = sce
        # NOTE: this attribute holds the MEC list directly; the previous
        # `def MECs(self)` accessor was unreachable (always shadowed by this
        # attribute on instances) and has been removed.
        self.MECs = self.MEC_Init()

    def reset(self):  # Reset the scenario we simulate
        """Reset the RB occupation state of every MEC."""
        for mec in self.MECs:
            mec.reset()

    def MEC_Number(self):
        """Return the total number of MECs across all three tiers."""
        return self.sce.nMMEC + self.sce.nPMEC + self.sce.nFMEC

    def MEC_Location(self):
        """Generate the (x, y) locations for all MEC tiers.

        Macro MECs sit on a horizontal line 900 m apart; each group of four
        pico MECs is placed 250 m from its parent macro MEC at 0/90/180/270
        degrees; femto MECs are dropped uniformly at random inside a randomly
        chosen macro cell.
        """
        Loc_MMEC = np.zeros((self.sce.nMMEC, 2))  # Initialize the locations of MECs
        Loc_PMEC = np.zeros((self.sce.nPMEC, 2))
        Loc_FMEC = np.zeros((self.sce.nFMEC, 2))

        for i in range(self.sce.nMMEC):
            Loc_MMEC[i, 0] = 500 + 900 * i  # x-coordinate
            Loc_MMEC[i, 1] = 500            # y-coordinate

        for i in range(self.sce.nPMEC):
            # i // 4 selects the parent macro MEC; i % 4 picks the quadrant angle.
            Loc_PMEC[i, 0] = Loc_MMEC[i // 4, 0] + 250 * np.cos(pi / 2 * (i % 4))
            Loc_PMEC[i, 1] = Loc_MMEC[i // 4, 1] + 250 * np.sin(pi / 2 * (i % 4))

        for i in range(self.sce.nFMEC):
            LocM = choice(Loc_MMEC)        # random parent macro cell
            r = self.sce.rMMEC * random()  # random radius inside the macro cell
            theta = uniform(-pi, pi)       # random angle
            Loc_FMEC[i, 0] = LocM[0] + r * np.cos(theta)
            Loc_FMEC[i, 1] = LocM[1] + r * np.sin(theta)

        return Loc_MMEC, Loc_PMEC, Loc_FMEC

    def MEC_Init(self):  # Initialize all the MECs
        """Instantiate every MEC with its global index, tier, location and radius."""
        MECs = []  # The vector of MECs
        Loc_MMEC, Loc_PMEC, Loc_FMEC = self.MEC_Location()

        for i in range(self.sce.nMMEC):  # Initialize the MMECs
            MECs.append(MEC(self.sce, i, "MMEC", Loc_MMEC[i], self.sce.rMMEC))

        for i in range(self.sce.nPMEC):
            MECs.append(MEC(self.sce, self.sce.nMMEC + i, "PMEC",
                            Loc_PMEC[i], self.sce.rPMEC))

        for i in range(self.sce.nFMEC):
            MECs.append(MEC(self.sce, self.sce.nMMEC + self.sce.nPMEC + i, "FMEC",
                            Loc_FMEC[i], self.sce.rFMEC))
        return MECs
116 |
117 |
118 |
119 |
120 |
121 |
122 |
--------------------------------------------------------------------------------