├── .idea
├── .gitignore
├── Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV.iml
├── Multihop-V2V.iml
├── deployment.xml
├── inspectionProfiles
│ ├── Project_Default.xml
│ └── profiles_settings.xml
├── misc.xml
├── modules.xml
└── vcs.xml
├── README.md
├── env
├── __init__.py
├── config.py
├── datastruct.py
├── environment.py
└── utils.py
└── methods
├── A3C
├── a3c.py
├── outputs
│ └── Multihop-V2V
│ │ ├── 20230926-211815
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── config_params.txt
│ │ │ ├── params.txt
│ │ │ ├── record.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20230927-153132
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── config_params.txt
│ │ │ ├── params.txt
│ │ │ ├── record.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20230927-153927
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── config_params.txt
│ │ │ ├── params.txt
│ │ │ ├── record.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20230928-110901
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20230930-114045
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231002-214935
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── config_params.txt
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231004-150720
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231004-162835
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231005-153812
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231005-192015
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── config_params.txt
│ │ │ ├── params.txt
│ │ │ ├── record.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231008-130030
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231011-143350
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231011-185838
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231012-213018
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231013-085149
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231013-145006
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231013-170138
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231013-203310
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231017-090344
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231025-084607
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231107-153511
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ └── params.txt
│ │ ├── 20231115-085643
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231120-093833
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231122-103437
│ │ ├── models
│ │ │ └── a3c.pt
│ │ └── results
│ │ │ ├── params.txt
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ └── 20231123-164321
│ │ ├── models
│ │ └── a3c.pt
│ │ └── results
│ │ ├── params.txt
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
└── run_A3C.py
├── DQN
├── README.md
├── dqn.py
├── outputs
│ └── Multihop-V2V
│ │ ├── 20231009-101444
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231009-102149
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231009-102312
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231011-143243
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231013-084908
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231016-141744
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231018-102922
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231020-192110
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231023-092959
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231025-084607
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231115-085643
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ ├── 20231122-103436
│ │ └── results
│ │ │ ├── train_completion_rate.npy
│ │ │ ├── train_completion_rate_curve.eps
│ │ │ ├── train_ma_completion_rate.npy
│ │ │ ├── train_ma_rewards.npy
│ │ │ ├── train_rewards.npy
│ │ │ └── train_rewards_curve.eps
│ │ └── 20231123-161552
│ │ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
└── run_DQN.py
├── Draw_pictures
├── A3C_completion_rate.pdf
├── A3C_completion_rate_lr.pdf
├── A3C_rewards.pdf
├── A3C_rewards_lr.pdf
├── contrast_completion_rate.pdf
├── contrast_rewards.pdf
├── different_task_computation_resource_average_rewards.pdf
├── different_tasksize_average_rewards.pdf
├── different_vehicle_number_average_rewards.pdf
├── different_vehicle_speed_average_rewards.pdf
└── draw.py
└── Greedy
├── greedy_tasksize.py
├── outputs
└── Multihop-V2V
│ ├── 20231005-192011
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231008-130015
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231016-193257
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231020-215837
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231025-140726
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231025-152310
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231105-092657
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231105-150100
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231106-084841
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231106-143001
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231106-182637
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231107-103741
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231107-161928
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231108-082916
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231108-091912
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ ├── 20231108-102015
│ └── results
│ │ ├── train_completion_rate.npy
│ │ ├── train_completion_rate_curve.eps
│ │ ├── train_ma_completion_rate.npy
│ │ ├── train_ma_rewards.npy
│ │ ├── train_rewards.npy
│ │ └── train_rewards_curve.eps
│ └── 20231115-085643
│ └── results
│ ├── train_completion_rate.npy
│ ├── train_completion_rate_curve.eps
│ ├── train_ma_completion_rate.npy
│ ├── train_ma_rewards.npy
│ ├── train_rewards.npy
│ └── train_rewards_curve.eps
└── run_greedy.py
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Datasource local storage ignored files
5 | /dataSources/
6 | /dataSources.local.xml
7 | # Editor-based HTTP Client requests
8 | /httpRequests/
9 |
--------------------------------------------------------------------------------
/.idea/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/Multihop-V2V.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.idea/deployment.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks
2 | This is the runnable Python project for the paper 'Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks'.
3 |
4 | './env/config.py' contains the parameter configuration.
5 |
6 | './env/datastruct.py' contains the data structures of the project.
7 |
8 | './env/environment.py' contains the environment settings.
9 |
10 | './methods/A3C' contains the A3C algorithm.
11 |
12 | './methods/DQN' contains the DQN algorithm.
13 |
14 | './methods/Greedy' contains the Greedy algorithm.
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/env/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/env/__init__.py
--------------------------------------------------------------------------------
/env/config.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | # 旨在简化数据类的定义,减少样板代码,并提供更好的代码可读性。这对于处理大量数据对象的情况特别有用。
3 | import numpy as np
4 |
5 |
class VehicularEnvConfig:
    """Static configuration for the RSU-assisted multi-hop vehicular offloading environment.

    All values are plain instance attributes assigned in ``__init__``;
    construct with ``VehicularEnvConfig()`` and read the attributes directly.

    NOTE(review): the original class carried a ``@dataclasses.dataclass``
    decorator, but it declares no annotated class-level fields and defines
    ``__init__`` by hand, so the decorator generated nothing and only misled
    readers.  It has been removed; the public interface is unchanged.
    """

    def __init__(self) -> None:
        # --- Road geometry ---
        self.road_range: int = 1200   # road length (m)
        self.road_width: int = 50     # road width (m)

        # --- Simulation time slots ---
        self.time_slot_start: int = 0
        self.time_slot_end: int = 99

        # --- Properties of the tasks to be offloaded ---
        self.Function_min_task_datasize = 2                  # minimum task data size (MB)
        self.Function_max_task_datasize = 5                  # maximum task data size (MB)
        self.Function_task_computing_resource: float = 300   # required compute: 300 cycles/bit
        self.Function_min_task_delay: int = 20               # minimum task deadline: 20 s
        self.Function_max_task_delay: int = 25               # maximum task deadline: 25 s

        # --- Locally generated task queues (tasks arriving at each offloading target) ---
        self.min_rsu_task_number: int = 2        # minimum number of tasks on an RSU
        self.max_rsu_task_number: int = 3        # maximum number of tasks on an RSU
        self.min_vehicle_task_number: int = 4    # minimum initial task count per vehicle
        self.max_vehicle_task_number: int = 5    # maximum initial task count per vehicle
        self.min_task_datasize: float = 2        # minimum data size per queued task (MB)
        self.max_task_datasize: float = 4        # maximum data size per queued task (MB)

        # --- Vehicles ---
        self.min_vehicle_speed: int = 30                 # minimum driving speed
        self.max_vehicle_speed: int = 40                 # maximum driving speed
        # NOTE(review): the original comment on the maximum claimed 40000, but the
        # value is 25000 Mcycles/s — the value is kept, the comment is corrected.
        self.min_vehicle_compute_ability: float = 20000  # minimum compute ability: 20000 Mcycles/s
        self.max_vehicle_compute_ability: float = 25000  # maximum compute ability: 25000 Mcycles/s
        self.vehicle_number = 10                         # number of vehicles
        self.seed = 1                                    # RNG seed
        self.min_vehicle_y_initial_location: float = 0   # minimum initial y coordinate
        self.max_vehicle_y_initial_location: float = 50  # maximum initial y coordinate
        self.vehicle_x_initial_location: list = [0, self.road_range]  # candidate initial x coordinates

        # --- RSUs ---
        self.rsu_number = 3                          # number of RSUs
        self.min_rsu_compute_ability: float = 25000  # minimum compute ability: 25000 Mcycles/s
        self.max_rsu_compute_ability: float = 30000  # maximum compute ability: 30000 Mcycles/s
        # self._rsu_x_location: dict = {"rsu_1": 200, "rsu_2": 600, "rsu_3": 1000}
        # self._rsu_y_location: dict = {"rsu_1": 50, "rsu_2": 50, "rsu_3": 50}

        # --- Communication ---
        self.rsu_range: int = 400         # RSU communication range: 400 m
        self.vehicle_range: int = 200     # vehicle communication range: 200 m
        self.r2v_B: float = 20            # R2V bandwidth: 20 Mbps
        self.v2v_B: float = 40            # V2V bandwidth: 40 Mbps
        self.rsu_p: float = 50            # RSU transmit power: 50 W
        self.vehicle_p: float = 10        # vehicle transmit power: 10 W
        self.w: float = 0.001             # noise power: 0.001 W/Hz
        self.k: float = 30                # fixed loss K: 20-40 dB, 30 chosen
        self.theta: int = 2               # path-loss exponent: 2-6, 2 chosen
        self.r2r_onehop_time: float = 8   # one-hop R2R transfer time: 8 s
        self.c2r_rate: float = 0.2        # cloud-to-RSU transfer rate: 0.2 MB/s
        # self.cloud_compute_ability: float = 1800  # cloud compute ability
        self.min_transfer_rate: float = 0.01   # minimum transfer rate: 0.01 MB/s
        self.rsu_connect_time: float = 10000   # connection time between RSUs
        self.cloud_connect_time: float = 10000 # RSU-to-cloud connection time

        # --- Penalty ---
        self.punishment = -200

        # --- Environment / RL spaces ---
        # size of the discrete action space: (#RSU + #vehicles + 1) ** 3
        self.action_size = (self.rsu_number + self.vehicle_number + 1) ** 3
        # upper bound of the observation space (unbounded above)
        self.high = np.array([np.finfo(np.float32).max for _ in range(self.rsu_number + self.vehicle_number)])
        # lower bound of the observation space
        self.low = np.array([0 for _ in range(self.rsu_number + self.vehicle_number)])
75 |
--------------------------------------------------------------------------------
/env/datastruct.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from typing import List # -> List[int] 指示应返回整数列表。
3 |
4 |
5 | #获取任务属性的类:获取任务信息大小,计算能力:每bit所需转数,任务延迟约束
class Function(object):
    """One offloadable task, described by the triple
    (data size, compute resource required per bit, deadline)."""

    def __init__(
            self,
            Function_task_datasize: float,
            Function_task_computing_resource: float,
            Function_task_delay: int
    ) -> None:
        # Parameter names are preserved so keyword-argument callers keep working.
        self._Function_task_datasize = Function_task_datasize                      # task data size
        self._Function_task_computing_resource = Function_task_computing_resource  # cycles needed per bit
        self._Function_task_delay = Function_task_delay                            # deadline constraint

    def get_task_datasize(self) -> float:
        """Data volume of the task."""
        return float(self._Function_task_datasize)

    def get_task_computing_resource(self) -> float:
        """Compute resource the task requires per bit."""
        return float(self._Function_task_computing_resource)

    def get_task_delay(self) -> float:
        """Deadline constraint of the task."""
        return float(self._Function_task_delay)
28 |
29 |
30 |
31 | ########################################################################
32 | #操作队列的类:这个类的构造函数初始化了任务队列的属性,包括任务数量、任务大小范围、和随机数生成的种子
33 | #功能有:获取任务列表,返回节点的总任务量,任务列表增加任务,自己产生任务(随着时间变化),处理任务(随着时间变化)
class TaskList(object):
    """Pending-task queue on a node: a FIFO of task data sizes."""

    def __init__(
            self,
            task_number: int,        # number of pending tasks on the node
            minimum_datasize: float, # smallest possible task size
            maximum_datasize: float  # largest possible task size
    ) -> None:
        self._task_number = task_number
        self._minimum_datasize = minimum_datasize
        self._maximum_datasize = maximum_datasize
        # Draw one uniformly distributed data size per initial task.
        self._datasizes = np.random.uniform(self._minimum_datasize, self._maximum_datasize, self._task_number)
        self._task_list = list(self._datasizes)

    def get_task_list(self) -> List[float]:
        """The queued task sizes, oldest first."""
        return self._task_list

    def sum_datasize(self) -> float:
        """Total data volume queued on this node."""
        return sum(self._task_list)

    def add_task_list(self, new_data_size) -> None:
        """Queue one task that was offloaded to this node."""
        self._task_list.append(new_data_size)

    def add_by_slot(self, task_number) -> None:
        """Append tasks the node generates by itself during a time-slot transition."""
        for size in np.random.uniform(self._minimum_datasize, self._maximum_datasize, task_number):
            self._task_list.append(size)
            self._task_number += 1

    def delete_data_list(self, process_ability) -> None:
        """Consume queued tasks FIFO with the given per-slot compute budget.

        Fully processed tasks are removed; a partially processed head task is
        shrunk by whatever budget remains.
        """
        while self._task_list:
            head = self._task_list[0]
            if process_ability >= head:
                # Budget covers the whole head task: remove it, keep going.
                process_ability -= head
                self._task_list.pop(0)
            else:
                # Budget exhausted mid-task: shrink the head and stop.
                self._task_list[0] = head - process_ability
                break
87 |
88 | ########################################################################
89 | #对每一辆车进行操作的类
90 | #功能有:获取生存时间,获取位置坐标,存货时间-1,判断是否不再存活,获取车辆行驶速度,获取车辆计算能力,获取车辆任务队列,获取车辆任务队列里的任务量之和
91 |
class Vehicle(object):
    """One vehicle: location, signed speed, dwell (stay) time, compute ability
    and pending-task queue.

    Randomness is driven by ``seed`` so a vehicle built with the same
    arguments is reproducible.
    """

    def __init__(
            self,
            road_range: int,                        # road length
            min_vehicle_speed: float,               # minimum driving speed
            max_vehicle_speed: float,               # maximum driving speed
            min_task_number: float,                 # minimum number of queued tasks
            max_task_number: float,                 # maximum number of queued tasks
            min_task_datasize: float,               # minimum size of each task
            max_task_datasize: float,               # maximum size of each task
            min_vehicle_compute_ability: float,     # minimum vehicle compute ability
            max_vehicle_compute_ability: float,     # maximum vehicle compute ability
            vehicle_x_initial_location: list,       # candidate initial x coordinates
            min_vehicle_y_initial_location: float,  # minimum initial y coordinate
            max_vehicle_y_initial_location: float,  # maximum initial y coordinate
            seed: int
    ) -> None:
        self._road_range = road_range
        self._seed = seed

        # Initial y coordinate: uniform integer in [min, max); y never changes afterwards.
        self._min_vehicle_y_initial_location = min_vehicle_y_initial_location
        self._max_vehicle_y_initial_location = max_vehicle_y_initial_location
        np.random.seed(self._seed)
        self._vehicle_y_initial_location = np.random.randint(
            self._min_vehicle_y_initial_location, self._max_vehicle_y_initial_location, 1)[0]
        self._vehicle_y_location = self._vehicle_y_initial_location

        # Initial x coordinate: one end of the road (0 or road_range).
        np.random.seed(self._seed)
        self._vehicle_x_initial_location = np.random.choice(vehicle_x_initial_location)
        self._vehicle_x_location = self._vehicle_x_initial_location

        # Speed: positive when entering at x=0, negative when entering at x=road_range.
        np.random.seed(self._seed)
        self._vehicle_speed = np.random.randint(min_vehicle_speed, max_vehicle_speed)
        if self._vehicle_x_initial_location != 0:
            self._vehicle_speed = -self._vehicle_speed

        # Time the vehicle stays on the road segment.
        # BUG FIX: the original divided by the *signed* speed, so every vehicle
        # driving in the negative-x direction got a negative stay time and was
        # immediately reported "out" by is_out(); divide by the magnitude.
        self._stay_time = int(self._road_range / abs(self._vehicle_speed))

        # Vehicle compute ability, drawn uniformly from [min, max].
        self._max_compute_ability = max_vehicle_compute_ability
        self._min_compute_ability = min_vehicle_compute_ability
        # BUG FIX: the original stored this draw into self._min_compute_ability
        # (clobbering the lower bound) and get_vehicle_compute_ability()
        # returned the fixed maximum instead; keep and return the drawn value.
        self._compute_ability = float(
            np.random.uniform(self._min_compute_ability, self._max_compute_ability))

        # Vehicle task queue: a random number of tasks with random sizes.
        self._min_task_number = min_task_number
        self._max_task_number = max_task_number
        self._max_datasize = max_task_datasize
        self._min_datasize = min_task_datasize
        self._task_number = np.random.randint(self._min_task_number, self._max_task_number)
        self._vehicle_task_list = TaskList(self._task_number, self._min_datasize, self._max_datasize)

    def get_initial_data(self) -> list:
        """Return [initial x, initial y, signed speed]."""
        return [self._vehicle_x_initial_location, self._vehicle_y_initial_location, self._vehicle_speed]

    def get_stay_time(self) -> int:
        """Remaining time the vehicle stays inside the scenario."""
        return self._stay_time

    def get_location(self) -> list:
        """Current [x, y] coordinates."""
        return [self._vehicle_x_location, self._vehicle_y_location]

    def change_location(self) -> list:
        """Advance the position by one second of travel and return the new [x, y]."""
        self._vehicle_x_location = self._vehicle_x_location + self._vehicle_speed * 1
        self._vehicle_y_location = self._vehicle_y_initial_location  # y stays at its initial value
        return [self._vehicle_x_location, self._vehicle_y_location]

    def decrease_stay_time(self) -> int:
        """Decrease the stay time by one second and return the new value."""
        self._stay_time -= 1
        return self._stay_time

    def is_out(self) -> bool:
        """True when the vehicle is about to leave (<= 5 s left) and should be dropped."""
        return self._stay_time <= 5

    def get_vehicle_speed(self) -> float:
        """Signed driving speed."""
        return self._vehicle_speed

    def get_vehicle_compute_ability(self) -> float:
        """Compute ability drawn uniformly from [min, max] at construction."""
        return self._compute_ability

    def get_task_list(self) -> TaskList:
        """The vehicle's pending-task queue."""
        return self._vehicle_task_list

    def get_sum_tasks(self) -> float:
        """Total queued data volume; 0 when the queue is empty."""
        if len(self._vehicle_task_list.get_task_list()) == 0:
            return 0
        else:
            return self._vehicle_task_list.sum_datasize()
210 |
211 |
212 |
213 | ########################################################################
214 | #对所有车辆进行操作的类
215 | # 功能有:获取车辆数量,获取车辆基础信息列表,增加车辆数量,从车辆队列中删除不在范围内的车辆
class VehicleList(object):
    """Manages every vehicle in the scenario: initial fleet creation, arrival
    of new vehicles, and removal of vehicles that have left the road.

    Improvements over the original: the 13-argument ``Vehicle(...)``
    construction that was duplicated verbatim in ``__init__`` and
    ``add_stay_vehicle`` is factored into ``_make_vehicle``, and
    ``delete_out_vehicle`` no longer carries an unreachable empty-list branch
    inside its loop.
    """

    def __init__(
            self,
            vehicle_number: int,                    # number of vehicles
            road_range: int,                        # road length
            min_vehicle_speed: float,               # minimum driving speed
            max_vehicle_speed: float,               # maximum driving speed
            min_task_number: float,                 # minimum queued-task count per vehicle
            max_task_number: float,                 # maximum queued-task count per vehicle
            min_task_datasize: float,               # minimum size of a queued task
            max_task_datasize: float,               # maximum size of a queued task
            min_vehicle_compute_ability: float,     # minimum vehicle compute ability
            max_vehicle_compute_ability: float,     # maximum vehicle compute ability
            vehicle_x_initial_location: list,       # candidate initial x coordinates
            min_vehicle_y_initial_location: float,  # minimum initial y coordinate
            max_vehicle_y_initial_location: float,  # maximum initial y coordinate
            seed: int
    ) -> None:
        self._seed = seed
        self._vehicle_number = vehicle_number
        self._road_range = road_range
        self._min_vehicle_speed = min_vehicle_speed
        self._max_vehicle_speed = max_vehicle_speed
        self._min_task_number = min_task_number
        self._max_task_number = max_task_number
        self._min_datasize = min_task_datasize
        self._max_datasize = max_task_datasize
        self._min_compute_ability = min_vehicle_compute_ability
        self._max_compute_ability = max_vehicle_compute_ability
        self._vehicle_x_initial_location = vehicle_x_initial_location
        self._min_vehicle_y_initial_location = min_vehicle_y_initial_location
        self._max_vehicle_y_initial_location = max_vehicle_y_initial_location

        # Initial fleet: one Vehicle per index, each with a distinct seed so
        # the vehicles differ while the fleet as a whole stays reproducible.
        self.vehicle_list = [
            self._make_vehicle(self._seed + idx) for idx in range(self._vehicle_number)]

    def _make_vehicle(self, seed: int) -> Vehicle:
        """Build one Vehicle from the stored scenario parameters with the given seed."""
        return Vehicle(
            road_range=self._road_range,
            min_vehicle_speed=self._min_vehicle_speed,
            max_vehicle_speed=self._max_vehicle_speed,
            min_task_number=self._min_task_number,
            max_task_number=self._max_task_number,
            min_task_datasize=self._min_datasize,
            max_task_datasize=self._max_datasize,
            min_vehicle_compute_ability=self._min_compute_ability,
            max_vehicle_compute_ability=self._max_compute_ability,
            vehicle_x_initial_location=self._vehicle_x_initial_location,
            min_vehicle_y_initial_location=self._min_vehicle_y_initial_location,
            max_vehicle_y_initial_location=self._max_vehicle_y_initial_location,
            seed=seed,
        )

    def get_vehicle_number(self) -> int:
        """Current number of vehicles in the scenario."""
        return self._vehicle_number

    def get_vehicle_list(self) -> List[Vehicle]:
        """The list of Vehicle objects currently in the scenario."""
        return self.vehicle_list

    def add_stay_vehicle(self, new_vehicle_number, time_now) -> None:
        """Add ``new_vehicle_number`` fresh vehicles, seeded from ``time_now``."""
        new_vehicles = [
            self._make_vehicle(time_now + idx) for idx in range(new_vehicle_number)]
        self.vehicle_list = self.vehicle_list + new_vehicles
        self._vehicle_number += new_vehicle_number

    def delete_out_vehicle(self) -> None:
        """Remove every vehicle that reports is_out(), keeping the count in sync."""
        kept = [vehicle for vehicle in self.vehicle_list if not vehicle.is_out()]
        self._vehicle_number -= len(self.vehicle_list) - len(kept)
        self.vehicle_list = kept
316 | ########################################################################
317 | #对某个RSU进行操作的类
318 | #功能有:获取RSU计算能力,获取RSU任务队列,获取RSU任务队列上的任务量之和
class RSU(object):
    """A single road-side unit (RSU).

    On construction the RSU draws a random compute ability and a random
    task queue; the class then exposes read access to both.
    """

    def __init__(
            self,
            min_task_number: float,          # minimum number of tasks in the RSU queue
            max_task_number: float,          # maximum number of tasks in the RSU queue
            min_task_datasize: float,        # minimum data size of a queued task
            max_task_datasize: float,        # maximum data size of a queued task
            min_rsu_compute_ability: float,  # minimum RSU compute speed
            max_rsu_compute_ability: float   # maximum RSU compute speed
    ) -> None:
        # Compute speed: one uniform draw from [min, max).  Kept as the
        # length-1 ndarray np.random.uniform returns, exactly as before.
        self._max_compute_ability = max_rsu_compute_ability
        self._min_compute_ability = min_rsu_compute_ability
        self._compute_ability = np.random.uniform(self._min_compute_ability, self._max_compute_ability, 1)

        # Task queue: random task count, then random task sizes.
        # NOTE: the uniform draw above must stay before this randint so
        # the global NumPy RNG stream keeps the original ordering.
        self._min_task_number = min_task_number
        self._max_task_number = max_task_number
        self._min_datasize = min_task_datasize
        self._max_datasize = max_task_datasize
        self._task_number = np.random.randint(self._min_task_number, self._max_task_number)
        self._rsu_task_list = TaskList(self._task_number, self._min_datasize, self._max_datasize)

    def get_rsu_compute_ability(self) -> float:
        """Return the RSU's compute speed."""
        return self._compute_ability

    def get_task_list(self) -> TaskList:
        """Return the RSU's own task queue."""
        return self._rsu_task_list

    def get_sum_tasks(self) -> float:
        """Return the total data size of all queued tasks, 0 if the queue is empty."""
        queued = self._rsu_task_list.get_task_list()
        return self._rsu_task_list.sum_datasize() if queued else 0
363 |
364 |
365 |
366 | ########################################################################
367 | #对所有RSU进行操作的类
368 | #获取RSU个数,获取RSU上的基础信息组
class RSUList(object):
    """Builds and manages a fixed collection of RSU objects."""

    def __init__(
            self,
            rsu_number,                      # number of RSUs to create
            min_task_number: float,          # minimum number of tasks per RSU queue
            max_task_number: float,          # maximum number of tasks per RSU queue
            min_task_datasize: float,        # minimum data size of a queued task
            max_task_datasize: float,        # maximum data size of a queued task
            min_rsu_compute_ability: float,  # minimum RSU compute speed
            max_rsu_compute_ability: float   # maximum RSU compute speed
    ) -> None:
        self._rsu_number = rsu_number
        self._min_task_number = min_task_number
        self._max_task_number = max_task_number
        self._min_datasize = min_task_datasize
        self._max_datasize = max_task_datasize
        self._min_compute_ability = min_rsu_compute_ability
        self._max_compute_ability = max_rsu_compute_ability

        # Instantiate the RSUs one by one; each RSU draws its own random
        # compute ability and task queue.
        self.rsu_list = []
        for _ in range(rsu_number):
            self.rsu_list.append(RSU(
                min_task_number=self._min_task_number,
                max_task_number=self._max_task_number,
                min_task_datasize=self._min_datasize,
                max_task_datasize=self._max_datasize,
                min_rsu_compute_ability=self._min_compute_ability,
                max_rsu_compute_ability=self._max_compute_ability
            ))

    def get_rsu_number(self):
        """Return how many RSUs are managed."""
        return self._rsu_number

    def get_rsu_list(self):
        """Return the list of RSU objects."""
        return self.rsu_list
412 |
# Class operating on time slots.
class TimeSlot(object):
    """Discrete simulation clock over the slot interval [start, end]."""

    def __init__(self, start: int, end: int) -> None:
        self.start = start                         # first slot index
        self.end = end                             # last slot index
        self.slot_length = self.end - self.start   # number of slots spanned
        self.now = start                           # current slot position
        self.reset()                               # normalise state to the beginning

    def __str__(self):
        return f"now time: {self.now}, [{self.start} , {self.end}] with {self.slot_length} slots"

    def add_time(self) -> None:
        """Advance the clock by a single slot."""
        self.now = self.now + 1

    def is_end(self) -> bool:
        """True once the clock has reached (or passed) the final slot."""
        return not self.now < self.end

    def get_slot_length(self) -> int:
        """Return the length of the simulated horizon in slots."""
        return self.slot_length

    def get_now(self) -> int:
        """Return the current slot index."""
        return self.now

    def reset(self) -> None:
        """Rewind the clock back to the start slot."""
        self.now = self.start
447 |
--------------------------------------------------------------------------------
/env/utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """
4 | Author: John
5 | Email: johnjim0816@gmail.com
6 | Date: 2021-03-12 16:02:24
7 | LastEditor: John
8 | LastEditTime: 2022-07-13 22:15:46
9 | Description:
10 | Environment:
11 | """
12 | import os
13 | import numpy as np
14 | from pathlib import Path
15 | import matplotlib.pyplot as plt
16 | import seaborn as sns
17 |
18 | from matplotlib.font_manager import FontProperties # 导入字体模块
19 |
20 |
def chinese_font():
    """Return a FontProperties for rendering Chinese text in plots.

    The font path is Windows specific; change it to match the local
    system.  Returns None (matplotlib's default font) when the font
    cannot be loaded.
    """
    try:
        font = FontProperties(
            fname='C:/Windows/Fonts/STSONG.TTF', size=15)  # Windows system font path
    except Exception:  # was a bare ``except:``, which also swallowed KeyboardInterrupt/SystemExit
        font = None
    return font
29 |
30 |
31 |
32 |
33 |
34 |
def plot_rewards_cn(rewards, ma_rewards, cfg, tag='train'):
    """Plot reward and moving-average reward curves with Chinese labels.

    NOTE(review): this function checks ``cfg.save`` while plot_rewards
    checks ``cfg.save_fig`` -- confirm which attribute the config object
    actually defines.
    """
    sns.set()
    plt.figure()
    plt.title(u"{}环境下{}算法的学习曲线".format(cfg.env_name, cfg.algo_name), fontproperties=chinese_font())
    plt.xlabel(u'回合数', fontproperties=chinese_font())
    plt.plot(rewards)
    plt.plot(ma_rewards)
    plt.legend((u'奖励', u'滑动平均奖励',), loc="best", prop=chinese_font())
    if cfg.save:
        plt.savefig(cfg.result_path + f"{tag}_rewards_curve_cn.eps", format='eps', dpi=1000)
    plt.show()
47 |
48 |
def plot_rewards(rewards, ma_rewards, cfg, tag='train'):
    """Plot per-episode rewards and their moving average.

    Saves the figure as <result_path><tag>_rewards_curve.eps when
    cfg.save_fig is truthy, then shows it.
    """
    sns.set()
    plt.figure()  # fresh figure, so several plots can coexist
    plt.title("learning curve on {} of {}".format(cfg.device, cfg.algo_name), fontsize=18)
    plt.xlabel('epsiodes', fontsize=18)  # NOTE(review): 'epsiodes' is a typo in the rendered label
    plt.plot(rewards, label='rewards')
    plt.plot(ma_rewards, label='ma rewards')
    plt.legend()
    plt.grid()
    if cfg.save_fig:
        plt.savefig(cfg.result_path + "{}_rewards_curve.eps".format(tag), format='eps', dpi=1000)
    plt.show()
61 |
62 |
def plot_completion_rate(completion_rate, ma_completion_rate, cfg, tag='train'):
    """Plot the task-completion ratio and its moving average per episode."""
    # sns.set()
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'  # white axes background
    plt.figure()  # fresh figure, so several plots can coexist
    # plt.title("learning curve on {} of {}".format(cfg.device, cfg.algo_name), fontsize=18)
    plt.xticks( fontsize=16, fontname='Times New Roman')
    plt.yticks( fontsize=16, fontname='Times New Roman')
    plt.xlabel('episodes', fontsize=18, fontname='Times New Roman')
    plt.ylabel('completion ratio', fontsize=18, fontname='Times New Roman')
    plt.plot(completion_rate, label='completion_rate')
    plt.plot(ma_completion_rate, label='ma_completion_rate')
    plt.grid(True,linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size':18, 'family': 'Times New Roman'})
    if cfg.save_fig:
        plt.savefig(cfg.result_path + "{}_completion_rate_curve.eps".format(tag), format='eps', dpi=1000)
    plt.show()
80 | ########################################################################################################################
def plot_A3C_rewards(A3C_train_ma_rewards_1,A3C_train_ma_rewards_2,A3C_train_ma_rewards_3):
    """Compare A3C reward curves for 2/4/6 training processes.

    Fix: the white-background rcParams are now set *before* plt.figure().
    ``figure.facecolor`` is read when the figure is created, so setting
    it afterwards (as the original did) had no effect on this figure.
    """
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'    # white axes background
    plt.figure()
    # plt.title("Convergence graph for different number of processess", fontsize=14)
    plt.xlabel('episodes', fontsize=26, fontname='Times New Roman')
    plt.ylabel('rewards', fontsize=26, fontname='Times New Roman')
    plt.xticks(fontsize=22, fontname='Times New Roman')
    plt.yticks(fontsize=22, fontname='Times New Roman')
    plt.plot(A3C_train_ma_rewards_1, label='number of training processes=2')
    plt.plot(A3C_train_ma_rewards_2, label='number of training processes=4')
    plt.plot(A3C_train_ma_rewards_3, label='number of training processes=6')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('A3C_rewards.pdf', format='pdf')
    plt.show()
99 |
100 |
def plot_A3C_rewards_lr(A3C_train_ma_rewards_1_lr,A3C_train_ma_rewards_2_lr,A3C_train_ma_rewards_3_lr):
    """Compare A3C reward curves for three learning rates.

    Fix: the white-background rcParams are now set *before* plt.figure().
    ``figure.facecolor`` is read when the figure is created, so setting
    it afterwards (as the original did) had no effect on this figure.
    """
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'    # white axes background
    plt.figure()
    # plt.title("Convergence graph for different number of processess", fontsize=14)
    plt.xlabel('episodes', fontsize=26, fontname='Times New Roman')
    plt.ylabel('rewards', fontsize=26, fontname='Times New Roman')
    plt.xticks(fontsize=22, fontname='Times New Roman')
    plt.yticks(fontsize=22, fontname='Times New Roman')
    plt.plot(A3C_train_ma_rewards_1_lr, label='lr=0.002')
    plt.plot(A3C_train_ma_rewards_2_lr, label='lr=0.0002')
    plt.plot(A3C_train_ma_rewards_3_lr, label='lr=0.00002')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('A3C_rewards_lr.pdf', format='pdf')
    plt.show()
119 |
120 |
def plot_contrast_rewards(A3C_train_ma_rewards,Greedy_train_ma_rewards,DQN_train_ma_rewards):
    """Compare reward curves of A3C, Greedy and DQN.

    Fix: the white-background rcParams are now set *before* plt.figure().
    ``figure.facecolor`` is read when the figure is created, so setting
    it afterwards (as the original did) had no effect on this figure.
    """
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'    # white axes background
    plt.figure()
    # plt.title("Convergence graphs of different algorithms", fontsize=14)
    plt.xticks(fontsize=22, fontname='Times New Roman')
    plt.yticks(fontsize=22, fontname='Times New Roman')
    plt.xlabel('episodes', fontsize=26, fontname='Times New Roman')
    plt.ylabel('rewards', fontsize=26, fontname='Times New Roman')
    plt.plot(A3C_train_ma_rewards, label='A3C')
    plt.plot(Greedy_train_ma_rewards, label='Greedy')
    plt.plot(DQN_train_ma_rewards, label='DQN')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('contrast_rewards.pdf', format='pdf')
    plt.show()
140 |
141 |
def plot_A3C_completion_rate(A3C_train_ma_completion_rate_1,A3C_train_ma_completion_rate_2,A3C_train_ma_completion_rate_3):
    """Compare A3C completion-ratio curves for 2/4/6 training processes."""
    # sns.set()
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'  # white axes background
    plt.figure()  # fresh figure, so several plots can coexist
    # plt.title("Convergence graph for different number of processess", fontsize=14)
    plt.xticks( fontsize=22, fontname='Times New Roman')
    plt.yticks( fontsize=22, fontname='Times New Roman')
    plt.xlabel('episodes', fontsize=26, fontname='Times New Roman')
    plt.ylabel('completion ratio', fontsize=26, fontname='Times New Roman')
    plt.plot(A3C_train_ma_completion_rate_1, label='number of training processes=2')
    plt.plot(A3C_train_ma_completion_rate_2, label='number of training processes=4')
    plt.plot(A3C_train_ma_completion_rate_3, label='number of training processes=6')
    plt.grid(True,linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('A3C_completion_rate.pdf', format='pdf')
    plt.show()
160 |
161 |
162 |
def plot_A3C_completion_rate_lr(A3C_train_ma_completion_rate_1_lr,A3C_train_ma_completion_rate_2_lr,A3C_train_ma_completion_rate_3_lr):
    """Compare A3C completion-ratio curves for three learning rates."""
    # sns.set()
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'  # white axes background
    plt.figure()  # fresh figure, so several plots can coexist
    # plt.title("Convergence graph for different number of processess", fontsize=14)
    plt.xticks( fontsize=22, fontname='Times New Roman')
    plt.yticks( fontsize=22, fontname='Times New Roman')
    plt.xlabel('episodes', fontsize=26, fontname='Times New Roman')
    plt.ylabel('completion ratio', fontsize=26, fontname='Times New Roman')
    plt.plot(A3C_train_ma_completion_rate_1_lr, label='lr=0.002')
    plt.plot(A3C_train_ma_completion_rate_2_lr, label='lr=0.0002')
    plt.plot(A3C_train_ma_completion_rate_3_lr, label='lr=0.00002')
    plt.grid(True,linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('A3C_completion_rate_lr.pdf', format='pdf')
    plt.show()
181 |
182 |
def plot_contrast_completion_rate(A3C_train_ma_completion_rate,Greedy_train_ma_completion_rate,DQN_train_ma_completion_rate):
    """Compare completion-ratio curves of A3C, Greedy and DQN."""
    # sns.set()
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'  # white axes background
    plt.figure()  # fresh figure, so several plots can coexist
    # plt.title("Convergence graphs of different algorithms", fontsize=14)
    plt.xticks( fontsize=22, fontname='Times New Roman')
    plt.yticks( fontsize=22, fontname='Times New Roman')
    plt.xlabel('episodes', fontsize=26, fontname='Times New Roman')
    plt.ylabel('completion ratio', fontsize=26, fontname='Times New Roman')
    plt.plot(A3C_train_ma_completion_rate, label='A3C')
    plt.plot(Greedy_train_ma_completion_rate, label='Greedy')
    plt.plot(DQN_train_ma_completion_rate, label='DQN')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('contrast_completion_rate.pdf', format='pdf')
    plt.show()
201 |
202 |
def plot_different_tasksize_average_rewards(A3C_average,Greedy_average,DQN_average):
    """Line plot of average rewards versus task size for the three algorithms."""
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'    # white axes background
    plt.figure()
    plt.xlabel('size of tasks', fontsize=26, fontname='Times New Roman')
    plt.ylabel('average rewards', fontsize=26, fontname='Times New Roman')
    task_sizes = [2, 3, 4, 5, 6]  # x axis: task sizes
    plt.plot(task_sizes, A3C_average, label='A3C', linestyle='-', marker='o', markersize=8, markerfacecolor='red')
    plt.plot(task_sizes, Greedy_average, label='Greedy', linestyle='-', marker='s', markersize=8, markerfacecolor='orange')
    plt.plot(task_sizes, DQN_average, label='DQN', linestyle='-', marker='D', markersize=8, markerfacecolor='blue')
    plt.xticks(task_sizes, fontsize=22, fontname='Times New Roman')
    plt.yticks(fontsize=22, fontname='Times New Roman')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('different_tasksize_average_rewards.pdf', format='pdf')
    plt.show()
225 |
226 |
227 |
228 |
def plot_different_vehicle_number_average_rewards(A3C_average_vehicle_number,Greedy_average_vehicle_number,DQN_average_vehicle_number):
    """Line plot of average rewards versus number of vehicles for the three algorithms."""
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'    # white axes background
    plt.figure()
    plt.xlabel('number of vehicles', fontsize=26, fontname='Times New Roman')
    plt.ylabel('average rewards', fontsize=26, fontname='Times New Roman')
    vehicle_counts = [8, 9, 10]  # x axis: vehicle counts
    plt.plot(vehicle_counts, A3C_average_vehicle_number, label='A3C', linestyle='-', marker='o', markersize=8, markerfacecolor='red')
    plt.plot(vehicle_counts, Greedy_average_vehicle_number, label='Greedy', linestyle='-', marker='s', markersize=8,
             markerfacecolor='orange')
    plt.plot(vehicle_counts, DQN_average_vehicle_number, label='DQN', linestyle='-', marker='D', markersize=8, markerfacecolor='blue')
    plt.xticks(vehicle_counts, fontsize=22, fontname='Times New Roman')
    plt.yticks(fontsize=22, fontname='Times New Roman')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('different_vehicle_number_average_rewards.pdf', format='pdf')
    plt.show()
252 |
253 |
254 |
255 |
def plot_different_vehicle_speed_average_rewards(A3C_speed,Greedy_speed,DQN_speed):
    """Line plot of average rewards versus vehicle-speed range.

    Fix: the original issued plt.xticks/plt.yticks twice; the first pair
    (before plotting) was immediately overridden by the second pair and
    has been removed.
    """
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'    # white axes background
    plt.figure()
    plt.xlabel('speed of vehicles', fontsize=26, fontname='Times New Roman')
    plt.ylabel('average rewards', fontsize=26, fontname='Times New Roman')
    speed_bins = ['20-25', '25-30', '30-35']  # categorical x axis
    plt.plot(speed_bins, A3C_speed, label='A3C', linestyle='-', marker='o', markersize=8, markerfacecolor='red')
    plt.plot(speed_bins, Greedy_speed, label='Greedy', linestyle='-', marker='s', markersize=8, markerfacecolor='orange')
    plt.plot(speed_bins, DQN_speed, label='DQN', linestyle='-', marker='D', markersize=8, markerfacecolor='blue')
    plt.xticks(speed_bins, fontsize=22, fontname='Times New Roman')
    plt.yticks(fontsize=22, fontname='Times New Roman')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('different_vehicle_speed_average_rewards.pdf', format='pdf')
    plt.show()
280 |
281 |
def plot_different_task_computation_resource_average_rewards(A3C_task_computation_resource,Greedy_task_computation_resource,DQN_task_computation_resource):
    """Line plot of average rewards versus task computation intensity.

    Fix: the original issued plt.xticks/plt.yticks twice; the first pair
    (before plotting) was immediately overridden by the second pair and
    has been removed.
    """
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'    # white axes background
    plt.figure()
    plt.xlabel('computation intensity of tasks', fontsize=26, fontname='Times New Roman')
    plt.ylabel('average rewards', fontsize=26, fontname='Times New Roman')
    intensities = ['300', '325', '350', '375', '400']  # categorical x axis
    plt.plot(intensities, A3C_task_computation_resource, label='A3C', linestyle='-', marker='o', markersize=8, markerfacecolor='red')
    plt.plot(intensities, Greedy_task_computation_resource, label='Greedy', linestyle='-', marker='s', markersize=8, markerfacecolor='orange')
    plt.plot(intensities, DQN_task_computation_resource, label='DQN', linestyle='-', marker='D', markersize=8, markerfacecolor='blue')
    plt.xticks(intensities, fontsize=22, fontname='Times New Roman')
    plt.yticks(fontsize=22, fontname='Times New Roman')
    plt.grid(True, linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('different_task_computation_resource_average_rewards.pdf', format='pdf')
    plt.show()
306 |
307 |
308 | ########################################################################################################################
309 |
def plot_different_vehicle_speed_average_rewards_1(A3C_average_vehicle_speed,DQN_average_vehicle_speed,Greedy_average_vehicle_speed):
    """Grouped bar chart of average rewards per vehicle-speed range.

    NOTE(review): the y tick labels are rewritten with a leading minus
    sign, so the inputs are presumably magnitudes of negative rewards --
    confirm against the caller.
    """
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'  # white axes background
    plt.figure()  # fresh figure, so several plots can coexist
    plt.xlabel('speed of vehicles', fontsize=26, fontname='Times New Roman')
    plt.ylabel('average rewards', fontsize=26, fontname='Times New Roman')
    Width = 0.2  # bar width, and horizontal offset between the three series
    x1 = np.arange(len(A3C_average_vehicle_speed))
    x2=[x + Width for x in x1]
    x3=[x + Width for x in x2]
    A3C_y=A3C_average_vehicle_speed
    Greedy_y = Greedy_average_vehicle_speed
    DQN_y = DQN_average_vehicle_speed
    plt.xticks(fontsize=22, fontname='Times New Roman')
    plt.yticks( fontsize=22, fontname='Times New Roman')
    plt.bar(x1, A3C_y,label='A3C',width=Width,color='red')
    plt.bar(x2, DQN_y, label='DQN',width=Width,color='orange')
    plt.bar(x3, Greedy_y, label='Greedy',width=Width,color='green')
    plt.xticks([r + Width for r in range(len(A3C_average_vehicle_speed))], ['20-25','25-30','30-35'])
    # Current y tick positions (from the data drawn so far)
    ticks = plt.gca().get_yticks()
    # Prefix every non-zero tick value with a minus sign
    tick_labels = ['-' + str(int(abs(t))) if t != 0 else '0' for t in ticks]
    # Install the rewritten y tick labels
    plt.yticks(ticks, tick_labels)
    plt.grid(True,linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('different_vehicle_speed_average_rewards.pdf', format='pdf')
    plt.show()
340 |
341 |
def plot_different_vehicle_number_average_rewards_1(A3C_average_rewards_vehicle_number,DQN_average_rewards_vehicle_number,Greedy_average_rewards_vehicle_number):
    """Grouped bar chart of average rewards per vehicle count.

    NOTE(review): the y tick labels are rewritten with a leading minus
    sign, so the inputs are presumably magnitudes of negative rewards --
    confirm against the caller.
    """
    plt.rcParams['figure.facecolor'] = 'white'  # white canvas background
    plt.rcParams['axes.facecolor'] = 'white'  # white axes background
    plt.figure()  # fresh figure, so several plots can coexist
    plt.xlabel('number of vehicles', fontsize=26, fontname='Times New Roman')
    plt.ylabel('average rewards', fontsize=26, fontname='Times New Roman')
    Width = 0.2  # bar width, and horizontal offset between the three series
    x1 = np.arange(len(A3C_average_rewards_vehicle_number))
    x2=[x + Width for x in x1]
    x3=[x + Width for x in x2]
    A3C_y=A3C_average_rewards_vehicle_number
    Greedy_y = Greedy_average_rewards_vehicle_number
    DQN_y = DQN_average_rewards_vehicle_number
    plt.xticks(fontsize=22, fontname='Times New Roman')
    plt.yticks( fontsize=22, fontname='Times New Roman')
    plt.bar(x1, A3C_y,label='A3C',width=Width,color='red')
    plt.bar(x2, DQN_y, label='DQN',width=Width,color='orange')
    plt.bar(x3, Greedy_y, label='Greedy',width=Width,color='green')
    plt.xticks([r + Width for r in range(len(A3C_average_rewards_vehicle_number))], ['8','9','10'])
    # Current y tick positions (from the data drawn so far)
    ticks = plt.gca().get_yticks()
    # Prefix every non-zero tick value with a minus sign
    tick_labels = ['-' + str(int(abs(t))) if t != 0 else '0' for t in ticks]
    # Install the rewritten y tick labels
    plt.yticks(ticks, tick_labels)
    plt.grid(True,linestyle='--', linewidth=0.5, color='gray')
    plt.legend(prop={'size': 18, 'family': 'Times New Roman'})
    plt.tight_layout()
    plt.savefig('different_vehicle_number_average_rewards.pdf', format='pdf')
    plt.show()
372 |
373 |
374 |
def plot_losses(losses, algo="DQN", save=True, path='./'):
    """Plot the training loss curve for `algo`.

    Fix: the legend label said 'rewards' (copy-paste from the reward
    plot); it now correctly reads 'loss'.
    """
    sns.set()
    plt.figure()
    plt.title("loss curve of {}".format(algo), fontsize=18)
    plt.xlabel('epsiodes', fontsize=18)  # NOTE(review): 'epsiodes' is a typo in the rendered label
    plt.plot(losses, label='loss')
    plt.legend()
    if save:
        plt.savefig(path + "losses_curve.eps", format='eps', dpi=1000)
    plt.show()
385 |
386 |
def save_results_1(dic, tag='train', path='./results'):
    """Save every (key, value) in `dic` as <path>/<tag>_<key>.npy.

    Fix: the file name is now built with os.path.join; the original
    string concatenation produced names like './resultstrain_rewards.npy'
    whenever `path` lacked a trailing separator (as the default does).
    """
    for key, value in dic.items():
        np.save(os.path.join(path, '{}_{}.npy'.format(tag, key)), value)
    print('Results saved!')
392 |
393 |
def save_results(rewards, ma_rewards, tag='train', path='./results'):
    """Save reward and moving-average-reward arrays as .npy files.

    Fix: uses os.path.join instead of raw concatenation, which silently
    produced names like './resultstrain_rewards.npy' when `path` had no
    trailing separator (as the default does).
    """
    np.save(os.path.join(path, '{}_rewards.npy'.format(tag)), rewards)
    np.save(os.path.join(path, '{}_ma_rewards.npy'.format(tag)), ma_rewards)
    print('Result saved!')
399 |
400 |
401 |
def make_dir(*paths):
    """Create every given directory, including parents; no-op when it already exists."""
    for target in paths:
        Path(target).mkdir(parents=True, exist_ok=True)
406 |
407 |
def del_empty_dir(*paths):
    """Delete every empty immediate sub-directory of each given path.

    Fixes over the original:
    - uses os.rmdir instead of os.removedirs (removedirs would also
      prune the now-empty *parent* directories, deleting more than the
      docstring promises);
    - skips plain files in `path` (os.listdir on a file raised
      NotADirectoryError);
    - no longer shadows the builtin ``dir``.
    """
    for path in paths:
        for name in os.listdir(path):
            child = os.path.join(path, name)
            if os.path.isdir(child) and not os.listdir(child):
                os.rmdir(child)
415 |
416 |
def save_args(args):
    """Write every attribute of `args` to <result_path>/params.txt.

    Fix: the output path is now built with os.path.join; the original
    concatenation silently produced e.g. 'outputsparams.txt' when
    ``args.result_path`` lacked a trailing separator.
    """
    argsDict = args.__dict__
    with open(os.path.join(args.result_path, 'params.txt'), 'w') as f:
        f.writelines('------------------ start ------------------' + '\n')
        for eachArg, value in argsDict.items():
            f.writelines(eachArg + ' : ' + str(value) + '\n')
        f.writelines('------------------- end -------------------')
    print("Parameters saved!")
426 |
427 |
428 |
429 | #
430 | # def save_results_from_npy_to_txt(file_path, output_path):
431 | # """ 从.npy文件中提取数据并保存为文本文件 """
432 | # data = np.load(file_path)
433 | # with open(output_path, 'w') as file:
434 | # for item in data:
435 | # file.write(' '.join(str(x) for x in item) + '\n')
436 | # print('Results extracted from .npy file and saved to text file!')
437 |
438 |
439 |
440 | # def plot_offloading_rate(offloading_rate_c, offloading_rate_r, offloading_rate_v, cfg, tag='train'):
441 | # sns.set()
442 | # plt.figure() # 创建一个图形实例,方便同时多画几个图
443 | # plt.title("offloading rate curve on {} of {}".format(cfg.device, cfg.algo_name), fontsize=18)
444 | # plt.xlabel('epsiodes', fontsize=18)
445 | # plt.plot(offloading_rate_c, label='offloading_rate_c')
446 | # plt.plot(offloading_rate_r, label='offloading_rate_r')
447 | # plt.plot(offloading_rate_v, label='offloading_rate_v')
448 | # plt.legend()
449 | # if cfg.save_fig:
450 | # plt.savefig(cfg.result_path + "{}_offloading_rate_curve.eps".format(tag), format='eps', dpi=1000)
451 | # plt.show()
452 |
453 |
454 |
455 | # def plot_finish_rate(finish_rate, ma_finish_rate, cfg, tag='train'):
456 | # sns.set()
457 | # plt.figure() # 创建一个图形实例,方便同时多画几个图
458 | # plt.title("finish rate curve on {} of {}".format(cfg.device, cfg.algo_name), fontsize=18)
459 | # plt.xlabel('epsiodes', fontsize=18)
460 | # plt.plot(finish_rate, label='finish rate')
461 | # plt.plot(ma_finish_rate, label='ma finish rate')
462 | # plt.legend()
463 | # if cfg.save_fig:
464 | # plt.savefig(cfg.result_path + "{}_finish_rate_curve.eps".format(tag), format='eps', dpi=1000)
465 | # plt.show()
466 |
467 |
468 |
469 |
--------------------------------------------------------------------------------
/methods/A3C/a3c.py:
--------------------------------------------------------------------------------
1 | import torch.optim as optim
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.distributions import Categorical
5 | import torch
6 | import os
class ActorCritic(nn.Module):
    """Shared-trunk actor-critic network for A3C.

    One hidden layer feeds both the policy head (action probabilities)
    and the value head (scalar state value).
    """

    def __init__(self, input_dim, output_dim, hidden_dim):
        super(ActorCritic, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)        # shared trunk
        self.fc_actor = nn.Linear(hidden_dim, output_dim)  # policy head
        self.fc_critic = nn.Linear(hidden_dim, 1)          # value head

    def actor(self, x, softmax_dim=0):
        """Return action probabilities for state `x` (softmax over `softmax_dim`)."""
        x = F.relu(self.fc1(x))
        x = self.fc_actor(x)
        prob = F.softmax(x, dim=softmax_dim)
        return prob

    def critic(self, x):
        """Return the estimated value of state `x`."""
        x = F.relu(self.fc1(x))
        v = self.fc_critic(x)
        return v

    def save(self, path):
        """Save the model weights to <path>/a3c.pt."""
        checkpoint = os.path.join(path, 'a3c.pt')
        torch.save(self.state_dict(), checkpoint)

    def load(self, path):
        """Load the model weights from <path>/a3c.pt.

        Fix: the original called the non-existent ``self.aload_state_dict``
        (typo), which raised AttributeError on every load.
        """
        checkpoint = os.path.join(path, 'a3c.pt')
        self.load_state_dict(torch.load(checkpoint))
36 |
37 |
38 | # import torch.optim as optim
39 | # import torch.nn as nn
40 | # import torch.nn.functional as F
41 | # from torch.distributions import Categorical
42 | # import torch
43 | # import os
44 | #
45 | # class ActorCritic(nn.Module):
46 | # def __init__(self, input_dim, output_dim, hidden_dim, device):
47 | # super(ActorCritic, self).__init__()
48 | # self.fc1 = nn.Linear(input_dim, hidden_dim).to(device)
49 | # self.fc_actor = nn.Linear(hidden_dim, output_dim).to(device)
50 | # self.fc_critic = nn.Linear(hidden_dim, 1).to(device)
51 | #
52 | # def actor(self, x, softmax_dim=0):
53 | # x = F.relu(self.fc1(x))
54 | # x = self.fc_actor(x)
55 | # prob = F.softmax(x, dim=softmax_dim)
56 | # return prob
57 | #
58 | # def critic(self, x):
59 | # x = F.relu(self.fc1(x))
60 | # v = self.fc_critic(x)
61 | # return v
62 | #
63 | # def save(self, path):
64 | # checkpoint = os.path.join(path, 'a3c.pt')
65 | # state_dict_on_cpu = {key: val.cpu() for key, val in self.state_dict().items()}
66 | # torch.save(state_dict_on_cpu, checkpoint)
67 | #
68 | # def load(self, path, device):
69 | # checkpoint = os.path.join(path, 'a3c.pt')
70 | # state_dict = torch.load(checkpoint, map_location=device)
71 | # self.load_state_dict(state_dict)
72 |
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230926-211815/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230926-211815/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/config_params.txt:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | # 旨在简化数据类的定义,减少样板代码,并提供更好的代码可读性。这对于处理大量数据对象的情况特别有用。
3 | import numpy as np
4 |
5 |
6 | @dataclasses.dataclass
7 | class VehicularEnvConfig:
8 | def __init__(self):
9 | # 道路信息
10 | self.road_range: int = 1200 # 500m
11 | self.road_width: int = 50 # 500m
12 |
13 | # 时间信息
14 | self.time_slot_start: int = 0
15 | self.time_slot_end: int = 99
16 | # 任务信息相关(要处理的任务)
17 | self.Function_min_task_datasize=2#
18 | self.Function_max_task_datasize = 5 #
19 | self.Function_task_computing_resource: float = 300 # 任务计算资源200cycles/bit
20 | self.Function_min_task_delay: int = 20 # 任务的最小延迟5s
21 | self.Function_max_task_delay: int = 25 # 任务的最大延迟10s
22 |
23 |
24 |
25 | # 任务队列相关(每个卸载对象自己产生的任务)
26 | self.min_rsu_task_number: int = 2 #RSU最小任务个数
27 | self.max_rsu_task_number: int = 3 #RSU最大任务个数
28 | self.min_vehicle_task_number: int = 4 #车辆最小任务个数,用于生成初始任务的个数
29 | self.max_vehicle_task_number: int = 5 #车辆最大任务个数,用于生成初始任务的个数
30 | self.min_task_datasize: float = 2 # 5 MB 每个任务的最小数据大小
31 | self.max_task_datasize: float = 4 # 10 MB 每个任务的最大数据大小
32 |
33 | # 车辆相关
34 | self.min_vehicle_speed: int = 30 #车辆行驶的最小速度
35 | self.max_vehicle_speed: int = 40 #车辆行驶的最大速度
36 | self.min_vehicle_compute_ability: float =20000 #最小计算能力25000Mcycles/s
37 | self.max_vehicle_compute_ability: float =25000 #最大计算能力30000Mcycles/s
38 | self.vehicle_number = 10 #车辆个数
39 | self.seed = 1 #随机种子
40 | self.min_vehicle_y_initial_location:float =0 #y坐标最小值
41 | self.max_vehicle_y_initial_location: float =50 #y坐标最大值
42 | self.vehicle_x_initial_location:list=[0,self.road_range]#x坐标初始值
43 | # RSU相关
44 | self.rsu_number = 3 #RSU的个数
45 | self.min_rsu_compute_ability: float = 30000 # 最小计算能力400Mcycles/s
46 | self.max_rsu_compute_ability: float = 35000 # 最大计算能力600Mcycles/s
47 | # self._rsu_x_location: dict = {"rsu_1": 200, "rsu_2": 600, "rsu_3": 1000}
48 | # self._rsu_y_location: dict = {"rsu_1": 50, "rsu_2": 50, "rsu_3": 50}
49 |
50 | # 通信相关
51 | self.rsu_range:int =400 #RSU通信距离200m
52 | self.vehicle_range: int = 200 #车辆通信距离100m
53 | self.r2v_B:float=20 #R2V带宽:10Mbps
54 | self.v2v_B:float=40#V2V带宽:20Mbps
55 | self.rsu_p:float=50 #RSU发射功率:100w
56 | self.vehicle_p:float=10 #车发射功率: 20w
57 | self.w:float=0.001 #噪声功率𝜔:0.001 W/Hz
58 | self.k:float=30 #固定损耗𝐾:20-40db取30
59 | self.theta:int=2 #路径损耗因子𝜎:2-6取2
60 | self.r2r_onehop_time:float=6#r2r一跳传输时间2s
61 | self.c2r_rate:float=0.3#C-R传输速率:2mb/s
62 | # self.cloud_compute_ability:float=1800 #cloud计算能力15000Mcycles/s
63 | self.min_transfer_rate:float=0.01 #最小传输速率:0.1mb/s
64 | self.rsu_connect_time:float=10000 #RSU之间的联通时间
65 | self.cloud_connect_time:float=10000 #R2C的连通时间
66 | #惩罚
67 | self.punishment=-200
68 | #环境相关
69 | self.action_size=(self.rsu_number+self.vehicle_number+1)** 3#动作空间
70 | self.high = np.array([np.finfo(np.float32).max for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
71 | # high 变量将是一个包含 107 个元素的 NumPy 数组,每个元素将具有可由 32 位浮点数表示的最大有限值。
72 | self.low = np.array([0 for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
73 |
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 150
6 | max_test_ep : 300
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : C:\Users\Mr.yang\Desktop\Multihop-V2V-no-seed\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230926-211815/results/
13 | model_path : C:\Users\Mr.yang\Desktop\Multihop-V2V-no-seed\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230926-211815/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230926-211815/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153132/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153132/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/config_params.txt:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | # 旨在简化数据类的定义,减少样板代码,并提供更好的代码可读性。这对于处理大量数据对象的情况特别有用。
3 | import numpy as np
4 |
5 |
6 | @dataclasses.dataclass
7 | class VehicularEnvConfig:
8 | def __init__(self):
9 | # 道路信息
10 | self.road_range: int = 1200 # 500m
11 | self.road_width: int = 50 # 500m
12 |
13 | # 时间信息
14 | self.time_slot_start: int = 0
15 | self.time_slot_end: int = 99
16 | # 任务信息相关(要处理的任务)
17 | self.Function_min_task_datasize=2#
18 | self.Function_max_task_datasize = 5 #
19 | self.Function_task_computing_resource: float = 300 # 任务计算资源200cycles/bit
20 | self.Function_min_task_delay: int = 20 # 任务的最小延迟5s
21 | self.Function_max_task_delay: int = 25 # 任务的最大延迟10s
22 |
23 |
24 |
25 | # 任务队列相关(每个卸载对象自己产生的任务)
26 | self.min_rsu_task_number: int = 2 #RSU最小任务个数
27 | self.max_rsu_task_number: int = 3 #RSU最大任务个数
28 | self.min_vehicle_task_number: int = 4 #车辆最小任务个数,用于生成初始任务的个数
29 | self.max_vehicle_task_number: int = 5 #车辆最大任务个数,用于生成初始任务的个数
30 | self.min_task_datasize: float = 2 # 5 MB 每个任务的最小数据大小
31 | self.max_task_datasize: float = 4 # 10 MB 每个任务的最大数据大小
32 |
33 | # 车辆相关
34 | self.min_vehicle_speed: int = 30 #车辆行驶的最小速度
35 | self.max_vehicle_speed: int = 40 #车辆行驶的最大速度
36 | self.min_vehicle_compute_ability: float =20000 #最小计算能力25000Mcycles/s
37 | self.max_vehicle_compute_ability: float =25000 #最大计算能力30000Mcycles/s
38 | self.vehicle_number = 10 #车辆个数
39 | self.seed = 1 #随机种子
40 | self.min_vehicle_y_initial_location:float =0 #y坐标最小值
41 | self.max_vehicle_y_initial_location: float =50 #y坐标最大值
42 | self.vehicle_x_initial_location:list=[0,self.road_range]#x坐标初始值
43 | # RSU相关
44 | self.rsu_number = 3 #RSU的个数
45 | self.min_rsu_compute_ability: float = 25000 # 最小计算能力400Mcycles/s
46 | self.max_rsu_compute_ability: float = 30000 # 最大计算能力600Mcycles/s
47 | # self._rsu_x_location: dict = {"rsu_1": 200, "rsu_2": 600, "rsu_3": 1000}
48 | # self._rsu_y_location: dict = {"rsu_1": 50, "rsu_2": 50, "rsu_3": 50}
49 |
50 | # 通信相关
51 | self.rsu_range:int =400 #RSU通信距离200m
52 | self.vehicle_range: int = 200 #车辆通信距离100m
53 | self.r2v_B:float=20 #R2V带宽:10Mbps
54 | self.v2v_B:float=40#V2V带宽:20Mbps
55 | self.rsu_p:float=50 #RSU发射功率:100w
56 | self.vehicle_p:float=10 #车发射功率: 20w
57 | self.w:float=0.001 #噪声功率𝜔:0.001 W/Hz
58 | self.k:float=30 #固定损耗𝐾:20-40db取30
59 | self.theta:int=2 #路径损耗因子𝜎:2-6取2
60 | self.r2r_onehop_time:float=6#r2r一跳传输时间2s
61 | self.c2r_rate:float=0.25#C-R传输速率:2mb/s
62 | # self.cloud_compute_ability:float=1800 #cloud计算能力15000Mcycles/s
63 | self.min_transfer_rate:float=0.01 #最小传输速率:0.1mb/s
64 | self.rsu_connect_time:float=10000 #RSU之间的联通时间
65 | self.cloud_connect_time:float=10000 #R2C的连通时间
66 | #惩罚
67 | self.punishment=-200
68 | #环境相关
69 | self.action_size=(self.rsu_number+self.vehicle_number+1)** 3#动作空间
70 | self.high = np.array([np.finfo(np.float32).max for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
71 | # high 变量将是一个包含 107 个元素的 NumPy 数组,每个元素将具有可由 32 位浮点数表示的最大有限值。
72 | self.low = np.array([0 for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
73 |
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 150
6 | max_test_ep : 300
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : C:\Users\gaopeng\Desktop\Multihop-V2V-no-seed\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230927-153132/results/
13 | model_path : C:\Users\gaopeng\Desktop\Multihop-V2V-no-seed\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230927-153132/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153132/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153927/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153927/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/config_params.txt:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | # 旨在简化数据类的定义,减少样板代码,并提供更好的代码可读性。这对于处理大量数据对象的情况特别有用。
3 | import numpy as np
4 |
5 |
6 | @dataclasses.dataclass
7 | class VehicularEnvConfig:
8 | def __init__(self):
9 | # 道路信息
10 | self.road_range: int = 1200 # 500m
11 | self.road_width: int = 50 # 500m
12 |
13 | # 时间信息
14 | self.time_slot_start: int = 0
15 | self.time_slot_end: int = 99
16 | # 任务信息相关(要处理的任务)
17 | self.Function_min_task_datasize=2#
18 | self.Function_max_task_datasize = 5 #
19 | self.Function_task_computing_resource: float = 300 # 任务计算资源200cycles/bit
20 | self.Function_min_task_delay: int = 20 # 任务的最小延迟5s
21 | self.Function_max_task_delay: int = 25 # 任务的最大延迟10s
22 |
23 |
24 |
25 | # 任务队列相关(每个卸载对象自己产生的任务)
26 | self.min_rsu_task_number: int = 2 #RSU最小任务个数
27 | self.max_rsu_task_number: int = 3 #RSU最大任务个数
28 | self.min_vehicle_task_number: int = 4 #车辆最小任务个数,用于生成初始任务的个数
29 | self.max_vehicle_task_number: int = 5 #车辆最大任务个数,用于生成初始任务的个数
30 | self.min_task_datasize: float = 2 # 5 MB 每个任务的最小数据大小
31 | self.max_task_datasize: float = 4 # 10 MB 每个任务的最大数据大小
32 |
33 | # 车辆相关
34 | self.min_vehicle_speed: int = 30 #车辆行驶的最小速度
35 | self.max_vehicle_speed: int = 40 #车辆行驶的最大速度
36 | self.min_vehicle_compute_ability: float =20000 #最小计算能力25000Mcycles/s
37 | self.max_vehicle_compute_ability: float =25000 #最大计算能力30000Mcycles/s
38 | self.vehicle_number = 10 #车辆个数
39 | self.seed = 1 #随机种子
40 | self.min_vehicle_y_initial_location:float =0 #y坐标最小值
41 | self.max_vehicle_y_initial_location: float =50 #y坐标最大值
42 | self.vehicle_x_initial_location:list=[0,self.road_range]#x坐标初始值
43 | # RSU相关
44 | self.rsu_number = 3 #RSU的个数
45 | self.min_rsu_compute_ability: float = 25000 # 最小计算能力400Mcycles/s
46 | self.max_rsu_compute_ability: float = 30000 # 最大计算能力600Mcycles/s
47 | # self._rsu_x_location: dict = {"rsu_1": 200, "rsu_2": 600, "rsu_3": 1000}
48 | # self._rsu_y_location: dict = {"rsu_1": 50, "rsu_2": 50, "rsu_3": 50}
49 |
50 | # 通信相关
51 | self.rsu_range:int =400 #RSU通信距离200m
52 | self.vehicle_range: int = 200 #车辆通信距离100m
53 | self.r2v_B:float=20 #R2V带宽:10Mbps
54 | self.v2v_B:float=40#V2V带宽:20Mbps
55 | self.rsu_p:float=50 #RSU发射功率:100w
56 | self.vehicle_p:float=10 #车发射功率: 20w
57 | self.w:float=0.001 #噪声功率𝜔:0.001 W/Hz
58 | self.k:float=30 #固定损耗𝐾:20-40db取30
59 | self.theta:int=2 #路径损耗因子𝜎:2-6取2
60 | self.r2r_onehop_time:float=6#r2r一跳传输时间2s
61 | self.c2r_rate:float=0.25#C-R传输速率:2mb/s
62 | # self.cloud_compute_ability:float=1800 #cloud计算能力15000Mcycles/s
63 | self.min_transfer_rate:float=0.01 #最小传输速率:0.1mb/s
64 | self.rsu_connect_time:float=10000 #RSU之间的联通时间
65 | self.cloud_connect_time:float=10000 #R2C的连通时间
66 | #惩罚
67 | self.punishment=-200
68 | #环境相关
69 | self.action_size=(self.rsu_number+self.vehicle_number+1)** 3#动作空间
70 | self.high = np.array([np.finfo(np.float32).max for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
71 | # high 变量将是一个包含 107 个元素的 NumPy 数组,每个元素将具有可由 32 位浮点数表示的最大有限值。
72 | self.low = np.array([0 for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
73 |
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 150
6 | max_test_ep : 300
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : C:\Users\Mr.yang\Desktop\Multihop-V2V-no-seed\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230927-153927/results/
13 | model_path : C:\Users\Mr.yang\Desktop\Multihop-V2V-no-seed\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230927-153927/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230927-153927/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230928-110901/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230928-110901/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230928-110901/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230928-110901/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230928-110901/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230930-114045/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230930-114045/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230930-114045/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20230930-114045/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20230930-114045/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231002-214935/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231002-214935/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/config_params.txt:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | # 旨在简化数据类的定义,减少样板代码,并提供更好的代码可读性。这对于处理大量数据对象的情况特别有用。
3 | import numpy as np
4 |
5 |
6 | @dataclasses.dataclass
7 | class VehicularEnvConfig:
8 | def __init__(self):
9 | # 道路信息
10 | self.road_range: int = 1200 # 500m
11 | self.road_width: int = 50 # 500m
12 |
13 | # 时间信息
14 | self.time_slot_start: int = 0
15 | self.time_slot_end: int = 99
16 | # 任务信息相关(要处理的任务)
17 | self.Function_min_task_datasize=2#
18 | self.Function_max_task_datasize = 5 #
19 | self.Function_task_computing_resource: float = 300 # 任务计算资源200cycles/bit
20 | self.Function_min_task_delay: int = 20 # 任务的最小延迟5s
21 | self.Function_max_task_delay: int = 25 # 任务的最大延迟10s
22 |
23 |
24 |
25 | # 任务队列相关(每个卸载对象自己产生的任务)
26 | self.min_rsu_task_number: int = 2 #RSU最小任务个数
27 | self.max_rsu_task_number: int = 3 #RSU最大任务个数
28 | self.min_vehicle_task_number: int = 4 #车辆最小任务个数,用于生成初始任务的个数
29 | self.max_vehicle_task_number: int = 5 #车辆最大任务个数,用于生成初始任务的个数
30 | self.min_task_datasize: float = 2 # 5 MB 每个任务的最小数据大小
31 | self.max_task_datasize: float = 4 # 10 MB 每个任务的最大数据大小
32 |
33 | # 车辆相关
34 | self.min_vehicle_speed: int = 30 #车辆行驶的最小速度
35 | self.max_vehicle_speed: int = 40 #车辆行驶的最大速度
36 | self.min_vehicle_compute_ability: float =20000 #最小计算能力200Mcycles/s
37 | self.max_vehicle_compute_ability: float =25000 #最大计算能力400Mcycles/s
38 | self.vehicle_number = 10 #车辆个数
39 | self.seed = 1 #随机种子
40 | self.min_vehicle_y_initial_location:float =0 #y坐标最小值
41 | self.max_vehicle_y_initial_location: float =50 #y坐标最大值
42 | self.vehicle_x_initial_location:list=[0,self.road_range]#x坐标初始值
43 | # RSU相关
44 | self.rsu_number = 3 #RSU的个数
45 | self.min_rsu_compute_ability: float = 25000 # 最小计算能力400Mcycles/s
46 | self.max_rsu_compute_ability: float = 30000 # 最大计算能力600Mcycles/s
47 | # self._rsu_x_location: dict = {"rsu_1": 200, "rsu_2": 600, "rsu_3": 1000}
48 | # self._rsu_y_location: dict = {"rsu_1": 50, "rsu_2": 50, "rsu_3": 50}
49 |
50 | # 通信相关
51 | self.rsu_range:int =400 #RSU通信距离200m
52 | self.vehicle_range: int = 200 #车辆通信距离100m
53 | self.r2v_B:float=20 #R2V带宽:10Mbps
54 | self.v2v_B:float=40#V2V带宽:20Mbps
55 | self.rsu_p:float=50 #RSU发射功率:100w
56 | self.vehicle_p:float=10 #车发射功率: 20w
57 | self.w:float=0.001 #噪声功率𝜔:0.001 W/Hz
58 | self.k:float=30 #固定损耗𝐾:20-40db取30
59 | self.theta:int=2 #路径损耗因子𝜎:2-6取2
60 | self.r2r_onehop_time:float=8#r2r一跳传输时间2s
61 | self.c2r_rate:float=0.2#C-R传输速率:2mb/s
62 | # self.cloud_compute_ability:float=1800 #cloud计算能力15000Mcycles/s
63 | self.min_transfer_rate:float=0.01 #最小传输速率:0.1mb/s
64 | self.rsu_connect_time:float=10000 #RSU之间的联通时间
65 | self.cloud_connect_time:float=10000 #R2C的连通时间
66 | #惩罚
67 | self.punishment=-200
68 | #环境相关
69 | self.action_size=(self.rsu_number+self.vehicle_number+1)** 3#动作空间
70 | self.high = np.array([np.finfo(np.float32).max for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
71 | # high 变量将是一个包含 107 个元素的 NumPy 数组,每个元素将具有可由 32 位浮点数表示的最大有限值。
72 | self.low = np.array([0 for _ in range(self.rsu_number+self.vehicle_number)]) # 状态空间的最大值
73 |
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231002-214935/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231002-214935/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231002-214935/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-150720/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-150720/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231004-150720/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231004-150720/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-150720/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231004-162835/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-153812/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-192015/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-192015/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/config_params.txt:
--------------------------------------------------------------------------------
import dataclasses
# dataclasses reduces boilerplate for data-holder classes and improves readability.
import numpy as np


@dataclasses.dataclass
class VehicularEnvConfig:
    """Configuration constants for the RSU-assisted multi-hop V2V offloading environment.

    All values are plain instance attributes assigned in ``__init__`` so that
    existing callers (``VehicularEnvConfig().road_range`` etc.) keep working.
    NOTE(review): the decorator is retained for backward compatibility even
    though fields are set manually — removing it would change ``__eq__``/``__repr__``.
    Several original inline comments disagreed with the assigned values; the
    comments below describe the values actually in effect.
    """

    def __init__(self) -> None:
        # --- Road geometry ---
        self.road_range: int = 1200  # road length in metres
        self.road_width: int = 50    # road width in metres

        # --- Time slots ---
        self.time_slot_start: int = 0
        self.time_slot_end: int = 99

        # --- Task (function) information: the workload to be offloaded ---
        self.Function_min_task_datasize: float = 2   # minimum task data size (MB)
        self.Function_max_task_datasize: float = 5   # maximum task data size (MB)
        self.Function_task_computing_resource: float = 300  # required compute density (cycles/bit)
        self.Function_min_task_delay: int = 20  # minimum task deadline (s)
        self.Function_max_task_delay: int = 25  # maximum task deadline (s)

        # --- Local task queues (tasks each offloading node generates itself) ---
        self.min_rsu_task_number: int = 2       # min initial tasks per RSU
        self.max_rsu_task_number: int = 3       # max initial tasks per RSU
        self.min_vehicle_task_number: int = 4   # min initial tasks per vehicle
        self.max_vehicle_task_number: int = 5   # max initial tasks per vehicle
        self.min_task_datasize: float = 2       # min queued-task data size (MB)
        self.max_task_datasize: float = 4       # max queued-task data size (MB)

        # --- Vehicles ---
        self.min_vehicle_speed: int = 30                    # minimum driving speed
        self.max_vehicle_speed: int = 40                    # maximum driving speed
        self.min_vehicle_compute_ability: float = 24000     # minimum compute capacity (Mcycles/s)
        self.max_vehicle_compute_ability: float = 25000     # maximum compute capacity (Mcycles/s)
        self.vehicle_number = 10                            # number of vehicles
        self.seed = 1                                       # RNG seed
        self.min_vehicle_y_initial_location: float = 0      # minimum initial y coordinate
        self.max_vehicle_y_initial_location: float = 50     # maximum initial y coordinate
        # Vehicles spawn at either end of the road segment.
        self.vehicle_x_initial_location: list = [0, self.road_range]

        # --- RSUs ---
        self.rsu_number = 3                             # number of RSUs
        self.min_rsu_compute_ability: float = 25000     # minimum compute capacity (Mcycles/s)
        self.max_rsu_compute_ability: float = 30000     # maximum compute capacity (Mcycles/s)

        # --- Communication ---
        self.rsu_range: int = 400       # RSU communication radius (m)
        self.vehicle_range: int = 200   # vehicle communication radius (m)
        self.r2v_B: float = 20          # RSU-to-vehicle bandwidth (Mbps)
        self.v2v_B: float = 40          # vehicle-to-vehicle bandwidth (Mbps)
        self.rsu_p: float = 50          # RSU transmit power (W)
        self.vehicle_p: float = 10      # vehicle transmit power (W)
        self.w: float = 0.001           # noise power (W/Hz)
        self.k: float = 30              # fixed path loss K (dB, chosen from 20-40)
        self.theta: int = 2             # path-loss exponent (chosen from 2-6)
        self.r2r_onehop_time: float = 8     # one-hop RSU-to-RSU transfer time (s)
        self.c2r_rate: float = 0.2          # cloud-to-RSU transfer rate (MB/s)
        self.min_transfer_rate: float = 0.01    # minimum usable transfer rate (MB/s)
        self.rsu_connect_time: float = 10000    # RSU-RSU link lifetime (effectively always connected)
        self.cloud_connect_time: float = 10000  # RSU-cloud link lifetime (effectively always connected)

        # --- Reward shaping ---
        self.punishment = -200  # penalty reward for a failed/violated task

        # --- Environment spaces ---
        # One offloading target per hop, 3 hops; targets = RSUs + vehicles + cloud.
        self.action_size = (self.rsu_number + self.vehicle_number + 1) ** 3
        # Per-dimension observation bounds: one entry per RSU and per vehicle.
        self.high = np.array([np.finfo(np.float32).max for _ in range(self.rsu_number + self.vehicle_number)])
        self.low = np.array([0 for _ in range(self.rsu_number + self.vehicle_number)])
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231005-192015/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231005-192015/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231005-192015/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231008-130030/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231008-130030/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 5
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231008-130030/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231008-130030/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231008-130030/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-143350/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-143350/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 2
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231011-143350/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231011-143350/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-143350/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-185838/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-185838/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 2
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231011-185838/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231011-185838/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231011-185838/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231012-213018/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231012-213018/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 2
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231012-213018/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231012-213018/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231012-213018/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-085149/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-085149/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-085149/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-085149/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-085149/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-145006/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-145006/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-145006/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-145006/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-145006/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-170138/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-170138/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-170138/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-170138/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-170138/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-203310/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-203310/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-203310/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231013-203310/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231013-203310/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231017-090344/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231017-090344/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231017-090344/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231017-090344/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231017-090344/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231025-084607/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231025-084607/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231025-084607/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231025-084607/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231025-084607/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231107-153511/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231107-153511/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231107-153511/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231107-153511/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-seed\methods\A3C/outputs/Multihop-V2V/20231107-153511/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231115-085643/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231115-085643/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231115-085643/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231115-085643/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231115-085643/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231120-093833/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231120-093833/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 300
6 | max_test_ep : 500
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231120-093833/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231120-093833/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231120-093833/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231122-103437/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231122-103437/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 200
6 | max_test_ep : 300
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231122-103437/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231122-103437/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231122-103437/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231123-164321/models/a3c.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231123-164321/models/a3c.pt
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/params.txt:
--------------------------------------------------------------------------------
1 | ------------------ start ------------------
2 | algo_name : A3C
3 | env_name : Multihop-V2V
4 | n_train_processes : 6
5 | max_train_ep : 200
6 | max_test_ep : 300
7 | update_interval : 5
8 | gamma : 0.98
9 | learning_rate : 0.0002
10 | hidden_dim : 256
11 | device : cuda
12 | result_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231123-164321/results/
13 | model_path : D:\PyCharm\PyCharmProject\pytorch\Multihop-V2V-no-script\methods\A3C/outputs/Multihop-V2V/20231123-164321/models/
14 | save_fig : True
15 | ------------------- end -------------------
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/A3C/outputs/Multihop-V2V/20231123-164321/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/A3C/run_A3C.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | curr_path = os.path.dirname(os.path.abspath(__file__)) # 当前文件所在绝对路径
3 | parent_path = os.path.dirname(curr_path) # 父路径
4 | # print(curr_path)
5 |
6 | sys.path.append(parent_path) # 添加路径到系统路径
7 | # parent_path_1 = os.path.dirname(parent_path)
8 | # sys.path.append(parent_path_1)
9 | # print(parent_path)
10 |
11 | import numpy as np
12 | import torch
13 | import argparse
14 | from methods.A3C.a3c import ActorCritic
15 | import matplotlib.pyplot as plt
16 | import seaborn as sns
17 | from env.utils import plot_rewards, save_args,plot_completion_rate
18 | from env.utils import save_results_1, make_dir
19 |
20 | from env import environment
21 | from torch.distributions import Categorical
22 | import torch.nn.functional as F
23 | import torch.optim as optim
24 | import torch.multiprocessing as mp
25 | import datetime
26 | from env.config import VehicularEnvConfig
27 |
28 |
29 |
def get_args():
    """Parse and return the hyperparameters for A3C training.

    Returns:
        argparse.Namespace with algorithm/environment settings, per-run
        timestamped output paths, and ``device`` set to a ``torch.device``.
    """
    # Recomputed locally (same value as the module-level curr_path) so this
    # function is self-contained.
    curr_path = os.path.dirname(os.path.abspath(__file__))
    curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # timestamp for this run
    parser = argparse.ArgumentParser(description="hyperparameters")
    parser.add_argument('--algo_name', default='A3C', type=str, help="name of algorithm")
    parser.add_argument('--env_name', default='Multihop-V2V', type=str, help="name of environment")
    parser.add_argument('--n_train_processes', default=6, type=int, help="number of parallel worker processes")
    parser.add_argument('--max_train_ep', default=200, type=int, help="episodes of training per worker")
    parser.add_argument('--max_test_ep', default=300, type=int, help="episodes of testing")
    parser.add_argument('--update_interval', default=5, type=int, help="rollout length before each update")
    parser.add_argument('--gamma', default=0.98, type=float, help="discount factor")
    parser.add_argument('--learning_rate', default=0.0002, type=float, help="learning rate")
    parser.add_argument('--hidden_dim', default=256, type=int, help="hidden-layer width")
    parser.add_argument('--device', default="cuda" if torch.cuda.is_available() else "cpu",
                        type=str, help="cpu or cuda")
    # Paths default to None and are filled in after parsing, so parse_args()
    # runs exactly once (the original called it inside the default= values).
    parser.add_argument('--result_path', default=None, help="directory for result files")
    parser.add_argument('--model_path', default=None, help="directory for saved models")
    # BUG FIX: type=bool is broken for argparse (bool("False") is True);
    # parse the string explicitly instead.
    parser.add_argument('--save_fig', default=True,
                        type=lambda s: str(s).lower() in ("1", "true", "yes"),
                        help="if save figure or not")
    args = parser.parse_args()
    run_dir = curr_path + "/outputs/" + args.env_name + '/' + curr_time
    if args.result_path is None:
        args.result_path = run_dir + '/results/'
    if args.model_path is None:
        args.model_path = run_dir + '/models/'
    args.device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")  # check GPU
    return args
63 |
def env_agent_config(cfg, seed=1):
    """Build the simulation environment and an actor-critic agent.

    Args:
        cfg: parsed hyperparameters; only ``hidden_dim`` is used here.
        seed: kept for interface compatibility; seeding is currently disabled
            (this is the "no-seed" variant of the project).

    Returns:
        A ``(env, agent)`` tuple.
    """
    env = environment.RoadState()
    state_dim = env.observation_space.shape[0]   # size of the observation vector
    action_dim = env.action_space.n              # number of discrete actions
    agent = ActorCritic(state_dim, action_dim, cfg.hidden_dim)
    # Seeding intentionally left disabled:
    # torch.manual_seed(seed); env.seed(seed); np.random.seed(seed)
    return env, agent
77 |
78 |
def train(cfg, env, agent,global_model,rank):
    """A3C worker loop: roll out trajectories locally and push gradients to the global model.

    Args:
        cfg: hyperparameters (uses max_train_ep, update_interval, gamma, learning_rate).
        env: environment instance owned by this worker process.
        agent: local ActorCritic copy used for acting and gradient computation.
        global_model: shared ActorCritic whose parameters are actually updated.
        rank: index of this worker process (used only in the final log line).
    """
    # Start the local copy from the current global parameters.
    agent.load_state_dict(global_model.state_dict())
    # NOTE(review): each worker constructs its own Adam over the *global*
    # parameters (minimal-A3C style), so optimizer state is per-process.
    optimizer = optim.Adam(global_model.parameters(), lr=cfg.learning_rate)

    for n_epi in range(cfg.max_train_ep):
        done = False
        state,function = env.reset()
        while not done:
            state_lst, action_lst, reward_lst = [], [], []
            for t in range(cfg.update_interval):  # roll out up to update_interval steps, collecting experience
                prob = agent.actor(torch.from_numpy(state).float())
                dist = Categorical(prob)
                action = dist.sample().item()
                # Sample an action from the categorical policy; item() converts
                # the sampled tensor to a plain Python int (actions are discrete).
                next_state, reward, done,next_function,_,_,_,_= env.step(action,function)

                state_lst.append(state)
                action_lst.append([action])
                # r_lst.append(r/100.0)
                reward_lst.append(reward)
                state = next_state
                function=next_function
                if done:
                    break

            # final_state is the last state of this rollout segment; bootstrap
            # the tail value from the critic unless the episode terminated.
            final_state = torch.tensor(next_state, dtype=torch.float)
            V = 0.0 if done else agent.critic(final_state).item()
            td_target_lst = []
            for reward in reward_lst[::-1]:  # n-step discounted returns, accumulated backwards
                V = cfg.gamma * V + reward
                td_target_lst.append([V])
            td_target_lst.reverse()  # restore chronological order

            state_lst=np.array(state_lst, dtype=np.float32)
            action_lst = np.array(action_lst, dtype=np.int64)
            td_target_lst = np.array(td_target_lst, dtype=np.float32)

            state_batch, action_batch, td_target = torch.tensor(state_lst, dtype=torch.float), \
                torch.tensor(action_lst), torch.tensor(td_target_lst)
            advantage = td_target - agent.critic(state_batch)

            action_prob = agent.actor(state_batch,softmax_dim=1)
            all_action_prob = action_prob.gather(1, action_batch)
            # Actor loss: policy gradient weighted by the detached advantage;
            # critic loss: smooth L1 between value estimates and TD targets.
            loss = -torch.log(all_action_prob) * advantage.detach() + \
                F.smooth_l1_loss(agent.critic(state_batch), td_target.detach())

            optimizer.zero_grad()
            loss.mean().backward()
            for global_param, local_param in zip(global_model.parameters(), agent.parameters()):
                # Copy each local gradient onto the corresponding global
                # parameter so the optimizer.step() below updates the shared model.
                global_param._grad = local_param.grad
            optimizer.step()
            # Because the global parameters accumulated gradients from several
            # worker processes, this step applies all of them at once.
            agent.load_state_dict(global_model.state_dict())
            # Re-sync the local copy with the freshly updated global model so
            # the next interaction segment acts on current parameters.
    env.close()
    print("Training process {} reached maximum episode.".format(rank))
143 |
def test_global_model(cfg, env, global_model):
    """Evaluate the shared A3C model, print per-episode stats and save curves.

    Runs ``cfg.max_test_ep`` evaluation episodes by sampling actions from the
    shared model's policy, then writes reward / completion-rate arrays and
    plots to ``cfg.result_path``.
    """
    episode_rewards, ma_episode_rewards = [], []
    vehicle_counts, rsu_counts, cloud_counts = [], [], []
    rate_history, ma_rate_history = [], []

    for episode in range(cfg.max_test_ep):
        total_reward = 0
        step_count = 0
        done = False
        n_vehicle = 0
        n_rsu = 0
        n_cloud = 0
        n_completed = 0
        state, function = env.reset()

        while not done:
            # Sample an action from the current shared policy.
            prob = global_model.actor(torch.from_numpy(state).float())
            action = Categorical(prob).sample().item()
            (state, reward, done, function,
             offloading_vehicle, offloading_rsu,
             offloading_cloud, complete) = env.step(action, function)
            step_count += 1
            total_reward += reward
            n_vehicle += offloading_vehicle
            n_rsu += offloading_rsu
            n_cloud += offloading_cloud
            n_completed += complete

        episode_rewards.append(total_reward)
        vehicle_counts.append(n_vehicle)
        rsu_counts.append(n_rsu)
        cloud_counts.append(n_cloud)
        # Completion rate is normalised by the total number of tasks:
        # one per RSU per time slot.
        completion_rate = n_completed / (VehicularEnvConfig().rsu_number * (VehicularEnvConfig().time_slot_end + 1))
        rate_history.append(completion_rate)

        print("# episode :{}, steps : {}, rewards : {}, complete : {}, vehicle : {}, rsu : {}, cloud : {}"
              .format(episode + 1, step_count, total_reward,
                      completion_rate, n_vehicle, n_rsu, n_cloud))

        # Exponential moving averages (0.9 decay) for smoother curves.
        if ma_episode_rewards:
            ma_episode_rewards.append(0.9 * ma_episode_rewards[-1] + 0.1 * total_reward)
        else:
            ma_episode_rewards.append(total_reward)

        if ma_rate_history:
            ma_rate_history.append(0.9 * ma_rate_history[-1] + 0.1 * completion_rate)
        else:
            ma_rate_history.append(completion_rate)

    res_dic_rewards = {'rewards': episode_rewards, 'ma_rewards': ma_episode_rewards}
    res_dic_completion_rate = {'completion_rate': rate_history, 'ma_completion_rate': ma_rate_history}
    if not os.path.exists(cfg.result_path):
        os.makedirs(cfg.result_path)
    save_results_1(res_dic_rewards, tag='train', path=cfg.result_path)
    save_results_1(res_dic_completion_rate, tag='train', path=cfg.result_path)
    plot_rewards(res_dic_rewards['rewards'], res_dic_rewards['ma_rewards'], cfg, tag="train")
    plot_completion_rate(res_dic_completion_rate['completion_rate'], res_dic_completion_rate['ma_completion_rate'], cfg, tag="train")
    env.close()
208 |
209 |
210 |
211 |
if __name__ == '__main__':
    cfg=get_args()
    make_dir(cfg.result_path, cfg.model_path)
    env,global_model =env_agent_config(cfg)
    # Move the global model's parameters into shared memory so every
    # spawned process reads/writes the same tensors (A3C-style sharing).
    global_model.share_memory()
    # Queue intended for passing result data between processes.
    # NOTE(review): result_queue is created but never used below — confirm
    # whether it can be removed or was meant to be passed to the workers.
    result_queue = mp.Queue()
    processes = []
    # One extra process beyond the training workers: rank 0 runs evaluation.
    for rank in range(cfg.n_train_processes + 1):
        if rank == 0:
            _, agent = env_agent_config(cfg)
            # Rank 0: evaluation process running test_global_model on the shared model.
            # NOTE(review): every process receives the same `env` instance created
            # above (the per-rank env from env_agent_config is discarded) — confirm
            # the environment is safe to share across processes.
            p = mp.Process(target=test_global_model, args=(cfg,env,global_model))
        else:
            # Ranks >= 1: training workers pushing gradients into the shared model.
            _,agent=env_agent_config(cfg)
            p = mp.Process(target=train, args=(cfg, env, agent,global_model,rank))
        p.start()
        processes.append(p)
    # Block until the evaluator and all training workers have finished.
    for p in processes:
        p.join()

    save_args(cfg)  # persist the run configuration
    global_model.save(path=cfg.model_path)  # save model
241 |
--------------------------------------------------------------------------------
/methods/DQN/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/README.md
--------------------------------------------------------------------------------
/methods/DQN/dqn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | '''
4 | @Author: John
5 | @Email: johnjim0816@gmail.com
6 | @Date: 2020-06-12 00:50:49
7 | @LastEditor: John
8 | LastEditTime: 2022-07-13 00:08:18
9 | @Description:
10 | @Environment: python 3.7.7
11 | '''
12 | '''off-policy
13 | '''
14 |
15 | import torch
16 | import torch.nn as nn
17 | import torch.nn.functional as F
18 | import torch.optim as optim
19 | import random
20 | import math
21 | import numpy as np
22 |
class MLP(nn.Module):
    """Three-layer fully connected Q-network.

    Maps a state vector with ``n_states`` features to one Q-value per
    action (``n_actions`` outputs).
    """

    def __init__(self, n_states, n_actions, hidden_dim=128):
        super(MLP, self).__init__()
        # input -> hidden -> hidden -> output
        self.fc1 = nn.Linear(n_states, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, n_actions)

    def forward(self, x):
        # ReLU on both hidden layers; the output layer yields raw Q-values.
        hidden = F.relu(self.fc2(F.relu(self.fc1(x))))
        return self.fc3(hidden)
39 |
class ReplayBuffer:
    """Fixed-capacity circular buffer of transitions for off-policy learning."""

    def __init__(self, capacity):
        self.capacity = capacity  # maximum number of stored transitions
        self.buffer = []          # underlying storage
        self.position = 0         # next write index (wraps around at capacity)

    def push(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest once the buffer is full."""
        if len(self.buffer) < self.capacity:
            # Grow the list until capacity is reached, then overwrite in place.
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Draw a random mini-batch and unzip it into per-field tuples."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.buffer)
63 |
class DQN:
    """Deep Q-Network agent: epsilon-greedy exploration + separate target network."""

    def __init__(self, n_states,n_actions,cfg):
        """Build the policy/target networks, optimizer and replay memory.

        Args:
            n_states: dimensionality of the environment state vector.
            n_actions: number of discrete actions.
            cfg: config providing device, gamma, epsilon_start/end/decay,
                batch_size, lr and memory_capacity.
        """
        self.n_actions = n_actions  # total number of discrete actions
        self.device = cfg.device  # torch device, cpu or gpu
        self.gamma = cfg.gamma  # discount factor for future rewards
        # epsilon-greedy exploration parameters
        self.frame_idx = 0  # step counter driving the epsilon decay
        # Epsilon decays exponentially from epsilon_start towards epsilon_end.
        self.epsilon = lambda frame_idx: cfg.epsilon_end + \
            (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.batch_size = cfg.batch_size
        self.policy_net = MLP(n_states,n_actions).to(self.device)
        self.target_net = MLP(n_states,n_actions).to(self.device)
        for target_param, param in zip(self.target_net.parameters(),self.policy_net.parameters()): # copy parameters into the target network
            target_param.data.copy_(param.data)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr) # optimizer for the policy network only
        self.memory = ReplayBuffer(cfg.memory_capacity) # experience replay buffer

    def choose_action(self, state):
        """Select an action epsilon-greedily from the policy network."""
        self.frame_idx += 1
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():
                state = torch.tensor(state, device=self.device, dtype=torch.float32).unsqueeze(dim=0)
                q_values = self.policy_net(state)
                action = q_values.max(1)[1].item() # greedy: the action with the largest Q-value
        else:
            action = random.randrange(self.n_actions)  # explore: uniform random action
        return action
    def update(self):
        """Sample one mini-batch from replay memory and take a gradient step."""
        if len(self.memory) < self.batch_size: # skip the update until a full batch is stored
            return
        # Randomly sample a batch of transitions from the replay memory.

        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
            self.batch_size)
        state_batch = torch.tensor(np.array(state_batch), device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
        next_state_batch = torch.tensor(np.array(next_state_batch), device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch) # Q(s_t, a) for the actions actually taken
        next_q_values = self.target_net(next_state_batch).max(1)[0].detach() # max_a Q_target(s_{t+1}, a), gradient-free
        # TD target; for terminal transitions (done=1) it reduces to the reward.
        expected_q_values = reward_batch + self.gamma * next_q_values * (1-done_batch)
        loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1)) # mean squared TD error
        # Optimise the policy network.
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters(): # clip gradients to prevent explosion
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

    def save(self, path):
        """Save the target network weights to ``path + 'dqn_checkpoint.pth'``."""
        torch.save(self.target_net.state_dict(), path+'dqn_checkpoint.pth')

    def load(self, path):
        """Load checkpoint weights into the target network, then copy them into the policy network."""
        self.target_net.load_state_dict(torch.load(path+'dqn_checkpoint.pth'))
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            param.data.copy_(target_param.data)
127 |
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-101444/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102149/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231009-102312/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231011-143243/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231013-084908/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231016-141744/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231018-102922/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231020-192110/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231023-092959/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231025-084607/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231115-085643/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231122-103436/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/DQN/outputs/Multihop-V2V/20231123-161552/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/DQN/run_DQN.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import sys, os
4 | curr_path = os.path.dirname(os.path.abspath(__file__)) # 当前文件所在绝对路径
5 | parent_path = os.path.dirname(curr_path) # 父路径
6 | # print(curr_path)
7 |
8 | # sys.path.append(parent_path) # 添加路径到系统路径
9 | parent_path_1 = os.path.dirname(parent_path)
10 | sys.path.append(parent_path_1)
11 | # print(parent_path)
12 |
13 |
14 |
15 | import gym
16 | import torch
17 | import datetime
18 | import numpy as np
19 | import argparse
20 |
21 | from methods.DQN.dqn import DQN
22 | from env import environment
23 | from env.config import VehicularEnvConfig
24 |
25 | from env.utils import plot_rewards, save_args, plot_completion_rate
26 | from env.utils import save_results_1, make_dir
27 |
28 |
def get_args():
    """Parse command-line hyperparameters for DQN training.

    Returns:
        argparse.Namespace: parsed arguments, augmented with
        ``result_path``/``model_path`` (timestamped output dirs) and
        ``device`` (cuda if available, else cpu).
    """
    curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # timestamp used to name this run's output dirs
    parser = argparse.ArgumentParser(description="hyperparameters")
    parser.add_argument('--algo_name', default='DQN', type=str, help="name of algorithm")
    parser.add_argument('--env_name', default='Multihop-V2V', type=str, help="name of environment")
    parser.add_argument('--train_eps', default=300, type=int, help="episodes of training")
    parser.add_argument('--test_eps', default=20, type=int, help="episodes of testing")
    parser.add_argument('--gamma', default=0.95, type=float, help="discounted factor")
    parser.add_argument('--epsilon_start', default=0.95, type=float, help="initial value of epsilon")
    parser.add_argument('--epsilon_end', default=0.01, type=float, help="final value of epsilon")
    parser.add_argument('--epsilon_decay', default=500, type=int, help="decay rate of epsilon")
    parser.add_argument('--lr', default=0.00005, type=float, help="learning rate")
    parser.add_argument('--memory_capacity', default=100000, type=int, help="memory capacity")
    parser.add_argument('--batch_size', default=128, type=int)  # 64 in earlier experiments
    parser.add_argument('--target_update', default=4, type=int)
    parser.add_argument('--hidden_dim', default=256, type=int)
    # BUG FIX: the original called parser.parse_args() inside the default=
    # expression of --result_path/--model_path, which parsed the command
    # line twice and before all arguments were registered. Register plain
    # options here and derive the defaults after the single parse below.
    parser.add_argument('--result_path', default=None, type=str, help="path to save result files")
    parser.add_argument('--model_path', default=None, type=str, help="path to save models")
    # BUG FIX: type=bool is broken for CLI flags (bool("False") is True);
    # accept explicit true/false style strings instead. Default unchanged.
    parser.add_argument('--save_fig', default=True,
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        help="if save figure or not")
    args = parser.parse_args()
    run_root = curr_path + "/outputs/" + args.env_name + '/' + curr_time
    if args.result_path is None:
        args.result_path = run_root + '/results/'
    if args.model_path is None:
        args.model_path = run_root + '/models/'
    args.device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")  # check GPU
    return args
56 |
def env_agent_config(cfg, seed=1):
    """Create the road-state environment and a DQN agent sized to match it.

    :param cfg: parsed hyperparameter namespace, forwarded to the DQN agent
    :param seed: kept for API compatibility; seeding is currently disabled
    :return: (env, agent) tuple
    """
    env = environment.RoadState()
    state_dim = env.observation_space.shape[0]  # dimensionality of the state vector
    action_dim = env.action_space.n             # size of the discrete action set
    print(f"n states: {state_dim}, n actions: {action_dim}")
    agent = DQN(state_dim, action_dim, cfg)
    # NOTE: random seeding (torch / env / numpy) was commented out upstream
    # and remains disabled here.
    return env, agent
70 |
def train(cfg, env, agent):
    ''' Training loop.

    Runs cfg.train_eps episodes of DQN training: per step it picks an
    action, steps the environment, stores the transition in the replay
    buffer and updates the agent; per episode it records the total reward,
    per-node offloading counts and the task completion rate (plus 0.9/0.1
    exponential moving averages), then saves the curves as .npy files under
    cfg.result_path and plots them.
    '''
    print('Start training!')
    print(f'Env:{cfg.env_name}, 算法:{cfg.algo_name}, 设备:{cfg.device}')
    rewards_plot = []
    ma_rewards_plot = []
    offloading_vehicle_number_plot = []
    offloading_rsu_number_plot = []
    offloading_cloud_number_plot = []
    completion_rate_plot=[]
    ma_completion_rate_plot = []

    for i_ep in range(cfg.train_eps):
        rewards = 0  # cumulative reward within one episode
        steps = 0
        offloading_vehicle_number = 0
        offloading_rsu_number = 0
        offloading_cloud_number = 0
        complete_number =0
        state,function = env.reset()  # reset the environment; returns the initial state
        while True:
            steps+= 1
            action = agent.choose_action(state)  # select an action
            next_state, reward, done,next_function,offloading_vehicle,offloading_rsu,offloading_cloud,complete = env.step(action,function)  # step the environment; returns the transition
            agent.memory.push(state, action, reward,
                              next_state, done)  # store the transition in the replay buffer
            state = next_state  # advance to the next state
            function = next_function
            agent.update()  # update the agent
            rewards += reward  # accumulate the reward
            offloading_vehicle_number+=offloading_vehicle
            offloading_rsu_number+=offloading_rsu
            offloading_cloud_number+=offloading_cloud
            complete_number+=complete
            if done:
                break
        if (i_ep + 1) % cfg.target_update == 0:  # sync the target network every target_update episodes
            agent.target_net.load_state_dict(agent.policy_net.state_dict())

        rewards_plot.append(rewards)
        offloading_vehicle_number_plot.append(offloading_vehicle_number)
        offloading_rsu_number_plot.append(offloading_rsu_number)
        offloading_cloud_number_plot.append(offloading_cloud_number)
        # Fraction of tasks completed out of the episode's total task count
        # (one task per RSU per time slot, slots 0..time_slot_end inclusive).
        completion_rate = complete_number / (
            VehicularEnvConfig().rsu_number * (VehicularEnvConfig().time_slot_end + 1))
        completion_rate_plot.append(completion_rate)
        print("# episode :{}, steps : {}, rewards : {}, complete : {}, vehicle : {}, rsu : {}, cloud : {}"
              .format(i_ep + 1, steps, rewards,
                      completion_rate, offloading_vehicle_number, offloading_rsu_number, offloading_cloud_number))
        # Exponential moving average of rewards (0.9 old + 0.1 new).
        if ma_rewards_plot:
            ma_rewards_plot.append(0.9 * ma_rewards_plot[-1] + 0.1 * rewards)
        else:
            ma_rewards_plot.append(rewards)

        # Exponential moving average of the completion rate.
        if ma_completion_rate_plot:
            ma_completion_rate_plot.append(0.9 * ma_completion_rate_plot[-1] + 0.1 * completion_rate)
        else:
            ma_completion_rate_plot.append(completion_rate)

    res_dic_rewards = {'rewards': rewards_plot, 'ma_rewards': ma_rewards_plot}
    res_dic_completion_rate = {'completion_rate': completion_rate_plot,
                               'ma_completion_rate': ma_completion_rate_plot}
    if not os.path.exists(cfg.result_path):
        os.makedirs(cfg.result_path)
    save_results_1(res_dic_rewards, tag='train',
                   path=cfg.result_path)
    save_results_1(res_dic_completion_rate, tag='train',
                   path=cfg.result_path)
    plot_rewards(res_dic_rewards['rewards'], res_dic_rewards['ma_rewards'], cfg, tag="train")
    plot_completion_rate(res_dic_completion_rate['completion_rate'], res_dic_completion_rate['ma_completion_rate'],
                         cfg, tag="train")
    env.close()
144 |
145 |
146 |
if __name__ == "__main__":
    cfg = get_args()
    # Training: build the environment/agent pair and run the DQN loop.
    env, agent = env_agent_config(cfg)
    train(cfg, env, agent)
152 |
--------------------------------------------------------------------------------
/methods/Draw_pictures/A3C_completion_rate.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/A3C_completion_rate.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/A3C_completion_rate_lr.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/A3C_completion_rate_lr.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/A3C_rewards.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/A3C_rewards.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/A3C_rewards_lr.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/A3C_rewards_lr.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/contrast_completion_rate.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/contrast_completion_rate.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/contrast_rewards.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/contrast_rewards.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/different_task_computation_resource_average_rewards.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/different_task_computation_resource_average_rewards.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/different_tasksize_average_rewards.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/different_tasksize_average_rewards.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/different_vehicle_number_average_rewards.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/different_vehicle_number_average_rewards.pdf
--------------------------------------------------------------------------------
/methods/Draw_pictures/different_vehicle_speed_average_rewards.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Draw_pictures/different_vehicle_speed_average_rewards.pdf
--------------------------------------------------------------------------------
/methods/Greedy/greedy_tasksize.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from env.config import VehicularEnvConfig
class Greedy(object):
    """Greedy baseline: offload each RSU's task to the currently least-loaded node."""

    def __init__(self):
        # Environment configuration; provides rsu_number (number of tasks per step).
        self.config = VehicularEnvConfig()

    def choose_action(self, state, function) -> int:
        """Pick an offloading node for each RSU task and encode the picks as one action.

        For each of the ``rsu_number`` tasks, the node with the smallest
        queued load is chosen, and that node's load is increased by the
        task's data size so subsequent choices see the updated queue.

        :param state: per-node load vector (mutable, indexable by node id)
        :param function: per-RSU task objects exposing ``get_task_datasize()``
        :return: 1-based combined action index (three 1..14 picks, base-14 encoded)
        """
        action_list = []
        State = state
        function_size = []
        for i in range(self.config.rsu_number):
            function_size.append(function[i].get_task_datasize())

        for i in range(len(function_size)):
            min_index = np.argmin(State)
            action_list.append(min_index + 1)
            # BUG FIX: the original did `State[i] += function_size[i]`,
            # adding the task load to node i instead of the *chosen* node,
            # so later argmin calls never saw the assigned load and the
            # greedy kept re-picking the same node.
            State[min_index] = State[min_index] + function_size[i]
        x = action_list[0]
        y = action_list[1]
        z = action_list[2]
        # Encode the three 1..14 node choices into a single action id.
        action = (x - 1) + (y - 1) * 14 + (z - 1) * 14 * 14 + 1
        return action
30 |
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231005-192011/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231008-130015/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231016-193257/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231020-215837/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-140726/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231025-152310/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-092657/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231105-150100/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-084841/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-143001/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231106-182637/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-103741/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231107-161928/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-082916/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-091912/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231108-102015/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_ma_completion_rate.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_ma_completion_rate.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_ma_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_ma_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_rewards.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZWLab23/Asynchronous-DRL-based-Multi-Hop-Task-Offloading-in-RSU-assisted-IoV-Networks/8a3529dc3eeb6f7afd367288134456ac2f9740e6/methods/Greedy/outputs/Multihop-V2V/20231115-085643/results/train_rewards.npy
--------------------------------------------------------------------------------
/methods/Greedy/run_greedy.py:
--------------------------------------------------------------------------------
import sys, os
curr_path = os.path.dirname(os.path.abspath(__file__))  # absolute path of the directory containing this file
parent_path = os.path.dirname(curr_path)  # parent directory (the methods/ package root)
print(curr_path)
sys.path.append(parent_path)  # add the parent directory to the module search path
# parent_path_1 = os.path.dirname(parent_path)
# sys.path.append(parent_path_1)

import torch
import argparse

from env.utils import plot_rewards, save_args, plot_completion_rate
from env.utils import save_results_1, make_dir
from env import environment

import datetime
from env.config import VehicularEnvConfig
from methods.Greedy.greedy_tasksize import Greedy
19 |
20 |
def get_args(argv=None):
    """Parse command-line hyperparameters for the Greedy baseline run.

    Args:
        argv: Optional list of argument strings; ``None`` means use
              ``sys.argv[1:]``. Accepting an explicit list keeps the
              function testable without touching the real command line.

    Returns:
        argparse.Namespace with the algorithm/environment names, episode
        counts, per-run timestamped output paths (``result_path`` /
        ``model_path``) and the selected torch ``device``.
    """
    curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # timestamp identifying this run
    curr_path = os.path.dirname(os.path.abspath(__file__))  # directory of this script
    parser = argparse.ArgumentParser(description="hyperparameters")
    parser.add_argument('--algo_name', default='greedy', type=str, help="name of algorithm")
    parser.add_argument('--env_name', default='Multihop-V2V', type=str, help="name of environment")
    parser.add_argument('--train_eps', default=300, type=int, help="episodes of training")
    parser.add_argument('--test_eps', default=200, type=int, help="episodes of testing")
    # The output paths depend on --env_name, so they are derived AFTER the
    # single parse below. The original called parser.parse_args() while the
    # parser was still being built, which parses sys.argv twice and rejects
    # any option (such as --result_path itself) not yet registered.
    parser.add_argument('--result_path', default=None, help="path to save results")
    parser.add_argument('--model_path', default=None, help="path to save models")
    # type=bool is an argparse pitfall: bool("False") is True. Parse common
    # falsy strings explicitly; the default remains True as before.
    parser.add_argument('--save_fig', default=True,
                        type=lambda s: str(s).lower() not in ('false', '0', 'no'),
                        help="if save figure or not")
    args = parser.parse_args(argv)
    run_dir = curr_path + "/outputs/" + args.env_name + '/' + curr_time
    if args.result_path is None:
        args.result_path = run_dir + '/results/'
    if args.model_path is None:
        args.model_path = run_dir + '/models/'
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # check GPU
    return args
39 |
40 |
41 |
def train(cfg, env, agent):
    """Training loop for the Greedy baseline.

    Runs ``cfg.train_eps`` episodes; per episode it accumulates the total
    reward, the counts of tasks offloaded to vehicle / RSU / cloud, and the
    task-completion rate, then saves the reward and completion-rate series
    (raw + exponential moving average) to ``cfg.result_path`` and plots them.

    Args:
        cfg:   parsed hyperparameter namespace (see ``get_args``).
        env:   environment exposing ``reset() -> (state, function)``,
               ``step(action, function)`` returning an 8-tuple, and ``close()``.
        agent: policy exposing ``choose_action(state, function)``.

    Side effects: writes ``.npy`` result arrays under ``cfg.result_path``
    (creating it if needed) and renders the training curves.
    """
    print('Start training!')
    print(f'Env:{cfg.env_name}, A{cfg.algo_name}, 设备:{cfg.device}')
    rewards_plot = []
    ma_rewards_plot = []  # exponential moving average of episode rewards
    offloading_vehicle_number_plot = []
    offloading_rsu_number_plot = []
    offloading_cloud_number_plot = []
    completion_rate_plot = []
    ma_completion_rate_plot = []  # exponential moving average of completion rate
    # Hoisted out of the loop: the per-episode task total is a loop-invariant
    # derived from static environment configuration (the original constructed
    # VehicularEnvConfig() twice every episode).
    env_cfg = VehicularEnvConfig()
    tasks_per_episode = env_cfg.rsu_number * (env_cfg.time_slot_end + 1)
    for n_epi in range(cfg.train_eps):
        rewards = 0
        steps = 0
        done = False
        offloading_vehicle_number = 0
        offloading_rsu_number = 0
        offloading_cloud_number = 0
        complete_number = 0
        state, function = env.reset()
        while not done:
            action = agent.choose_action(state, function)
            (next_state, reward, done, next_function,
             offloading_vehicle, offloading_rsu, offloading_cloud,
             complete) = env.step(action, function)
            state = next_state
            function = next_function
            steps += 1
            rewards += reward
            offloading_vehicle_number += offloading_vehicle
            offloading_rsu_number += offloading_rsu
            offloading_cloud_number += offloading_cloud
            complete_number += complete
        rewards_plot.append(rewards)
        offloading_vehicle_number_plot.append(offloading_vehicle_number)
        offloading_rsu_number_plot.append(offloading_rsu_number)
        offloading_cloud_number_plot.append(offloading_cloud_number)
        completion_rate = complete_number / tasks_per_episode
        completion_rate_plot.append(completion_rate)
        print("# episode :{}, steps : {}, rewards : {}, complete : {}, vehicle : {}, rsu : {}, cloud : {}"
              .format(n_epi + 1, steps, rewards,
                      completion_rate, offloading_vehicle_number, offloading_rsu_number, offloading_cloud_number))
        # time.sleep(1)

        # Moving averages with 0.9 decay, seeded with the first raw value.
        if ma_rewards_plot:
            ma_rewards_plot.append(0.9 * ma_rewards_plot[-1] + 0.1 * rewards)
        else:
            ma_rewards_plot.append(rewards)

        if ma_completion_rate_plot:
            ma_completion_rate_plot.append(0.9 * ma_completion_rate_plot[-1] + 0.1 * completion_rate)
        else:
            ma_completion_rate_plot.append(completion_rate)

    res_dic_rewards = {'rewards': rewards_plot, 'ma_rewards': ma_rewards_plot}
    res_dic_completion_rate = {'completion_rate': completion_rate_plot, 'ma_completion_rate': ma_completion_rate_plot}
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(cfg.result_path, exist_ok=True)
    save_results_1(res_dic_rewards, tag='train',
                   path=cfg.result_path)
    save_results_1(res_dic_completion_rate, tag='train',
                   path=cfg.result_path)
    plot_rewards(res_dic_rewards['rewards'], res_dic_rewards['ma_rewards'], cfg, tag="train")
    plot_completion_rate(res_dic_completion_rate['completion_rate'], res_dic_completion_rate['ma_completion_rate'], cfg, tag="train")
    env.close()
107 |
108 |
109 |
110 |
111 |
if __name__ == "__main__":
    cfg = get_args()
    # Training: build the road environment and the greedy agent, then run.
    env = environment.RoadState()
    agent = Greedy()
    train(cfg, env, agent)



# NOTE(review): the two notes below were left by the original author; their
# meaning (task-size range? experiment id?) is not evident from this file.
#20000-250000
#8
--------------------------------------------------------------------------------