├── .gitignore
├── Image processing module
│   ├── Geographic Data
│   │   ├── testmap.jpg
│   │   └── testmap_detect.jpg
│   ├── step2_Discretization.py
│   ├── step3_Edge detection and Contour extraction.py
│   ├── Simulated random obstacle.py
│   └── step1_ECDIS.py
├── Meteorological analysis module
│   ├── Meterorological Data
│   │   ├── u.xlsx
│   │   ├── v.xlsx
│   │   ├── u10.xlsx
│   │   ├── u2.xlsx
│   │   ├── v10.xlsx
│   │   ├── v2.xlsx
│   │   ├── u2_new.xlsx
│   │   ├── v2_new.xlsx
│   │   ├── u10_new.xlsx
│   │   ├── v10_new.xlsx
│   │   ├── u_interpolated.xlsx
│   │   ├── v_interpolated.xlsx
│   │   ├── wind_vector_2m_field.png
│   │   ├── wind_vector_10m_field.png
│   │   └── ocean_current_vector_field.png
│   ├── step1_nc_to_excel.py
│   └── step2_excel_to_vector field.py
├── Path planning module
│   ├── .idea
│   │   ├── vcs.xml
│   │   ├── inspectionProfiles
│   │   │   └── profiles_settings.xml
│   │   ├── modules.xml
│   │   ├── misc.xml
│   │   ├── DQN_mofan.iml
│   │   └── workspace.xml
│   ├── Ablation Experiments
│   │   ├── figure_ablation.png
│   │   ├── figure_ablation_demo.jpg
│   │   ├── figure_ablation_demo.pdf
│   │   ├── PDDQN_path1_x9522_y14992.xlsx
│   │   ├── episode-mean_total_reward(No Double)_x9522_y14992.pickle
│   │   ├── episode-mean_total_reward(No Dueling)_x9522_y14992.pickle
│   │   ├── episode-mean_total_reward(No Priority)_x9522_y14992.pickle
│   │   ├── episode-mean_total_reward(fusion DQN)_x9522_y14992.pickle
│   │   ├── episode-mean_total_reward(No Multimodal)_x9522_y14992.pickle
│   │   ├── Reward comparison for Ablation.py
│   │   └── demo1_Dueling_ablation.py
│   ├── Experiments for Comparison
│   │   ├── figure_comparison
│   │   ├── Astar_x9522_y14992.xlsx
│   │   ├── DQN_x9522_y14992.xlsx
│   │   ├── figure_comparison.pdf
│   │   ├── figure_comparison.png
│   │   ├── fusionDQN_x9522_y14992.xlsx
│   │   ├── A_star
│   │   │   ├── A_star_MLP (additional test)
│   │   │   │   ├── mlp_model.pth
│   │   │   │   ├── MLP_network.pdf
│   │   │   │   ├── Astar_MLP_test.xlsx
│   │   │   │   ├── MLP_comparison.pdf
│   │   │   │   ├── MLP_comparison.png
│   │   │   │   ├── MLP.py
│   │   │   │   ├── path comparison (MLP).py
│   │   │   │   └── A_star_MLP.py
│   │   │   ├── A_star_basic.py
│   │   │   ├── grid_with_circles.txt
│   │   │   └── A_star.py
│   │   └── demo1_3_algorithm_comparison.py
│   ├── Experiments for Testing Generalization Ability
│   │   ├── path_visualization.pdf
│   │   ├── path_visualization.png
│   │   ├── fusion DQN episode-steps.png
│   │   ├── fusion DQN episode-mean_Loss.png
│   │   ├── fusion DQN episode-steps_test.png
│   │   ├── fusion DQN episode-total_Loss.png
│   │   ├── fusion DQN episode-mean_reward.png
│   │   ├── fusion DQN episode-total_reward.png
│   │   ├── episode-steps(fusion DQN)_test.pickle
│   │   ├── eval_net_params_DQN_x9522_y14992.pkl
│   │   ├── fusion DQN episode-mean_Loss_test.png
│   │   ├── fusion DQN episode-mean_collision.png
│   │   ├── fusion DQN episode-total_Loss_test.png
│   │   ├── fusion DQN episode-total_collision.png
│   │   ├── fusion DQN episode-mean_reward_test.png
│   │   ├── fusion DQN episode-total_reward_test.png
│   │   ├── path_trainning_process_visualization.jpg
│   │   ├── path_trainning_process_visualization.pdf
│   │   ├── episode-mean_Loss(fusion DQN)_test.pickle
│   │   ├── episode-mean_reward(fusion DQN)_test.pickle
│   │   ├── episode-total_Loss(fusion DQN)_test.pickle
│   │   ├── eval_net_params_fusionDQN_x9522_y14992.pkl
│   │   ├── fusion DQN episode-mean_collision_test.png
│   │   ├── fusion DQN episode-total_collision_test.png
│   │   ├── episode-steps(fusion DQN)_x9522_y14992.pickle
│   │   ├── episode-total_reward(fusion DQN)_test.pickle
│   │   ├── fusionDQN_trainning_process_first_stage.xlsx
│   │   ├── fusionDQN_trainning_process_second_stage.xlsx
│   │   ├── fusionDQN_trainning_process_third_stage.xlsx
│   │   ├── episode-mean_collision(fusion DQN)_test.pickle
│   │   ├── episode-total_collision(fusion DQN)_test.pickle
│   │   ├── episode-mean_Loss(fusion DQN)_x9522_y14992.pickle
│   │   ├── episode-total_Loss(fusion DQN)_x9522_y14992.pickle
│   │   ├── episode-mean_reward(fusion DQN)_x9522_y14992.pickle
│   │   ├── episode-total_reward(fusion DQN)_x9522_y14992.pickle
│   │   ├── episode-mean_collision(fusion DQN)_x9522_y14992.pickle
│   │   ├── episode-total_collision(fusion DQN)_x9522_y14992.pickle
│   │   ├── fusionDQN_trainning_process_firststage_x9522_y14992.xlsx
│   │   ├── fusionDQN_trainning_process_secondstage_x9522_y14992.xlsx
│   │   ├── fusionDQN_trainning_process_thirdstage_x9522_y14992.xlsx
│   │   ├── different _stage_CR.py
│   │   ├── different _stage_SR.py
│   │   ├── all_model_trainning_process_visualization.py
│   │   ├── best_path_visualization.py
│   │   └── different_stage_path.py
│   ├── txt_to_matrix.py
│   ├── Multimodal_characteristics_Marine_environment.py
│   └── fusion_DQN.py
├── LICENSE
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.pyc
3 | *.pyo
4 | .vscode/
5 |
--------------------------------------------------------------------------------
/Image processing module/Geographic Data/testmap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Image processing module/Geographic Data/testmap.jpg
--------------------------------------------------------------------------------
/Image processing module/Geographic Data/testmap_detect.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Image processing module/Geographic Data/testmap_detect.jpg
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/u.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/u.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/v.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/v.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/u10.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/u10.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/u2.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/u2.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/v10.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/v10.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/v2.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/v2.xlsx
--------------------------------------------------------------------------------
/Path planning module/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/figure_ablation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/figure_ablation.png
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/u2_new.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/u2_new.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/v2_new.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/v2_new.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/u10_new.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/u10_new.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/v10_new.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/v10_new.xlsx
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/figure_ablation_demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/figure_ablation_demo.jpg
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/figure_ablation_demo.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/figure_ablation_demo.pdf
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/figure_comparison:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/figure_comparison
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/u_interpolated.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/u_interpolated.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/v_interpolated.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/v_interpolated.xlsx
--------------------------------------------------------------------------------
/Path planning module/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/PDDQN_path1_x9522_y14992.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/PDDQN_path1_x9522_y14992.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/Astar_x9522_y14992.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/Astar_x9522_y14992.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/DQN_x9522_y14992.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/DQN_x9522_y14992.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/figure_comparison.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/figure_comparison.pdf
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/figure_comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/figure_comparison.png
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/wind_vector_2m_field.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/wind_vector_2m_field.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/fusionDQN_x9522_y14992.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/fusionDQN_x9522_y14992.xlsx
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/wind_vector_10m_field.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/wind_vector_10m_field.png
--------------------------------------------------------------------------------
/Meteorological analysis module/Meterorological Data/ocean_current_vector_field.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Meteorological analysis module/Meterorological Data/ocean_current_vector_field.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/path_visualization.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/path_visualization.pdf
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/path_visualization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/path_visualization.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-steps.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-steps.png
--------------------------------------------------------------------------------
/Path planning module/txt_to_matrix.py:
--------------------------------------------------------------------------------
1 | def read_binary_txt(file_path):
2 |     matrix_list = []
3 |     with open(file_path, 'r') as file:
4 |         for line in file:
5 |             row = [int(value) for value in line.strip().split()]
6 |             matrix_list.append(row)
7 |     return matrix_list
8 |
9 |
--------------------------------------------------------------------------------
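
The function above returns a nested list of ints. A minimal usage sketch (not part of the repository) that loads the grid shipped with the A_star comparison experiments and converts it to a NumPy array:

import numpy as np

# Hypothetical usage of read_binary_txt; the path points at the grid file
# included under Experiments for Comparison/A_star.
matrix = read_binary_txt(r'Path planning module\Experiments for Comparison\A_star\grid_with_circles.txt')
grid = np.array(matrix, dtype=int)
print('grid shape:', grid.shape)
print('cells marked 1:', int((grid == 1).sum()))
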
/Path planning module/Ablation Experiments/episode-mean_total_reward(No Double)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/episode-mean_total_reward(No Double)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/mlp_model.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/mlp_model.pth
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/episode-mean_total_reward(No Dueling)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/episode-mean_total_reward(No Dueling)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/episode-mean_total_reward(No Priority)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/episode-mean_total_reward(No Priority)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/episode-mean_total_reward(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/episode-mean_total_reward(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/MLP_network.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/MLP_network.pdf
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_Loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_Loss.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-steps_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-steps_test.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_Loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_Loss.png
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/episode-mean_total_reward(No Multimodal)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Ablation Experiments/episode-mean_total_reward(No Multimodal)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/Astar_MLP_test.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/Astar_MLP_test.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/MLP_comparison.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/MLP_comparison.pdf
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/MLP_comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/MLP_comparison.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_reward.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_reward.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_reward.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_reward.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-steps(fusion DQN)_test.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-steps(fusion DQN)_test.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/eval_net_params_DQN_x9522_y14992.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/eval_net_params_DQN_x9522_y14992.pkl
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_Loss_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_Loss_test.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_collision.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_collision.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_Loss_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_Loss_test.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_collision.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_collision.png
--------------------------------------------------------------------------------
/Path planning module/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_reward_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_reward_test.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_reward_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_reward_test.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/path_trainning_process_visualization.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/path_trainning_process_visualization.jpg
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/path_trainning_process_visualization.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/path_trainning_process_visualization.pdf
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-mean_Loss(fusion DQN)_test.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-mean_Loss(fusion DQN)_test.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-mean_reward(fusion DQN)_test.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-mean_reward(fusion DQN)_test.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-total_Loss(fusion DQN)_test.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-total_Loss(fusion DQN)_test.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/eval_net_params_fusionDQN_x9522_y14992.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/eval_net_params_fusionDQN_x9522_y14992.pkl
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_collision_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-mean_collision_test.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_collision_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusion DQN episode-total_collision_test.png
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-steps(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-steps(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-total_reward(fusion DQN)_test.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-total_reward(fusion DQN)_test.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_first_stage.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_first_stage.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_second_stage.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_second_stage.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_third_stage.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_third_stage.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-mean_collision(fusion DQN)_test.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-mean_collision(fusion DQN)_test.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-total_collision(fusion DQN)_test.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-total_collision(fusion DQN)_test.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-mean_Loss(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-mean_Loss(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-total_Loss(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-total_Loss(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-mean_reward(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-mean_reward(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-total_reward(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-total_reward(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-mean_collision(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-mean_collision(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/episode-total_collision(fusion DQN)_x9522_y14992.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/episode-total_collision(fusion DQN)_x9522_y14992.pickle
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_firststage_x9522_y14992.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_firststage_x9522_y14992.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_secondstage_x9522_y14992.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_secondstage_x9522_y14992.xlsx
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_thirdstage_x9522_y14992.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ql3c20/RL-based-USV-Path-Planning-Under-the-Marine-Multimodal-Features-Considerations/HEAD/Path planning module/Experiments for Testing Generalization Ability/fusionDQN_trainning_process_thirdstage_x9522_y14992.xlsx
--------------------------------------------------------------------------------
/Path planning module/.idea/DQN_mofan.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 David / Quanbao Lin
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Image processing module/step2_Discretization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import ndimage
3 | from PIL import Image, ImageDraw
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | # Read the binary map----------------------------------
8 | binary_map_data = np.loadtxt(r'Image processing module\Geographic Data\testmap_original_105108.txt', dtype=int)
9 | non_one_indices = np.nonzero(binary_map_data != 1)
10 | min_y, max_y, min_x, max_x = np.min(non_one_indices[0]), np.max(non_one_indices[0]), np.min(non_one_indices[1]), np.max(non_one_indices[1])
11 | cropped_map = binary_map_data[min_y:max_y+1, min_x:max_x+1]
12 | non_zero_indices = np.nonzero(cropped_map != 0)
13 | min_y, max_y, min_x, max_x = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]), np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
14 | cropped_map = cropped_map[min_y:max_y+1, min_x:max_x+1]
15 |
16 |
17 |
18 |
19 | # Resize the map to 300*300 -------------------------------------------------
20 | desired_width = 300
21 | desired_length = 300
22 | scale_width = desired_width / cropped_map.shape[0]
23 | scale_length = desired_length / cropped_map.shape[1]
24 | scaled_map = ndimage.zoom(cropped_map, (scale_width, scale_length), order=1)
25 | scaled_map = np.round(scaled_map).astype(int)
26 | np.savetxt(r'Image processing module\Geographic Data\testmap_105108.txt', scaled_map, fmt='%d')
27 |
28 |
29 |
30 | binary_map_data = np.loadtxt(r'Image processing module\Geographic Data\testmap_105108.txt', dtype=int)
31 | # print("Shape of binary_map_data:", binary_map_data.shape)
32 | image = Image.fromarray((binary_map_data * 255).astype(np.uint8)) # Converts binary data to pixel values of 0-255
33 |
34 | # image.show()
35 | plt.imshow(image, cmap='gray')
36 | plt.show()
37 |
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/Image processing module/step3_Edge detection and Contour extraction.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | # Canny edge detection + contour extraction (which can be replaced by other image processing methods)
5 | image = cv2.imread(r'Image processing module\Geographic Data\testmap.jpg')
6 | gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
7 | edges = cv2.Canny(gray, 50, 150, apertureSize=3)
8 | contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
9 |
10 |
11 | gray_circles = []
12 | radii = []
13 |
14 | for contour in contours:
15 |     perimeter = cv2.arcLength(contour, True)
16 |     approx = cv2.approxPolyDP(contour, 0.03 * perimeter, True)
17 |     if len(approx) > 6:  # Assume a circular contour is approximated by a polygon with more than 6 sides
18 |         area = cv2.contourArea(contour)
19 |         if area > 100:
20 |             (x, y), radius = cv2.minEnclosingCircle(contour)
21 |             center = (int(x), int(y))
22 |             radius = int(radius)
23 | 
24 |             # Color threshold
25 |             mask = cv2.inRange(image, (100, 100, 100), (150, 150, 150))
26 |             masked_image = cv2.bitwise_and(image, image, mask=mask)
27 |             if cv2.mean(masked_image)[0] > 100:
28 |                 gray_circles.append((center, radius))
29 |                 radii.append(radius)
30 | 
31 | 
32 | center_change = []
33 | for (center, radius) in gray_circles:
34 |     cv2.circle(image, center, radius, (0, 255, 0), 2)
35 |     cv2.circle(image, center, 3, (0, 0, 255), -1)
36 |     center = (center[0], 600 - center[1])  # convert to a bottom-left origin (assumes a 600-pixel image height)
37 |     center_change.append(center)
38 |
39 | # Detected Gray Circle Positions
40 | print("Modified Gray Circle Positions:", center_change)
41 | print("Radii:", radii)
42 |
43 | # Detected Gray Circles
44 | cv2.imshow('Detected Gray Circles', image)
45 | cv2.waitKey(0)
46 | cv2.destroyAllWindows()
47 |
48 | # Save the image
49 | output_path = r'Image processing module\Geographic Data\testmap_detect.jpg'
50 | cv2.imwrite(output_path, image)
--------------------------------------------------------------------------------
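
The comment in the script above notes that Canny + contour extraction can be replaced by other image processing methods. One such alternative (not part of the repository; all HoughCircles parameter values are assumed starting points to be tuned for testmap.jpg) is OpenCV's Hough circle transform:

import cv2
import numpy as np

# Sketch of an alternative circular-obstacle detector using cv2.HoughCircles.
image = cv2.imread(r'Image processing module\Geographic Data\testmap.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)  # smoothing reduces spurious detections
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                           param1=150, param2=30, minRadius=5, maxRadius=60)
if circles is not None:
    for x, y, r in np.round(circles[0]).astype(int):
        cv2.circle(image, (x, y), r, (0, 255, 0), 2)
print('detected circles:', 0 if circles is None else len(circles[0]))
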
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/MLP.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.optim as optim
4 | from sklearn.model_selection import train_test_split
5 | import pandas as pd
6 | import numpy as np
7 |
8 | # Historical optimal data. (Data building is worth further exploration.)
9 | df = pd.read_excel(r'Historical optimal data.xlsx')
10 |
11 | # input: [x, y, angle, u, v, u2, v2] (7 dimensions)
12 | X = df[['x', 'y', 'angle', 'u', 'v', 'u2', 'v2']].values
13 | 
14 | # output: g_value (1 dimension)
15 | y = df['g_value'].values
16 | 
17 | # Split the data into training and test sets
18 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
19 |
20 | # Convert data to PyTorch tensor
21 | X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
22 | X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
23 | y_train_tensor = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)
24 | y_test_tensor = torch.tensor(y_test, dtype=torch.float32).view(-1, 1)
25 |
26 | # MLP network
27 | class MLP(nn.Module):
28 |     def __init__(self):
29 |         super(MLP, self).__init__()
30 |         self.fc1 = nn.Linear(7, 64)
31 |         self.relu = nn.ReLU()
32 |         self.fc2 = nn.Linear(64, 32)
33 |         self.fc3 = nn.Linear(32, 1)
34 | 
35 |     def forward(self, x):
36 |         x = self.fc1(x)
37 |         x = self.relu(x)
38 |         x = self.fc2(x)
39 |         x = self.relu(x)
40 |         x = self.fc3(x)
41 |         return x
42 |
43 |
44 | model = MLP()
45 | criterion = nn.MSELoss()
46 | optimizer = optim.Adam(model.parameters(), lr=0.001)
47 |
48 | # training
49 | num_epochs = 10000
50 | for epoch in range(num_epochs):
51 |     model.train()
52 | 
53 |     # forward pass
54 |     outputs = model(X_train_tensor)
55 |     loss = criterion(outputs, y_train_tensor)
56 | 
57 |     # backward pass
58 |     optimizer.zero_grad()
59 |     loss.backward()
60 |     optimizer.step()
61 | 
62 |     if (epoch + 1) % 10 == 0:
63 |         print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
64 |
65 |
66 | model.eval()
67 | with torch.no_grad():
68 |     predicted = model(X_test_tensor)
69 |     test_loss = criterion(predicted, y_test_tensor)
70 |     print(f'Test Loss: {test_loss.item():.4f}')
71 | torch.save(model.state_dict(), 'mlp_model.pth')
72 | print("Model parameters saved to 'mlp_model.pth'")
73 |
74 |
75 |
76 |
--------------------------------------------------------------------------------
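
For reference, a minimal inference sketch (not part of the repository) showing how the saved mlp_model.pth can be reloaded to predict a g_value; the sample state is hypothetical and follows the [x, y, angle, u, v, u2, v2] training layout:

import torch

model = MLP()  # the class defined above
model.load_state_dict(torch.load('mlp_model.pth'))
model.eval()
# Hypothetical 7-dimensional state: [x, y, angle, u, v, u2, v2]
sample = torch.tensor([[10.0, 20.0, 0.5, 0.1, -0.2, 0.05, 0.03]], dtype=torch.float32)
with torch.no_grad():
    g_value = model(sample)
print('predicted g_value:', g_value.item())
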
/Path planning module/Ablation Experiments/Reward comparison for Ablation.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import pickle
3 | import numpy as np
4 | fig = plt.figure(figsize=(12, 6))
5 |
6 |
7 | # No Multimodal
8 | # replace with your file path
9 | with open(r'Path planning module\Ablation Experiments\episode-mean_total_reward(No Multimodal)_x9522_y14992.pickle', 'rb') as f1:
10 |     info1 = pickle.load(f1)
11 | x1 = np.array(info1[0])
12 | y1 = np.array(info1[1])
13 |
14 |
15 | # No Priority
16 | # replace with your file path
17 | with open(r'Path planning module\Ablation Experiments\episode-mean_total_reward(No Priority)_x9522_y14992.pickle', 'rb') as f2:
18 |     info2 = pickle.load(f2)
19 | x2 = np.array(info2[0])
20 | y2 = np.array(info2[1])
21 |
22 |
23 |
24 |
25 | # No Dueling
26 | # replace with your file path
27 | with open(r'Path planning module\Ablation Experiments\episode-mean_total_reward(No Dueling)_x9522_y14992.pickle', 'rb') as f3:
28 |     info3 = pickle.load(f3)
29 | x3 = np.array(info3[0])
30 | y3 = np.array(info3[1])
31 |
32 |
33 |
34 | # No Double
35 | # replace with your file path
36 | with open(r'Path planning module\Ablation Experiments\episode-mean_total_reward(No Double)_x9522_y14992.pickle', 'rb') as f4:
37 |     info4 = pickle.load(f4)
38 | x4 = np.array(info4[0])
39 | y4 = np.array(info4[1])
40 |
41 |
42 | # fusion DQN
43 | # replace with your file path
44 | with open(r'Path planning module\Ablation Experiments\episode-mean_total_reward(fusion DQN)_x9522_y14992.pickle', 'rb') as f5:
45 |     info5 = pickle.load(f5)
46 | x5 = np.array(info5[0])
47 | y5 = np.array(info5[1])
48 |
49 |
50 |
51 |
52 | plt.plot(x1, y1, c=(0/255,0/255,255/255), label='No Multimodal', linewidth=3)
53 | plt.plot(x2, y2, c=(0/255,127/255,0/255), label='No Priority', linewidth=3)
54 | plt.plot(x3, y3, c=(146/255,38/255,146/255), label='No Dueling', linewidth=3)
55 | plt.plot(x4, y4, c=(255/255,165/255,0/255),label='No Double', linewidth=3)
56 | plt.plot(x5, y5, c=(255/255,0/255,0/255), label='fusion DQN', linewidth=3)
57 |
58 | plt.ylim(84, 96)
59 |
60 |
61 | plt.legend(loc='best', fontsize='16')
62 | plt.ylabel('reward', fontsize=16)
63 | plt.xlabel('episode', fontsize=16)
64 | plt.xticks(fontsize=14)
65 | plt.yticks(fontsize=14)
66 |
67 |
68 |
69 | plt.savefig(r'Path planning module\Ablation Experiments\figure_ablation.png')  # replace with your file path
70 | plt.savefig(r'Path planning module\Ablation Experiments\figure_ablation.eps')
71 | plt.savefig(r'Path planning module\Ablation Experiments\figure_ablation.pdf')
72 | plt.show()
73 |
74 |
75 |
76 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RL-based USV Path Planning Under the Marine Multimodal Features Considerations
2 | * @Author: DavidLin
3 | * @Date : 2024/12/7
4 | * @Contact : davidlin659562@gmail.com
5 | * @Description : This code corresponds to the work "RL-based USV Path Planning Under the Marine Multimodal Features Considerations"
6 | submitted to IEEE Internet of Things Journal.
7 |
8 |
9 | # The structure is as follows:
10 | > Image processing module
11 | * [Geographic Data]: Files that may be used in the Image processing module.
12 | * `step1_ECDIS.py`: Code to process the ECDIS file.
13 | * `step2_Discretization.py`: Code to rasterize the processed ECDIS file.
14 | * `step3_Edge detection and Contour extraction.py`: Code to detect random obstacles through edge detection and contour extraction.
15 | > Meteorological analysis module
16 | * [Meterorological Data]: Files that may be used in the Meteorological analysis module.
17 | * `step1_nc_to_excel.py`: Code to convert the dataset files (.nc) to Excel.
18 | * `step2_excel_to_vector field.py`: Code to process the meteorological data through logarithmic formula transformation and bilinear interpolation.
19 | > Path planning module
20 | * [Ablation Experiments]: Files that may be used in the Ablation Experiments.
21 | * [Experiments for Comparison]: Files that may be used in the Experiments for Comparison.
22 | * [Experiments for Testing Generalization Ability]: Files that may be used in the Experiments for Testing Generalization Ability.
23 | * `fusion_DQN.py`: Code to perform reinforcement learning with the fusion DQN proposed in the paper.
24 | * `Multimodal_characteristics_Marine_environment.py`: Code to construct the interactive environment in which the RL agent learns.
25 |
26 |
27 | # Supplementary introduction
28 | 1. The preprocessing steps need to be completed according to the Image processing module and the Meteorological analysis module.
29 | (Of course, the processed data has also been prepared.)
30 | 2. The provided test data is sufficient to run all the experiments; it can be replaced with your own data.
31 | 3. In all files, the parts that can be adjusted have been marked, such as hyperparameters, file paths, environmental data, etc.
32 | If you have any questions or suggestions, please feel free to contact me.
33 |
34 | # Dataset download
35 | Download the [Reanalysis CORAv1.0 dataset](https://mds.nmdis.org.cn/pages/dataViewDetail.html?dataSetId=83).
36 | Download the [ERA5 dataset](https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-single-levels-monthly-means?tab=overview).
37 |
38 |
39 | # Contact email:
40 | DavidLin: davidlin659562@gmail.com
--------------------------------------------------------------------------------
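
For reference, a minimal sketch (not part of the repository) of the kind of .nc-to-Excel conversion step1_nc_to_excel.py performs on the downloaded datasets; it assumes xarray (with netCDF4) and openpyxl are installed, and the file name era5.nc, the time index, and the ERA5 variable names u10/v10 are all assumptions:

import xarray as xr

ds = xr.open_dataset('era5.nc')           # NetCDF file downloaded from CDS (assumed name)
u10 = ds['u10'].isel(time=0).to_pandas()  # one 2D (lat x lon) slice as a DataFrame
v10 = ds['v10'].isel(time=0).to_pandas()
u10.to_excel('u10.xlsx')
v10.to_excel('v10.xlsx')
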
/Image processing module/Simulated random obstacle.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import ndimage
3 | from PIL import Image, ImageDraw
4 | import random
5 |
6 |
7 |
8 | # load picture------------------------------------
9 | desired_width = 200
10 | desired_length = 300
11 | binary_map_data = np.loadtxt(r'Image processing module\testmap_105108.txt', dtype=int)
12 | one_indices = np.argwhere(binary_map_data == 1)
13 | image = Image.new("RGB", (desired_length, desired_width), "white")
14 | draw = ImageDraw.Draw(image)
15 | bw_image = Image.fromarray((binary_map_data * 255).astype(np.uint8))
16 | image.paste(bw_image, (0, 0))
17 | filtered_indices = [(y, x) for y, x in one_indices if (0 <= x < desired_length) and (0 <= y < desired_width)]
18 |
19 |
20 |
21 | # Place ten circles at random water positions with random diameters------------------------------------
22 | for _ in range(10):
23 |     valid_position = False
24 |     diameter = random.randint(4, 6)
25 |     while not valid_position:
26 |         center = random.choice(filtered_indices)
27 |         if center[0] - diameter // 2 >= 0 and center[0] + diameter // 2 < desired_length \
28 |                 and center[1] - diameter // 2 >= 0 and center[1] + diameter // 2 < desired_width:
29 |             if binary_map_data[center[1] - diameter // 2][center[0] - diameter // 2] == 1 \
30 |                     and binary_map_data[center[1] + diameter // 2][center[0] + diameter // 2] == 1 \
31 |                     and binary_map_data[center[1] + diameter // 2][center[0] - diameter // 2] == 1 \
32 |                     and binary_map_data[center[1] - diameter // 2][center[0] + diameter // 2] == 1:  # all four bounding-box corners on water
33 |                 valid_position = True
34 |             else:
35 |                 valid_position = False
36 |         else:
37 |             valid_position = False
38 | bounding_box = [
39 | (center[1] - diameter // 2, center[0] - diameter // 2),
40 | (center[1] + diameter // 2, center[0] + diameter // 2)
41 | ]
42 | draw.ellipse(bounding_box, fill="red")
43 | print(f"Random Circle: Center {center}, Diameter {diameter}")
44 |
45 | # goal point------------------------------------
46 | goal_center = (200-89, 147)
47 | goal_radius = 10
48 | goal_bounding_box = [
49 | (goal_center[1] - goal_radius, goal_center[0] - goal_radius),
50 | (goal_center[1] + goal_radius, goal_center[0] + goal_radius)
51 | ]
52 | draw.ellipse(goal_bounding_box, fill=(250,109,0))
53 |
54 |
55 |
56 | # agent(from start point)------------------------------------
57 | agent_center = (200-16, 97)
58 | agent_radius = 4
59 | agent_bounding_box = [
60 | (agent_center[1] - agent_radius, agent_center[0] - agent_radius),
61 | (agent_center[1] + agent_radius, agent_center[0] + agent_radius)
62 | ]
63 | draw.ellipse(agent_bounding_box, fill=(0,255,0))
64 |
65 |
66 | image.show()
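

# Illustrative consolidation (a sketch, not called above): the four corner checks in
# the placement loop can be collected into one helper. `grid` is indexed [row][col],
# matching binary_map_data[center[1] +/- r][center[0] +/- r] in the loop above.
def corners_on_water(grid, cx, cy, d):
    r = d // 2
    corners = [(cy - r, cx - r), (cy - r, cx + r), (cy + r, cx - r), (cy + r, cx + r)]
    return all(grid[y][x] == 1 for y, x in corners)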
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/different _stage_CR.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pickle
3 |
4 | # replace it with your training file path
5 | file_path = r'Path planning module\Experiments for Testing Generalization Ability\episode-total_collision(fusion DQN)_x9522_y14992.pickle'  # replace with your file path
6 | with open(file_path, 'rb') as file:
7 | episodeVStotal_collision = pickle.load(file)
8 |
9 |
10 | # episode 0 ~ episode 1667
11 | start_episode_part1 = 0
12 | end_episode_part1 = 1667
13 | collision_counts_part1 = episodeVStotal_collision[1][start_episode_part1:end_episode_part1]
14 | non_zero_collision_counts_part1 = collision_counts_part1[collision_counts_part1 != 0]
15 | non_zero_collision_ratio_part1 = len(non_zero_collision_counts_part1) / (end_episode_part1 - start_episode_part1)
16 | print(f'0~1667: {non_zero_collision_ratio_part1 * 100}%')
17 | 
18 | # episode 0 ~ episode 3334
19 | start_episode_1 = 0
20 | end_episode_1 = 3334
21 | collision_counts_1 = episodeVStotal_collision[1][start_episode_1:end_episode_1]
22 | non_zero_collision_counts_1 = collision_counts_1[collision_counts_1 != 0]
23 | non_zero_collision_ratio_1 = len(non_zero_collision_counts_1) / (end_episode_1 - start_episode_1)
24 | print(f'first stage: {non_zero_collision_ratio_1 * 100}%')
25 | 
26 | # episode 1667 ~ episode 5001
27 | start_episode_part2 = 1667
28 | end_episode_part2 = 5001
29 | collision_counts_part2 = episodeVStotal_collision[1][start_episode_part2:end_episode_part2]
30 | non_zero_collision_counts_part2 = collision_counts_part2[collision_counts_part2 != 0]
31 | non_zero_collision_ratio_part2 = len(non_zero_collision_counts_part2) / (end_episode_part2 - start_episode_part2)
32 | print(f'1667~5001: {non_zero_collision_ratio_part2 * 100}%')
33 | 
34 | # episode 3334 ~ episode 6667
35 | start_episode_2 = 3334
36 | end_episode_2 = 6667
37 | collision_counts_2 = episodeVStotal_collision[1][start_episode_2:end_episode_2]
38 | non_zero_collision_counts_2 = collision_counts_2[collision_counts_2 != 0]
39 | non_zero_collision_ratio_2 = len(non_zero_collision_counts_2) / (end_episode_2 - start_episode_2)
40 | print(f'second stage: {non_zero_collision_ratio_2 * 100}%')
41 | 
42 | # episode 5001 ~ episode 8335
43 | start_episode_part3 = 5001
44 | end_episode_part3 = 8335
45 | collision_counts_part3 = episodeVStotal_collision[1][start_episode_part3:end_episode_part3]
46 | non_zero_collision_counts_part3 = collision_counts_part3[collision_counts_part3 != 0]
47 | non_zero_collision_ratio_part3 = len(non_zero_collision_counts_part3) / (end_episode_part3 - start_episode_part3)
48 | print(f'5001~8335: {non_zero_collision_ratio_part3 * 100}%')
49 | 
50 | # episode 6667 ~ episode 10000
51 | start_episode_3 = 6667
52 | end_episode_3 = 10000
53 | collision_counts_3 = episodeVStotal_collision[1][start_episode_3:end_episode_3]
54 | non_zero_collision_counts_3 = collision_counts_3[collision_counts_3 != 0]
55 | non_zero_collision_ratio_3 = len(non_zero_collision_counts_3) / (end_episode_3 - start_episode_3)
56 | print(f'third stage: {non_zero_collision_ratio_3 * 100}%')
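

# Consolidated form (a sketch): every block above repeats the same computation, so a
# single helper over a half-open episode window [start, end) avoids hard-coded divisors.
def window_ratio(values, start, end, predicate):
    window = values[start:end]
    return len(window[predicate(window)]) / len(window)

# e.g. the collision ratio of the first 1667 episodes:
# print(window_ratio(episodeVStotal_collision[1], 0, 1667, lambda v: v != 0) * 100, '%')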
57 |
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/different _stage_SR.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pickle
3 |
4 | # replace it with your training file path
5 | file_path = r'Path planning module\Experiments for Testing Generalization Ability\episode-steps(fusion DQN)_x9522_y14992.pickle'  # replace with your file path
6 | with open(file_path, 'rb') as file:
7 | episodeVSsteps = pickle.load(file)
8 |
9 |
10 | # episode 0 ~ episode 1667
11 | start_episode_part1 = 0
12 | end_episode_part1 = 1667
13 | num_episodes_to_average = end_episode_part1 - start_episode_part1
14 | steps_data_part1 = episodeVSsteps[1][start_episode_part1:end_episode_part1]
15 | steps_less_than_15_part1 = steps_data_part1[steps_data_part1 < 15]
16 | steps_less_than_15_ratio_part1 = len(steps_less_than_15_part1) / num_episodes_to_average
17 | print(f'episode {start_episode_part1} to episode {end_episode_part1}: {steps_less_than_15_ratio_part1 * 100}%')
18 | 
19 | 
20 | # first stage
21 | start_episode_1 = 0
22 | end_episode_1 = 3333
23 | num_episodes_to_average = end_episode_1 - start_episode_1
24 | steps_data_1 = episodeVSsteps[1][start_episode_1:end_episode_1]
25 | steps_less_than_15_1 = steps_data_1[steps_data_1 < 15]
26 | steps_less_than_15_ratio_1 = len(steps_less_than_15_1) / num_episodes_to_average
27 | print(f'episode {start_episode_1} to episode {end_episode_1}: {steps_less_than_15_ratio_1 * 100}%')
28 | 
29 | # episode 1667 ~ episode 5001
30 | start_episode_part2 = 1667
31 | end_episode_part2 = 5001
32 | num_episodes_to_average = end_episode_part2 - start_episode_part2
33 | steps_data_part2 = episodeVSsteps[1][start_episode_part2:end_episode_part2]
34 | steps_less_than_15_part2 = steps_data_part2[steps_data_part2 < 15]
35 | steps_less_than_15_ratio_part2 = len(steps_less_than_15_part2) / num_episodes_to_average
36 | print(f'episode {start_episode_part2} to episode {end_episode_part2}: {steps_less_than_15_ratio_part2 * 100}%')
37 | 
38 | # second stage
39 | start_episode_2 = 3334
40 | end_episode_2 = 6666
41 | num_episodes_to_average = end_episode_2 - start_episode_2
42 | steps_data_2 = episodeVSsteps[1][start_episode_2:end_episode_2]
43 | steps_less_than_15_2 = steps_data_2[steps_data_2 < 15]
44 | steps_less_than_15_ratio_2 = len(steps_less_than_15_2) / num_episodes_to_average
45 | print(f'episode {start_episode_2} to episode {end_episode_2}: {steps_less_than_15_ratio_2 * 100}%')
46 | 
47 | # episode 5001 ~ episode 8335
48 | start_episode_part3 = 5001
49 | end_episode_part3 = 8335
50 | num_episodes_to_average = end_episode_part3 - start_episode_part3
51 | steps_data_part3 = episodeVSsteps[1][start_episode_part3:end_episode_part3]
52 | steps_less_than_15_part3 = steps_data_part3[steps_data_part3 < 15]
53 | steps_less_than_15_ratio_part3 = len(steps_less_than_15_part3) / num_episodes_to_average
54 | print(f'episode {start_episode_part3} to episode {end_episode_part3}: {steps_less_than_15_ratio_part3 * 100}%')
55 | 
56 | # third stage
57 | start_episode_3 = 6667
58 | end_episode_3 = 10000
59 | num_episodes_to_average = end_episode_3 - start_episode_3
60 | steps_data_3 = episodeVSsteps[1][start_episode_3:end_episode_3]
61 | steps_less_than_15_3 = steps_data_3[steps_data_3 < 15]
62 | steps_less_than_15_ratio_3 = len(steps_less_than_15_3) / num_episodes_to_average
63 | print(f'episode {start_episode_3} to episode {end_episode_3}: {steps_less_than_15_ratio_3 * 100}%')
64 |
65 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/all_model_trainning_process_visualization.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pickle
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 | # the saved training results
7 | # replace it with your own training data path
8 | output_folder = r'Path planning module\Experiments for Testing Generalization Ability'
9 | file1_path = os.path.join(output_folder, 'episode-mean_collision(fusion DQN)_test.pickle')
10 | file2_path = os.path.join(output_folder, 'episode-mean_Loss(fusion DQN)_test.pickle')
11 | file3_path = os.path.join(output_folder, 'episode-mean_reward(fusion DQN)_test.pickle')
12 | file4_path = os.path.join(output_folder, 'episode-steps(fusion DQN)_test.pickle')
13 | file5_path = os.path.join(output_folder, 'episode-total_collision(fusion DQN)_test.pickle')
14 | file6_path = os.path.join(output_folder, 'episode-total_Loss(fusion DQN)_test.pickle')
15 | file7_path = os.path.join(output_folder, 'episode-total_reward(fusion DQN)_test.pickle')
16 |
17 |
18 | """
19 |
20 | Due to the complexity of the multimodal Marine environment and the number of rounds,
21 | to make the overall trend clearer, we will retain one extreme value for every five data in test figure within a reasonable range,
22 | which is also acceptable in the physical test.
23 |
24 | """
25 | def plot_data(file_path, title, xlabel, ylabel, image_path, operation=np.min):
26 | with open(file_path, 'rb') as file:
27 | episodeVSdata = pickle.load(file)
28 | episode = episodeVSdata[0, :]
29 | data = episodeVSdata[1, :]
30 |
31 |     # Reshape into blocks of five episodes and take the min or max of each block (assumes the episode count is a multiple of 5)
32 | reshaped_data = data.reshape(-1, 5)
33 | if operation == np.min:
34 | aggregated_data = np.min(reshaped_data, axis=1)
35 | else:
36 | aggregated_data = np.max(reshaped_data, axis=1)
37 |
38 | # Generate new episode numbers for the aggregated data
39 | aggregated_episode = np.arange(0, len(aggregated_data)) * 5 + 2.5
40 |
41 | plt.figure()
42 | plt.plot(aggregated_episode, aggregated_data, c='r')
43 | plt.legend([title], loc='best')
44 | plt.ylabel(ylabel)
45 | plt.xlabel(xlabel)
46 | plt.title(title)
47 | plt.grid()
48 | plt.savefig(image_path)
49 | plt.show()
50 |
51 | # Plot each graph with min or max operation
52 | # define your own save folder path
53 | plot_data(file1_path, 'mean_collision', 'episode', 'mean_collision', os.path.join(output_folder, 'fusion DQN episode-mean_collision_test.png'), np.min)
54 | plot_data(file2_path, 'mean_Loss', 'episode', 'mean_Loss', os.path.join(output_folder, 'fusion DQN episode-mean_Loss_test.png'), np.min)
55 | plot_data(file3_path, 'mean_reward', 'episode', 'mean_reward', os.path.join(output_folder, 'fusion DQN episode-mean_reward_test.png'), np.max)
56 | plot_data(file4_path, 'steps', 'episode', 'steps', os.path.join(output_folder, 'fusion DQN episode-steps_test.png'), np.min)
57 | plot_data(file5_path, 'total_collision', 'episode', 'total_collision', os.path.join(output_folder, 'fusion DQN episode-total_collision_test.png'), np.min)
58 | plot_data(file6_path, 'total_Loss', 'episode', 'total_Loss', os.path.join(output_folder, 'fusion DQN episode-total_Loss_test.png'), np.min)
59 | plot_data(file7_path, 'total_reward', 'episode', 'total_reward', os.path.join(output_folder, 'fusion DQN episode-total_reward_test.png'), np.max)
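

# Worked toy example (illustrative only) of the per-five-episode downsampling used in
# plot_data: reshape(-1, 5) requires len(data) to be a multiple of 5, so for other
# episode counts the tail would need trimming first, as below.
demo = np.array([3, 9, 1, 7, 5, 2, 8, 6, 4, 0])
demo = demo[:len(demo) // 5 * 5]
print(demo.reshape(-1, 5).min(axis=1))  # -> [1 0], one minimum per five episodes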
--------------------------------------------------------------------------------
/Meteorological analysis module/step1_nc_to_excel.py:
--------------------------------------------------------------------------------
1 | import netCDF4 as nc
2 | import pandas as pd
3 | import numpy as np
4 | import math
5 | from scipy.interpolate import RectBivariateSpline
6 |
7 |
8 | # ERA5——u10v10----------------------------------------
9 | # ERA5 monthly averaged data on single levels from 1940 to present. Accessed: 2023. [Online].
10 | # Available: https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-single-levels-monthly-means?tab=overview
11 | nc_file_path = r'adaptor.mars.internal-202111.nc'
12 | nc_dataset = nc.Dataset(nc_file_path, encoding="gbk")
13 | data1 = nc_dataset.variables['u10'][:]
14 | data2 = nc_dataset.variables['v10'][:]
15 | data3 = data1[0, :, :]
16 | data4 = data2[0, :, :]
17 | df1 = pd.DataFrame(data3)
18 | df2 = pd.DataFrame(data4)
19 | excel_file_path_u10 = r'Meteorological analysis module\Meterorological Data\u10_new.xlsx'
20 | df1.to_excel(excel_file_path_u10, index=False)
21 | excel_file_path_v10 = r'Meteorological analysis module\Meterorological Data\v10_new.xlsx'
22 | df2.to_excel(excel_file_path_v10, index=False)
23 |
24 | print(nc_dataset.variables)
25 |
26 |
27 | # ERA5——u2v2----------------------------------------
28 | # ERA5 monthly averaged data on single levels from 1940 to present. Accessed: 2023. [Online].
29 | # Available: https://cds.climate.copernicus.eu/cdsapp#!/dataset/reanalysis-era5-single-levels-monthly-means?tab=overview
30 | nc_dataset_uv = nc.Dataset(nc_file_path, encoding="gbk")
31 | data1 = nc_dataset_uv.variables['u10'][:]
32 | data2 = nc_dataset_uv.variables['v10'][:]
33 | data3 = data1[0, :, :]
34 | data4 = data2[0, :, :]
35 | u2_data = data3*(np.log(2/0.003) / np.log(10/0.003))
36 | v2_data = data4*(np.log(2/0.003) / np.log(10/0.003))
37 | df_u2 = pd.DataFrame(u2_data)
38 | df_v2 = pd.DataFrame(v2_data)
39 | excel_file_path_u2 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
40 | excel_file_path_v2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
41 | df_u2.to_excel(excel_file_path_u2, index=False)
42 | df_v2.to_excel(excel_file_path_v2, index=False)
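
# The u2/v2 scaling above is the neutral-stability logarithmic wind profile,
# u(z) = u(z_ref) * ln(z / z0) / ln(z_ref / z0), with roughness length z0 = 0.003 m.
# A reusable sketch of the same transformation (illustrative, not called below):
def wind_log_profile(u_ref, z_target=2.0, z_ref=10.0, z0=0.003):
    return u_ref * (np.log(z_target / z0) / np.log(z_ref / z0))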
43 |
44 |
45 |
46 | # National Marine Science Data Center dataset-------------------------------------------
47 | # Reanalysis CORAv1.0. Accessed: 2023. [Online]. Available: https://mds.nmdis.org.cn/pages/dataViewDetail.html?dataSetId=83
48 | nc_file_path1 = r'chinasea_202111_c.nc'
49 | nc_dataset = nc.Dataset(nc_file_path1, encoding="gbk")
50 | data_u = nc_dataset.variables['u'][:]
51 | data_v = nc_dataset.variables['v'][:]
52 | data_u_right = data_u[0, 5:24, 10:21]
53 | data_v_right = data_v[0, 5:24, 10:21]
54 | u = pd.DataFrame(data_u_right)
55 | v = pd.DataFrame(data_v_right)
56 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u.xlsx'
57 | u.to_excel(excel_file_path_u, index=False)
58 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v.xlsx'
59 | v.to_excel(excel_file_path_v, index=False)
60 | df_u = pd.read_excel(excel_file_path_u)
61 | df_u = df_u.fillna(0)
62 | df_u = df_u.transpose()
63 | df_u = df_u.iloc[::-1]
64 | df_u.to_excel(excel_file_path_u, index=False)
65 | df_v = pd.read_excel(excel_file_path_v)
66 | df_v = df_v.fillna(0)
67 | df_v = df_v.transpose()
68 | df_v = df_v.iloc[::-1]
69 | df_v.to_excel(excel_file_path_v, index=False)
70 |
71 | # Bilinear interpolation (note: RectBivariateSpline defaults to cubic splines; pass kx=1, ky=1 for strictly bilinear; other interpolation methods can be substituted)
72 | old_rows, old_cols = df_u.shape
73 | spline_u = RectBivariateSpline(np.arange(old_rows), np.arange(old_cols), df_u.values)
74 | spline_v = RectBivariateSpline(np.arange(old_rows), np.arange(old_cols), df_v.values)
75 | new_rows, new_cols = 21, 37
76 | new_row_coords = np.linspace(0, old_rows - 1, new_rows)
77 | new_col_coords = np.linspace(0, old_cols - 1, new_cols)
78 | new_data_u = spline_u(new_row_coords, new_col_coords)
79 | new_data_v = spline_v(new_row_coords, new_col_coords)
80 | new_df_u = pd.DataFrame(new_data_u)
81 | new_df_v = pd.DataFrame(new_data_v)
82 | new_excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
83 | new_excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
84 | new_df_u.to_excel(new_excel_file_path_u, index=False)
85 | new_df_v.to_excel(new_excel_file_path_v, index=False)
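
# Tiny self-contained check (a sketch) that kx=1, ky=1 gives true bilinear
# interpolation: the centre of a 2x2 cell equals the mean of its four corners.
demo_spline = RectBivariateSpline(np.arange(2), np.arange(2),
                                  np.array([[0.0, 1.0], [2.0, 3.0]]), kx=1, ky=1)
print(demo_spline(0.5, 0.5)[0, 0])  # -> 1.5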
86 |
87 |
88 |
89 |
90 |
91 |
--------------------------------------------------------------------------------
/Meteorological analysis module/step2_excel_to_vector field.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | import pandas as pd
4 | import math
5 |
6 | #vector field--wind(u10/v10)---------------------------------------------------------------------------
7 | excel_file_path_u10 = r'Meteorological analysis module\Meterorological Data\u10.xlsx'
8 | excel_file_path_v10 = r'Meteorological analysis module\Meterorological Data\v10.xlsx'
9 | df1 = pd.read_excel(excel_file_path_u10)
10 | df2 = pd.read_excel(excel_file_path_v10)
11 | array1 = df1.values
12 | array2 = df2.values
13 | list_u10 = [[round(num, 3) for num in row] for row in array1]
14 | list_v10 = [[round(num, 3) for num in row] for row in array2]
15 | u10_array = np.array(list_u10)
16 | v10_array = np.array(list_v10)
17 | u10_array_flipped = np.flipud(u10_array)
18 | v10_array_flipped = np.flipud(v10_array)
19 | magnitude = np.sqrt(u10_array_flipped**2 + v10_array_flipped**2)
20 | x, y = np.meshgrid(np.arange(u10_array_flipped.shape[1]), np.arange(u10_array_flipped.shape[0]))
21 | plt.figure(figsize=(14, 6))
22 | plt.quiver(x, y, u10_array_flipped, v10_array_flipped, magnitude, scale=50, cmap='viridis')
23 | plt.xlim(0, u10_array_flipped.shape[1])
24 | plt.ylim(0, u10_array_flipped.shape[0])
25 | plt.xticks(np.arange(0, u10_array_flipped.shape[1], 2.5))
26 | plt.yticks(np.arange(0, u10_array_flipped.shape[0], 2.5))
27 | plt.colorbar()
28 | plt.title('Wind Vector (10m) Field with Magnitude')
29 | plt.xlabel('Longitude (°E)')
30 | plt.ylabel('Latitude (°N)')
31 | plt.savefig(r'Meteorological analysis module\Meterorological Data\wind_vector_10m_field.png', bbox_inches='tight')
32 |
33 |
34 |
35 | #vector field--wind(u2/v2)----------------------------------------------------------------------------------
36 | excel_file_path_u2 = r'Meteorological analysis module\Meterorological Data\u2.xlsx'
37 | excel_file_path_v2 = r'Meteorological analysis module\Meterorological Data\v2.xlsx'
38 | df_u2 = pd.read_excel(excel_file_path_u2)
39 | df_v2 = pd.read_excel(excel_file_path_v2)
40 | array_u2 = df_u2.values
41 | array_v2 = df_v2.values
42 | list_u2 = [[round(num, 3) for num in row] for row in array_u2]
43 | list_v2 = [[round(num, 3) for num in row] for row in array_v2]
44 | u2_array = np.array(list_u2)
45 | v2_array = np.array(list_v2)
46 | u2_array_flipped = np.flipud(u2_array)
47 | v2_array_flipped = np.flipud(v2_array)
48 | magnitude = np.sqrt(u2_array_flipped**2 + v2_array_flipped**2)
49 | x, y = np.meshgrid(np.arange(u2_array_flipped.shape[1]), np.arange(u2_array_flipped.shape[0]))
50 | plt.figure(figsize=(14, 6))
51 | plt.quiver(x, y, u2_array_flipped, v2_array_flipped, magnitude, scale=50, cmap='viridis')
52 | plt.colorbar()
53 | plt.title('Wind Vector (2m) Field with Magnitude')
54 | plt.xlabel('X-axis')
55 | plt.ylabel('Y-axis')
56 | plt.xticks(np.arange(0, u2_array_flipped.shape[1], 2.5))
57 | plt.yticks(np.arange(0, u2_array_flipped.shape[0], 2.5))
58 | plt.savefig(r'Meteorological analysis module\Meterorological Data\wind_vector_2m_field.png', bbox_inches='tight')
59 |
60 |
61 | #vector field--ocean current field(u/v)----------------------------------------------------------------------------
62 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u.xlsx'
63 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v.xlsx'
64 | df_u = pd.read_excel(excel_file_path_u)
65 | df_v = pd.read_excel(excel_file_path_v)
66 | array_u = df_u.values
67 | array_v = df_v.values
68 | list_u = [[round(num, 3) for num in row] for row in array_u]
69 | list_v = [[round(num, 3) for num in row] for row in array_v]
70 | u_array = np.array(list_u)
71 | v_array = np.array(list_v)
72 | u_array_flipped = np.flipud(u_array)
73 | v_array_flipped = np.flipud(v_array)
74 | magnitude = np.sqrt(u_array_flipped**2 + v_array_flipped**2)
75 | x, y = np.meshgrid(np.arange(u_array_flipped.shape[1]), np.arange(u_array_flipped.shape[0]))
76 | plt.figure(figsize=(14, 6))
77 | plt.quiver(x, y, u_array_flipped, v_array_flipped, magnitude, scale=1, cmap='viridis')
78 | plt.colorbar()
79 | plt.title('Ocean Current Vector (-2.5m) Field with Magnitude')
80 | plt.xlabel('X-axis')
81 | plt.ylabel('Y-axis')
82 | plt.xticks(np.arange(0, u_array_flipped.shape[1], 2.5))
83 | plt.yticks(np.arange(0, u_array_flipped.shape[0], 2.5))
84 | plt.savefig(r'Meteorological analysis module\Meterorological Data\ocean_current_vector_field.png', bbox_inches='tight')
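
# The three blocks above differ only in input paths, title, and quiver scale; a
# shared helper (hypothetical, not part of the original script) could replace them:
def plot_vector_field(u_path, v_path, title, out_path, scale=50):
    u = np.flipud(pd.read_excel(u_path).values)
    v = np.flipud(pd.read_excel(v_path).values)
    magnitude = np.sqrt(u ** 2 + v ** 2)
    x, y = np.meshgrid(np.arange(u.shape[1]), np.arange(u.shape[0]))
    plt.figure(figsize=(14, 6))
    plt.quiver(x, y, u, v, magnitude, scale=scale, cmap='viridis')
    plt.colorbar()
    plt.title(title)
    plt.savefig(out_path, bbox_inches='tight')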
85 |
86 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/best_path_visualization.py:
--------------------------------------------------------------------------------
1 | from scipy import ndimage
2 | from PIL import Image, ImageDraw, ImageColor, ImageFont
3 | import numpy as np
4 | import pandas as pd
5 | import matplotlib.pyplot as plt
6 | from scipy.interpolate import splprep, splev
7 |
8 |
9 |
10 | # ECDIS
11 | # replace it with your multimodal characteristic marine environment
12 | fig, ax = plt.subplots(figsize=(3, 3))
13 | binary_map_data = np.loadtxt(r'Image processing module\Geographic Data\testmap_105108.txt', dtype=int)
14 | binary_map_data = np.array(binary_map_data)
15 | binary_array_revers = np.flipud(binary_map_data)
16 | ax.set_aspect('equal')
17 | ax.set_xlim(0, binary_array_revers.shape[1])
18 | ax.set_ylim(0, binary_array_revers.shape[0])
19 | color_map = {
20 | 0: [255, 228, 181], # rgb(255, 228, 181)
21 |     1: [25, 101, 149]   # rgb(25, 101, 149)
22 | }
23 | cmap = plt.cm.colors.ListedColormap([[255/255, 228/255, 181/255], [25/255, 101/255, 149/255]])
24 | ax.imshow(binary_array_revers, cmap=cmap, interpolation='none', aspect='auto', vmin=0, vmax=1)
25 |
26 |
27 |
28 | # load the vector data of wind field
29 | # replace it with your multimodal characteristic marine environment
30 | excel_file_path_u2 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
31 | excel_file_path_v2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
32 | df_u2 = pd.read_excel(excel_file_path_u2)
33 | df_v2 = pd.read_excel(excel_file_path_v2)
34 | array_u2 = df_u2.iloc[4:16, 12:24].values
35 | array_v2 = df_v2.iloc[4:16, 12:24].values
36 | list_u2 = [[round(num, 5) for num in row] for row in array_u2]
37 | list_v2 = [[round(num, 5) for num in row] for row in array_v2]
38 | u2_array = np.array(list_u2)
39 | v2_array = np.array(list_v2)
40 | u2_array_flipped = np.flipud(u2_array)
41 | v2_array_flipped = np.flipud(v2_array)
42 | for i in range(0, 12):
43 | for j in range(0, 12):
44 | u_value = u2_array_flipped[i, j]
45 | v_value = v2_array_flipped[i, j]
46 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
47 | start_point = (j / 12 * 300, i / 12 * 300)
48 | ax.quiver(start_point[0], start_point[1], 60*u_value, 60*v_value, color=(245 / 255, 245 / 255, 220 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
49 |
50 |
51 | # load the vector data of ocean current field
52 | # replace it with your multimodal characteristic marine environment
53 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
54 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
55 | df_u = pd.read_excel(excel_file_path_u)
56 | df_v = pd.read_excel(excel_file_path_v)
57 | array_u = df_u.iloc[4:16, 12:24].values
58 | array_v = df_v.iloc[4:16, 12:24].values
59 | list_u = [[round(num, 5) for num in row] for row in array_u]
60 | list_v = [[round(num, 5) for num in row] for row in array_v]
61 | u_array = np.array(list_u)
62 | v_array = np.array(list_v)
63 | u_array_flipped = np.flipud(u_array)
64 | v_array_flipped = np.flipud(v_array)
65 | for i in range(0, 12):
66 | for j in range(0, 12):
67 | u = u_array_flipped[i, j]
68 | v = v_array_flipped[i, j]
69 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
70 | start_point = (j / 12 * 300, i / 12 * 300)
71 | ax.quiver(start_point[0], start_point[1], 1200*u, 1200*v, color=(135 / 255, 206 / 255, 250 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
72 |
73 |
74 |
75 | excel_file1 = r'Path planning module\Experiments for Comparison\fusionDQN_x9522_y14992.xlsx'  # fusion path, add your training file path
76 | df1 = pd.read_excel(excel_file1)
77 | x1 = df1.iloc[:, 0]
78 | y1 = df1.iloc[:, 1]
79 | angle = df1.iloc[:, 2]
80 | tck, u = splprep([x1, y1], s=0)
81 | u_new = np.linspace(0, 1, 1000)
82 | xy_smooth = splev(u_new, tck)
83 | ax.plot(xy_smooth[0], xy_smooth[1], color=(255/255,0/255,0/255), linewidth=2)
84 |
85 |
86 |
87 |
88 |
89 |
90 | # obstacles
91 | obstacle_centers = [(26, 175), (122, 102), (140, 161), (129, 56), (106, 10), (215, 154), (200, 16), (105, 33)]
92 | radius = [2, 4, 2, 3, 2, 2, 4, 3]
93 | for center, r in zip(obstacle_centers, radius):
94 | ax.add_patch(plt.Circle((center[0], center[1]), r, color=(212 / 255, 213 / 255, 214 / 255)))
95 |
96 | # agent
97 | agent_center = (95, 22)
98 | agent_radius = 2
99 | ax.add_patch(plt.Circle((agent_center[0], agent_center[1]), agent_radius, color=(0, 1, 0), zorder= 10))
100 |
101 |
102 | # goal point
103 | goal_center = (147, 88)
104 | goal_radius = 10
105 | ax.add_patch(plt.Circle((goal_center[0], goal_center[1]), goal_radius, color=(250 / 255, 109 / 255, 0),zorder= 9))
106 | plt.axis('off')
107 |
108 |
109 | left, right = 76, 174
110 | bottom, top = 10, 108
111 | ax.set_xlim(left, right)
112 | ax.set_ylim(bottom, top)
113 |
114 |
115 |
116 | ax.add_patch(plt.Rectangle((left, bottom),
117 | right-left, top-bottom,
118 | fill=False, edgecolor='black', linewidth=5))
119 | plt.savefig(r'Path planning module\Experiments for Testing Generalization Ability\path_visualization.eps')#replace it with your file path
120 | plt.savefig(r'Path planning module\Experiments for Testing Generalization Ability\path_visualization.pdf')
121 | plt.savefig(r'Path planning module\Experiments for Testing Generalization Ability\path_visualization.png')
122 |
123 | plt.show()
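
# The path curves above are drawn with a parametric B-spline (splprep/splev).
# A minimal toy example of the same idiom (illustrative waypoints, not a planned path):
demo_x, demo_y = [0, 1, 2, 3], [0, 2, 1, 3]
tck_demo, _ = splprep([demo_x, demo_y], s=0)           # s=0: pass through every waypoint
demo_smooth = splev(np.linspace(0, 1, 50), tck_demo)   # 50 samples along the smooth curve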
--------------------------------------------------------------------------------
/Path planning module/Ablation Experiments/demo1_Dueling_ablation.py:
--------------------------------------------------------------------------------
1 | from scipy import ndimage
2 | from PIL import Image, ImageDraw, ImageColor, ImageFont
3 | import numpy as np
4 | import pandas as pd
5 | import matplotlib.pyplot as plt
6 | from scipy.interpolate import splprep, splev
7 |
8 |
9 | #ECDIS
10 | #replace it with your multimodal characteristic marine environment
11 | fig, ax = plt.subplots(figsize=(3, 3))
12 | binary_map_data = np.loadtxt(r'Image processing module\Geographic Data\testmap_105108.txt', dtype=int)
13 | binary_map_data = np.array(binary_map_data)
14 | binary_array_revers = np.flipud(binary_map_data)
15 | ax.set_aspect('equal')
16 | ax.set_xlim(0, binary_array_revers.shape[1])
17 | ax.set_ylim(0, binary_array_revers.shape[0])
18 | color_map = {
19 | 0: [255, 228, 181], # rgb(255, 228, 181)
20 |     1: [25, 101, 149]   # rgb(25, 101, 149)
21 | }
22 | cmap = plt.cm.colors.ListedColormap([[255/255, 228/255, 181/255], [25/255, 101/255, 149/255]])
23 | ax.imshow(binary_array_revers, cmap=cmap, interpolation='none', aspect='auto', vmin=0, vmax=1)
24 |
25 |
26 |
27 | # wind vector field
28 | #replace it with your multimodal characteristic marine environment
29 | excel_file_path_u2 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
30 | excel_file_path_v2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
31 | df_u2 = pd.read_excel(excel_file_path_u2)
32 | df_v2 = pd.read_excel(excel_file_path_v2)
33 | array_u2 = df_u2.iloc[4:16, 12:24].values
34 | array_v2 = df_v2.iloc[4:16, 12:24].values
35 | list_u2 = [[round(num, 5) for num in row] for row in array_u2]
36 | list_v2 = [[round(num, 5) for num in row] for row in array_v2]
37 | u2_array = np.array(list_u2)
38 | v2_array = np.array(list_v2)
39 | u2_array_flipped = np.flipud(u2_array)
40 | v2_array_flipped = np.flipud(v2_array)
41 | for i in range(0, 12):
42 | for j in range(0, 12):
43 | u_value = u2_array_flipped[i, j]
44 | v_value = v2_array_flipped[i, j]
45 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
46 | start_point = (j / 12 * 300, i / 12 * 300)
47 | ax.quiver(start_point[0], start_point[1], 60*u_value, 60*v_value, color=(245 / 255, 245 / 255, 220 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
48 |
49 |
50 | # ocean current vector field
51 | #replace it with your multimodal characteristic marine environment
52 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
53 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
54 | df_u = pd.read_excel(excel_file_path_u)
55 | df_v = pd.read_excel(excel_file_path_v)
56 | array_u = df_u.iloc[4:16, 12:24].values
57 | array_v = df_v.iloc[4:16, 12:24].values
58 | list_u = [[round(num, 5) for num in row] for row in array_u]
59 | list_v = [[round(num, 5) for num in row] for row in array_v]
60 | u_array = np.array(list_u)
61 | v_array = np.array(list_v)
62 | u_array_flipped = np.flipud(u_array)
63 | v_array_flipped = np.flipud(v_array)
64 | for i in range(0, 12):
65 | for j in range(0, 12):
66 | u = u_array_flipped[i, j]#
67 | v = v_array_flipped[i, j]
68 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
69 | start_point = (j / 12 * 300, i / 12 * 300)
70 | ax.quiver(start_point[0], start_point[1], 1200*u, 1200*v, color=(135 / 255, 206 / 255, 250 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
71 |
72 |
73 | #fusion
74 | excel_file1 = r'Path planning module\Experiments for Comparison\fusionDQN_x9522_y14992.xlsx' # replace it with your file path
75 | df1 = pd.read_excel(excel_file1)
76 | x1 = df1.iloc[:, 0]
77 | y1 = df1.iloc[:, 1]
78 | angle = df1.iloc[:, 2]
79 | tck, u = splprep([x1, y1], s=0)
80 | u_new = np.linspace(0, 1, 1000)
81 | xy_smooth = splev(u_new, tck)
82 | ax.plot(xy_smooth[0], xy_smooth[1], color=(225/255,226/255,228/255), linewidth=2,linestyle='--')
83 |
84 |
85 |
86 |
87 | #No Dueling
88 | excel_file1 = r'Path planning module\Ablation Experiments\PDDQN_path1_x9522_y14992.xlsx' # replace it with your file path
89 | df1 = pd.read_excel(excel_file1)
90 | x1 = df1.iloc[:, 0]
91 | y1 = df1.iloc[:, 1]
92 | angle = df1.iloc[:, 2]
93 | tck, u = splprep([x1, y1], s=0)
94 | u_new = np.linspace(0, 1, 1000)
95 | xy_smooth = splev(u_new, tck)
96 | ax.plot(xy_smooth[0], xy_smooth[1], color=(221/255, 69/255, 178/255), linewidth=2)
97 |
98 |
99 |
100 |
101 |
102 | # obstacles
103 | obstacle_centers = [(26, 175), (122, 102), (140, 161), (129, 56), (106, 10), (215, 154), (200, 16), (105, 33)]
104 | radius = [2, 4, 2, 3, 2, 2, 4, 3]
105 | for center, r in zip(obstacle_centers, radius):
106 | ax.add_patch(plt.Circle((center[0], center[1]), r, color=(212 / 255, 213 / 255, 214 / 255)))
107 |
108 | # agent
109 | agent_center = (95, 22)
110 | agent_radius = 2
111 | ax.add_patch(plt.Circle((agent_center[0], agent_center[1]), agent_radius, color=(0, 1, 0), zorder= 10))
112 |
113 |
114 | # goal
115 | goal_center = (147, 88)
116 | goal_radius = 10
117 | ax.add_patch(plt.Circle((goal_center[0], goal_center[1]), goal_radius, color=(250 / 255, 109 / 255, 0),zorder= 9))
118 | plt.axis('off')
119 |
120 |
121 |
122 | left, right = 76, 174
123 | bottom, top = 10, 108
124 | ax.set_xlim(left, right)
125 | ax.set_ylim(bottom, top)
126 |
127 |
128 | ax.add_patch(plt.Rectangle((left, bottom),
129 | right-left, top-bottom,
130 | fill=False, edgecolor='black', linewidth=5))
131 | plt.savefig(r'Path planning module\Ablation Experiments\figure_ablation_demo.eps', format='eps') #replace it with your file path
132 | plt.savefig(r'Path planning module\Ablation Experiments\figure_ablation_demo.pdf', format='pdf')
133 | plt.savefig(r'Path planning module\Ablation Experiments\figure_ablation_demo.jpg', format='jpg')
134 | plt.show()
135 |
136 |
137 |
138 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/demo1_3_algorithm_comparison.py:
--------------------------------------------------------------------------------
1 | from scipy import ndimage
2 | from PIL import Image, ImageDraw, ImageColor, ImageFont
3 | import numpy as np
4 | import pandas as pd
5 | import matplotlib.pyplot as plt
6 | from scipy.interpolate import splprep, splev
7 |
8 |
9 |
10 |
11 |
12 | fig, ax = plt.subplots(figsize=(3, 3))
13 | binary_map_data = np.loadtxt(r'Image processing module\Geographic Data\testmap_105108.txt', dtype=int)
14 | binary_map_data = np.array(binary_map_data)
15 | binary_array_revers = np.flipud(binary_map_data)
16 | ax.set_aspect('equal')
17 | ax.set_xlim(0, binary_array_revers.shape[1])
18 | ax.set_ylim(0, binary_array_revers.shape[0])
19 | color_map = {
20 | 0: [255, 228, 181], # rgb(255, 228, 181)
21 |     1: [25, 101, 149]   # rgb(25, 101, 149)
22 | }
23 | cmap = plt.cm.colors.ListedColormap([[255/255, 228/255, 181/255], [25/255, 101/255, 149/255]])
24 | ax.imshow(binary_array_revers, cmap=cmap, interpolation='none', aspect='auto', vmin=0, vmax=1)
25 |
26 |
27 |
28 | # load the vector data of wind field
29 | excel_file_path_u2 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
30 | excel_file_path_v2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
31 | df_u2 = pd.read_excel(excel_file_path_u2)
32 | df_v2 = pd.read_excel(excel_file_path_v2)
33 | array_u2 = df_u2.iloc[4:16, 12:24].values
34 | array_v2 = df_v2.iloc[4:16, 12:24].values
35 | list_u2 = [[round(num, 5) for num in row] for row in array_u2]
36 | list_v2 = [[round(num, 5) for num in row] for row in array_v2]
37 | u2_array = np.array(list_u2)
38 | v2_array = np.array(list_v2)
39 | u2_array_flipped = np.flipud(u2_array)
40 | v2_array_flipped = np.flipud(v2_array)
41 | for i in range(0, 12):
42 | for j in range(0, 12):
43 | u_value = u2_array_flipped[i, j]
44 | v_value = v2_array_flipped[i, j]
45 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
46 | start_point = (j / 12 * 300, i / 12 * 300)
47 | ax.quiver(start_point[0], start_point[1], 60*u_value, 60*v_value, color=(245 / 255, 245 / 255, 220 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
48 |
49 |
50 | # load the vector data of ocean current field
51 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
52 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
53 | df_u = pd.read_excel(excel_file_path_u)
54 | df_v = pd.read_excel(excel_file_path_v)
55 | array_u = df_u.iloc[4:16, 12:24].values
56 | array_v = df_v.iloc[4:16, 12:24].values
57 | list_u = [[round(num, 5) for num in row] for row in array_u]
58 | list_v = [[round(num, 5) for num in row] for row in array_v]
59 | u_array = np.array(list_u)
60 | v_array = np.array(list_v)
61 | u_array_flipped = np.flipud(u_array)
62 | v_array_flipped = np.flipud(v_array)
63 | for i in range(0, 12):
64 | for j in range(0, 12):
65 | u = u_array_flipped[i, j]
66 | v = v_array_flipped[i, j]
67 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
68 | start_point = (j / 12 * 300, i / 12 * 300)
69 | ax.quiver(start_point[0], start_point[1], 1200*u, 1200*v, color=(135 / 255, 206 / 255, 250 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
70 |
71 |
72 |
73 | excel_file1 = r'Path planning module\Experiments for Comparison\fusionDQN_x9522_y14992.xlsx'  # fusion path, add your training path
74 | df1 = pd.read_excel(excel_file1)
75 | x1 = df1.iloc[:, 0]
76 | y1 = df1.iloc[:, 1]
77 | angle = df1.iloc[:, 2]
78 | tck, u = splprep([x1, y1], s=0)
79 | u_new = np.linspace(0, 1, 1000)
80 | xy_smooth = splev(u_new, tck)
81 | ax.plot(xy_smooth[0], xy_smooth[1], color=(255/255,0/255,0/255), linewidth=2)
82 |
83 |
84 |
85 |
86 | excel_file2 = r'Path planning module\Experiments for Comparison\DQN_x9522_y14992.xlsx'  # DQN path, add your training file path
87 | df2 = pd.read_excel(excel_file2)
88 | x2 = df2.iloc[:, 4]
89 | y2 = df2.iloc[:, 5]
90 | angle = df2.iloc[:, 6]
91 | tck, u = splprep([x2, y2], s=0)
92 | u_new = np.linspace(0, 1, 1000)
93 | xy_smooth = splev(u_new, tck)
94 | ax.plot(xy_smooth[0], xy_smooth[1], color=(235/255,235/255,235/255), linewidth=2)
95 |
96 |
97 |
98 | excel_file_3 = r'Path planning module\Experiments for Comparison\Astar_x9522_y14992.xlsx'  # Astar path, add your training file path
99 | df_3 = pd.read_excel(excel_file_3)
100 | x_3 = df_3.iloc[:, 0]
101 | y_3 = df_3.iloc[:, 1]
102 | tck_3, u_3 = splprep([x_3, y_3], s=0)
103 | u_3_new = np.linspace(0, 1, 1000)
104 | xy_3_smooth = splev(u_3_new, tck_3)
105 | ax.plot(xy_3_smooth[0], xy_3_smooth[1], color=(185/255,185/255,185/255), linewidth=2)
106 |
107 |
108 |
109 |
110 |
111 | # obstacles
112 | obstacle_centers = [(26, 175), (122, 102), (140, 161), (129, 56), (106, 10), (215, 154), (200, 16), (105, 33)]
113 | radius = [2, 4, 2, 3, 2, 2, 4, 3]
114 | for center, r in zip(obstacle_centers, radius):
115 | ax.add_patch(plt.Circle((center[0], center[1]), r, color=(212 / 255, 213 / 255, 214 / 255)))
116 |
117 | # agent
118 | agent_center = (95, 22)
119 | agent_radius = 2
120 | ax.add_patch(plt.Circle((agent_center[0], agent_center[1]), agent_radius, color=(0, 1, 0), zorder= 10))
121 |
122 |
123 | # goal point
124 | goal_center = (147, 88)
125 | goal_radius = 10
126 | ax.add_patch(plt.Circle((goal_center[0], goal_center[1]), goal_radius, color=(250 / 255, 109 / 255, 0),zorder= 9))
127 | plt.axis('off')
128 |
129 |
130 | left, right = 76, 174
131 | bottom, top = 10, 108
132 | ax.set_xlim(left, right)
133 | ax.set_ylim(bottom, top)
134 | # add a black border around the outer boundary
135 | ax.add_patch(plt.Rectangle((left, bottom),
136 | right-left, top-bottom,
137 | fill=False, edgecolor='black', linewidth=5))
138 | plt.savefig(r'Path planning module\Experiments for Comparison\figure_comparison.eps')## replace it with your file path
139 | plt.savefig(r'Path planning module\Experiments for Comparison\figure_comparison.pdf')
140 | plt.savefig(r'Path planning module\Experiments for Comparison\figure_comparison.png')
141 |
142 | plt.show()
143 |
144 |
145 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/path comparison (MLP).py:
--------------------------------------------------------------------------------
1 | from scipy import ndimage
2 | from PIL import Image, ImageDraw, ImageColor, ImageFont
3 | import numpy as np
4 | import pandas as pd
5 | import matplotlib.pyplot as plt
6 | from scipy.interpolate import splprep, splev
7 |
8 |
9 |
10 |
11 |
12 | fig, ax = plt.subplots(figsize=(3, 3))
13 | binary_map_data = np.loadtxt(r'Image processing module\Geographic Data\testmap_105108.txt', dtype=int)
14 | binary_map_data = np.array(binary_map_data)
15 | binary_array_revers = np.flipud(binary_map_data)
16 | ax.set_aspect('equal')
17 | ax.set_xlim(0, binary_array_revers.shape[1])
18 | ax.set_ylim(0, binary_array_revers.shape[0])
19 | color_map = {
20 | 0: [255, 228, 181], # rgb(255, 228, 181)
21 |     1: [25, 101, 149]   # rgb(25, 101, 149)
22 | }
23 | cmap = plt.cm.colors.ListedColormap([[255/255, 228/255, 181/255], [25/255, 101/255, 149/255]])
24 | ax.imshow(binary_array_revers, cmap=cmap, interpolation='none', aspect='auto', vmin=0, vmax=1)
25 |
26 |
27 |
28 | # load the vector data of wind field
29 | excel_file_path_u2 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
30 | excel_file_path_v2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
31 | df_u2 = pd.read_excel(excel_file_path_u2)
32 | df_v2 = pd.read_excel(excel_file_path_v2)
33 | array_u2 = df_u2.iloc[4:16, 12:24].values
34 | array_v2 = df_v2.iloc[4:16, 12:24].values
35 | list_u2 = [[round(num, 5) for num in row] for row in array_u2]
36 | list_v2 = [[round(num, 5) for num in row] for row in array_v2]
37 | u2_array = np.array(list_u2)
38 | v2_array = np.array(list_v2)
39 | u2_array_flipped = np.flipud(u2_array)
40 | v2_array_flipped = np.flipud(v2_array)
41 | for i in range(0, 12):
42 | for j in range(0, 12):
43 | u_value = u2_array_flipped[i, j]
44 | v_value = v2_array_flipped[i, j]
45 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
46 | start_point = (j / 12 * 300, i / 12 * 300)
47 | ax.quiver(start_point[0], start_point[1], 60*u_value, 60*v_value, color=(245 / 255, 245 / 255, 220 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
48 |
49 |
50 | # load the vector data of ocean current field
51 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
52 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
53 | df_u = pd.read_excel(excel_file_path_u)
54 | df_v = pd.read_excel(excel_file_path_v)
55 | array_u = df_u.iloc[4:16, 12:24].values
56 | array_v = df_v.iloc[4:16, 12:24].values
57 | list_u = [[round(num, 5) for num in row] for row in array_u]
58 | list_v = [[round(num, 5) for num in row] for row in array_v]
59 | u_array = np.array(list_u)
60 | v_array = np.array(list_v)
61 | u_array_flipped = np.flipud(u_array)
62 | v_array_flipped = np.flipud(v_array)
63 | for i in range(0, 12):
64 | for j in range(0, 12):
65 | u = u_array_flipped[i, j]
66 | v = v_array_flipped[i, j]
67 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
68 | start_point = (j / 12 * 300, i / 12 * 300)
69 | ax.quiver(start_point[0], start_point[1], 1200*u, 1200*v, color=(135 / 255, 206 / 255, 250 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
70 |
71 |
72 | # fusion
73 | excel_file1 = r'Path planning module\Experiments for Comparison\fusionDQN_x9522_y14992.xlsx'  # add your training path
74 | df1 = pd.read_excel(excel_file1)
75 | x1 = df1.iloc[:, 0]
76 | y1 = df1.iloc[:, 1]
77 | angle = df1.iloc[:, 2]
78 | tck, u = splprep([x1, y1], s=0)
79 | u_new = np.linspace(0, 1, 1000)
80 | xy_smooth = splev(u_new, tck)
81 | ax.plot(xy_smooth[0], xy_smooth[1], color=(255/255,0/255,0/255), linewidth=2)
82 |
83 |
84 |
85 | # DQN
86 | excel_file2 = r'Path planning module\Experiments for Comparison\DQN_x9522_y14992.xlsx'  # add your training file path
87 | df2 = pd.read_excel(excel_file2)
88 | x2 = df2.iloc[:, 4]
89 | y2 = df2.iloc[:, 5]
90 | angle = df2.iloc[:, 6]
91 | tck, u = splprep([x2, y2], s=0)
92 | u_new = np.linspace(0, 1, 1000)
93 | xy_smooth = splev(u_new, tck)
94 | ax.plot(xy_smooth[0], xy_smooth[1], color=(235/255,235/255,235/255), linewidth=2)
95 |
96 |
97 | # Astar
98 | excel_file_3 = r'Path planning module\Experiments for Comparison\Astar_x9522_y14992.xlsx'  # add your training file path
99 | df_3 = pd.read_excel(excel_file_3)
100 | x_3 = df_3.iloc[:, 0]
101 | y_3 = df_3.iloc[:, 1]
102 | tck_3, u_3 = splprep([x_3, y_3], s=0)
103 | u_3_new = np.linspace(0, 1, 1000)
104 | xy_3_smooth = splev(u_3_new, tck_3)
105 | ax.plot(xy_3_smooth[0], xy_3_smooth[1], color=(185/255,185/255,185/255), linewidth=2)
106 |
107 | # Astar_MLP
108 | excel_file_4 = r'Path planning module\Experiments for Comparison\A_star\A_star_MLP (additional test)\Astar_MLP_test.xlsx'  # add your training file path
109 | df_4 = pd.read_excel(excel_file_4)
110 | x_4 = df_4.iloc[:, 0]
111 | y_4 = df_4.iloc[:, 1]
112 | tck_4, u_4 = splprep([x_4, y_4], s=0)
113 | u_4_new = np.linspace(0, 1, 1000)
114 | xy_4_smooth = splev(u_4_new, tck_4)
115 | ax.plot(xy_4_smooth[0], xy_4_smooth[1], color=(190/255,220/255,170/255), linewidth=2)
116 |
117 |
118 |
119 |
120 | # obstacles
121 | obstacle_centers = [(26, 175), (122, 102), (140, 161), (129, 56), (106, 10), (215, 154), (200, 16), (105, 33)]
122 | radius = [2, 4, 2, 3, 2, 2, 4, 3]
123 | for center, r in zip(obstacle_centers, radius):
124 | ax.add_patch(plt.Circle((center[0], center[1]), r, color=(212 / 255, 213 / 255, 214 / 255)))
125 |
126 | # agent
127 | agent_center = (95, 22)
128 | agent_radius = 2
129 | ax.add_patch(plt.Circle((agent_center[0], agent_center[1]), agent_radius, color=(0, 1, 0), zorder= 10))
130 |
131 |
132 | # goal point
133 | goal_center = (147, 88)
134 | goal_radius = 10
135 | ax.add_patch(plt.Circle((goal_center[0], goal_center[1]), goal_radius, color=(250 / 255, 109 / 255, 0),zorder= 9))
136 | plt.axis('off')
137 |
138 |
139 | left, right = 76, 174
140 | bottom, top = 10, 108
141 | ax.set_xlim(left, right)
142 | ax.set_ylim(bottom, top)
143 | # add a black border around the outer boundary
144 | ax.add_patch(plt.Rectangle((left, bottom),
145 | right-left, top-bottom,
146 | fill=False, edgecolor='black', linewidth=5))
147 | plt.savefig(r'Path planning module\Experiments for Comparison\A_star\A_star_MLP (additional test)\MLP_comparison.eps')
148 | plt.savefig(r'Path planning module\Experiments for Comparison\A_star\A_star_MLP (additional test)\MLP_comparison.pdf')
149 | plt.savefig(r'Path planning module\Experiments for Comparison\A_star\A_star_MLP (additional test)\MLP_comparison.png')
150 |
151 | plt.show()
152 |
153 |
154 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Testing Generalization Ability/different_stage_path.py:
--------------------------------------------------------------------------------
1 | from scipy import ndimage
2 | from PIL import Image, ImageDraw, ImageColor, ImageFont
3 | import numpy as np
4 | import pandas as pd
5 | import matplotlib.pyplot as plt
6 | from scipy.interpolate import splprep, splev
7 |
8 |
9 |
10 |
11 |
12 | # ECDIS
13 | # replace it with your multimodal characteristic marine environment
14 | fig, ax = plt.subplots(figsize=(3, 3))
15 | binary_map_data = np.loadtxt(r'Image processing module\Geographic Data\testmap_105108.txt', dtype=int)
16 | binary_map_data = np.array(binary_map_data)
17 | binary_array_revers = np.flipud(binary_map_data)
18 | ax.set_aspect('equal')
19 | ax.set_xlim(0, binary_array_revers.shape[1])
20 | ax.set_ylim(0, binary_array_revers.shape[0])
21 | color_map = {
22 | 0: [255, 228, 181], # rgb(255, 228, 181)
23 |     1: [25, 101, 149]   # rgb(25, 101, 149)
24 | }
25 | cmap = plt.cm.colors.ListedColormap([[255/255, 228/255, 181/255], [25/255, 101/255, 149/255]])
26 | ax.imshow(binary_array_revers, cmap=cmap, interpolation='none', aspect='auto', vmin=0, vmax=1)
27 |
28 |
29 |
30 | # wind vector field
31 | # replace it with your multimodal characteristic marine environment
32 | excel_file_path_u2 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
33 | excel_file_path_v2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
34 | df_u2 = pd.read_excel(excel_file_path_u2)
35 | df_v2 = pd.read_excel(excel_file_path_v2)
36 | array_u2 = df_u2.iloc[4:16, 12:24].values
37 | array_v2 = df_v2.iloc[4:16, 12:24].values
38 | list_u2 = [[round(num, 5) for num in row] for row in array_u2]
39 | list_v2 = [[round(num, 5) for num in row] for row in array_v2]
40 | u2_array = np.array(list_u2)
41 | v2_array = np.array(list_v2)
42 | u2_array_flipped = np.flipud(u2_array)
43 | v2_array_flipped = np.flipud(v2_array)
44 | for i in range(0, 12):
45 | for j in range(0, 12):
46 | u_value = u2_array_flipped[i, j]
47 | v_value = v2_array_flipped[i, j]
48 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
49 | start_point = (j / 12 * 300, i / 12 * 300)
50 | ax.quiver(start_point[0], start_point[1], 60*u_value, 60*v_value, color=(245 / 255, 245 / 255, 220 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
51 |
52 |
53 | # ocean current vector field
54 | # replace it with your training file path
55 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
56 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
57 | df_u = pd.read_excel(excel_file_path_u)
58 | df_v = pd.read_excel(excel_file_path_v)
59 | array_u = df_u.iloc[4:16, 12:24].values
60 | array_v = df_v.iloc[4:16, 12:24].values
61 | list_u = [[round(num, 5) for num in row] for row in array_u]
62 | list_v = [[round(num, 5) for num in row] for row in array_v]
63 | u_array = np.array(list_u)
64 | v_array = np.array(list_v)
65 | u_array_flipped = np.flipud(u_array)
66 | v_array_flipped = np.flipud(v_array)
67 | for i in range(0, 12):
68 | for j in range(0, 12):
69 | u = u_array_flipped[i, j]
70 | v = v_array_flipped[i, j]
71 | if binary_array_revers[int(i / 12 * 300), int(j / 12 * 300)] == 1:
72 | start_point = (j / 12 * 300, i / 12 * 300)
73 | ax.quiver(start_point[0], start_point[1], 1200*u, 1200*v, color=(135 / 255, 206 / 255, 250 / 255), angles='xy', scale_units='xy', scale=4, width=0.008)
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 | # 33%
83 | # replace with your training file path
84 | excel_file1 = r'Path planning module\Experiments for Testing Generalization Ability\fusionDQN_trainning_process_firststage_x9522_y14992.xlsx'  # replace with your Excel file path
85 | df1 = pd.read_excel(excel_file1)
86 | x1 = df1.iloc[:, 0]
87 | y1 = df1.iloc[:, 1]
88 | angle = df1.iloc[:, 2]
89 | tck, u = splprep([x1, y1], s=0)
90 | u_new = np.linspace(0, 1, 1000)
91 | xy_smooth = splev(u_new, tck)
92 | ax.plot(xy_smooth[0], xy_smooth[1], color=(225/255,226/255,228/255), linewidth=1.8,linestyle='--')
93 |
94 | # 67%
95 | # replace with your training file path
96 | excel_file1 = r'Path planning module\Experiments for Testing Generalization Ability\fusionDQN_trainning_process_secondstage_x9522_y14992.xlsx'  # replace with your Excel file path
97 | df1 = pd.read_excel(excel_file1)
98 | x1 = df1.iloc[:, 0]
99 | y1 = df1.iloc[:, 1]
100 | angle = df1.iloc[:, 2]
101 | tck, u = splprep([x1, y1], s=0)
102 | u_new = np.linspace(0, 1, 1000)
103 | xy_smooth = splev(u_new, tck)
104 | ax.plot(xy_smooth[0], xy_smooth[1], color=(225/255,226/255,228/255), linewidth=1.8,linestyle='--')
105 |
106 |
107 | # 100%
108 | # replace with your training file path
109 | excel_file1 = r'Path planning module\Experiments for Testing Generalization Ability\fusionDQN_trainning_process_thirdstage_x9522_y14992.xlsx'  # replace with your Excel file path
110 | df1 = pd.read_excel(excel_file1)
111 | x1 = df1.iloc[:, 0]
112 | y1 = df1.iloc[:, 1]
113 | angle = df1.iloc[:, 2]
114 | tck, u = splprep([x1, y1], s=0)
115 | u_new = np.linspace(0, 1, 1000)
116 | xy_smooth = splev(u_new, tck)
117 | ax.plot(xy_smooth[0], xy_smooth[1], color=(255/255,0/255,0/255), linewidth=2.3)
118 |
119 |
120 |
121 | # obstacles
122 | obstacle_centers = [(26, 175), (122, 102), (140, 161), (129, 56), (106, 10), (215, 154), (200, 16), (105, 33)]
123 | radius = [2, 4, 2, 3, 2, 2, 4, 3]
124 | for center, r in zip(obstacle_centers, radius):
125 | ax.add_patch(plt.Circle((center[0], center[1]), r, color=(212 / 255, 213 / 255, 214 / 255)))
126 |
127 | # agent
128 | agent_center = (95, 22)
129 | agent_radius = 2
130 | ax.add_patch(plt.Circle((agent_center[0], agent_center[1]), agent_radius, color=(0, 1, 0), zorder= 10))
131 |
132 |
133 | # goal
134 | goal_center = (147, 88)
135 | goal_radius = 10
136 | ax.add_patch(plt.Circle((goal_center[0], goal_center[1]), goal_radius, color=(250 / 255, 109 / 255, 0),zorder= 9))
137 | plt.axis('off')
138 |
139 |
140 |
141 | left, right = 76, 174
142 | bottom, top = 10, 108
143 | ax.set_xlim(left, right)
144 | ax.set_ylim(bottom, top)
145 |
146 |
147 | ax.add_patch(plt.Rectangle((left, bottom),
148 | right-left, top-bottom,
149 | fill=False, edgecolor='black', linewidth=5))
150 | # replace with your file path
151 | plt.savefig(r'Path planning module\Experiments for Testing Generalization Ability\path_trainning_process_visualization.eps', format='eps')
152 | plt.savefig(r'Path planning module\Experiments for Testing Generalization Ability\path_trainning_process_visualization.pdf', format='pdf')
153 | plt.savefig(r'Path planning module\Experiments for Testing Generalization Ability\path_trainning_process_visualization.jpg', format='jpg')
154 | plt.show()
155 |
156 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_basic.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | A* grid planning
4 |
5 | author: Atsushi Sakai(@Atsushi_twi)
6 | Nikos Kanargias (nkana@tee.gr)
7 |
8 |
9 | See Wikipedia article (https://en.wikipedia.org/wiki/A*_search_algorithm)
10 |
11 | """
12 |
13 | import math
14 | import time
15 |
16 | import matplotlib.pyplot as plt
17 |
18 | show_animation = True
19 |
20 |
21 | class AStarPlanner:
22 |
23 | def __init__(self, ox, oy, resolution, rr):
24 | """
25 | Initialize grid map for a star planning
26 |
27 | ox: x position list of Obstacles [m]
28 | oy: y position list of Obstacles [m]
29 | resolution: grid resolution [m],
30 | rr: robot radius[m]
31 | """
32 |
33 | self.resolution = resolution
34 | self.rr = rr
35 | self.min_x, self.min_y = 0, 0
36 | self.max_x, self.max_y = 0, 0
37 | self.obstacle_map = None
38 | self.x_width, self.y_width = 0, 0
39 | self.motion = self.get_motion_model()
40 | self.calc_obstacle_map(ox, oy)
41 |
42 | class Node:
43 | def __init__(self, x, y, cost, parent_index):
44 | self.x = x # index of grid
45 | self.y = y # index of grid
46 | self.cost = cost
47 | self.parent_index = parent_index
48 |
49 | def __str__(self):
50 | return str(self.x) + "," + str(self.y) + "," + str(
51 | self.cost) + "," + str(self.parent_index)
52 |
53 |     def planning(self, sx, sy, gx, gy):  # start (sx, sy) and goal (gx, gy) coordinates
54 | """
55 | A star path search
56 | input:
57 | s_x: start x position [m]
58 | s_y: start y position [m]
59 | gx: goal x position [m]
60 | gy: goal y position [m]
61 |
62 | output:
63 | rx: x position list of the final path
64 | ry: y position list of the final path
65 | """
66 | start_node = self.Node(self.calc_xy_index(sx, self.min_x),
67 | self.calc_xy_index(sy, self.min_y), 0.0, -1)
68 | goal_node = self.Node(self.calc_xy_index(gx, self.min_x),
69 | self.calc_xy_index(gy, self.min_y), 0.0, -1)
70 | open_set, closed_set = dict(), dict()
71 | open_set[self.calc_grid_index(start_node)] = start_node
72 |
73 | while 1:
74 | if len(open_set) == 0:
75 | print("Open set is empty..")
76 | break
77 | c_id = min(
78 | open_set,
79 | key=lambda o: open_set[o].cost + self.calc_heuristic(goal_node, open_set[o]))
80 | current = open_set[c_id]
81 |
82 | # show graph
83 | if show_animation: # pragma: no cover
84 | plt.plot(self.calc_grid_position(current.x, self.min_x),
85 | self.calc_grid_position(current.y, self.min_y), "xc")
86 | # for stopping simulation with the esc key.
87 | plt.gcf().canvas.mpl_connect('key_release_event', lambda event: [exit(0) if event.key == 'escape' else None])
88 | if len(closed_set.keys()) % 10 == 0:
89 | plt.pause(0.001)
90 |
91 | if current.x == goal_node.x and current.y == goal_node.y:
92 |                 print("Goal found")
93 | goal_node.parent_index = current.parent_index
94 | goal_node.cost = current.cost
95 | break
96 |
97 | # Remove the item from the open set
98 | del open_set[c_id]
99 |
100 | # Add it to the closed set
101 | closed_set[c_id] = current
102 |
103 | # expand_grid search grid based on motion model
104 | for i, _ in enumerate(self.motion):
105 | node = self.Node(current.x + self.motion[i][0],
106 | current.y + self.motion[i][1],
107 | current.cost + self.motion[i][2], c_id)
108 | n_id = self.calc_grid_index(node)
109 |
110 | # If the node is not safe, do nothing
111 | if not self.verify_node(node):
112 | continue
113 |
114 | if n_id in closed_set:
115 | continue
116 |
117 | if n_id not in open_set:
118 | open_set[n_id] = node # discovered a new node
119 | else:
120 | if open_set[n_id].cost > node.cost:
121 | # This path is the best until now. record it
122 | open_set[n_id] = node
123 |
124 | rx, ry = self.calc_final_path(goal_node, closed_set)
125 |
126 | return rx, ry
127 |
128 | def calc_final_path(self, goal_node, closed_set):
129 | # generate final course
130 | rx, ry = [self.calc_grid_position(goal_node.x, self.min_x)], [
131 | self.calc_grid_position(goal_node.y, self.min_y)]
132 | parent_index = goal_node.parent_index
133 | while parent_index != -1:
134 | n = closed_set[parent_index]
135 | rx.append(self.calc_grid_position(n.x, self.min_x))
136 | ry.append(self.calc_grid_position(n.y, self.min_y))
137 | parent_index = n.parent_index
138 |
139 | return rx, ry
140 |
141 | @staticmethod
142 | def calc_heuristic(n1, n2):
143 | """
144 | Args:
145 | n1 (Node): first grid node
146 | n2 (Node): second grid node
147 |
148 | Returns:
149 | float: weighted Euclidean distance between the two nodes
150 | """
151 | w = 1.0 # weight of heuristic
152 | d = w * math.hypot(n1.x - n2.x, n1.y - n2.y)
153 | return d
154 |
155 | def calc_grid_position(self, index, min_position):
156 | """
157 | calc grid position
158 |
159 | :param index:
160 | :param min_position:
161 | :return:
162 | """
163 | pos = index * self.resolution + min_position
164 | return pos
165 |
166 | def calc_xy_index(self, position, min_pos):
167 | return round((position - min_pos) / self.resolution)
168 |
169 | def calc_grid_index(self, node):
170 | return (node.y - self.min_y) * self.x_width + (node.x - self.min_x)
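# Worked example of the index conventions above (using the demo values from main() below,
# resolution = 2.0 and min_x = min_y = -10):
#   calc_xy_index(10.0, -10)    -> round((10.0 - (-10)) / 2.0) = 10
#   calc_grid_position(10, -10) -> 10 * 2.0 + (-10) = 10.0  (exact round trip)
#   calc_grid_index(node)       -> row-major cell id (y - min_y) * x_width + (x - min_x)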
171 |
172 | def verify_node(self, node):
173 | px = self.calc_grid_position(node.x, self.min_x)
174 | py = self.calc_grid_position(node.y, self.min_y)
175 |
176 | if px < self.min_x:
177 | return False
178 | elif py < self.min_y:
179 | return False
180 | elif px >= self.max_x:
181 | return False
182 | elif py >= self.max_y:
183 | return False
184 |
185 | # collision check
186 | if self.obstacle_map[node.x][node.y]:
187 | return False
188 |
189 | return True
190 |
191 | def calc_obstacle_map(self, ox, oy):
192 | self.min_x = round(min(ox))
193 | self.min_y = round(min(oy))
194 | self.max_x = round(max(ox))
195 | self.max_y = round(max(oy))
196 | print("min_x:", self.min_x)
197 | print("min_y:", self.min_y)
198 | print("max_x:", self.max_x)
199 | print("max_y:", self.max_y)
200 |
201 | self.x_width = round((self.max_x - self.min_x) / self.resolution)
202 | self.y_width = round((self.max_y - self.min_y) / self.resolution)
203 | print("x_width:", self.x_width)
204 | print("y_width:", self.y_width)
205 |
206 | # obstacle map generation
207 | self.obstacle_map = [[False for _ in range(self.y_width)]
208 | for _ in range(self.x_width)]
209 | for ix in range(self.x_width):
210 | x = self.calc_grid_position(ix, self.min_x)
211 | for iy in range(self.y_width):
212 | y = self.calc_grid_position(iy, self.min_y)
213 | for iox, ioy in zip(ox, oy):
214 | d = math.hypot(iox - x, ioy - y)
215 | if d <= self.rr:
216 | self.obstacle_map[ix][iy] = True
217 | break
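# Note: the inflation loop above is O(x_width * y_width * len(ox)); every cell
# within robot radius rr of any obstacle sample is marked occupied, so the
# planner can treat the vessel as a single point on the inflated grid.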
218 |
219 | @staticmethod
220 | def get_motion_model():
221 | # dx, dy, cost
222 | motion = [[1, 0, 1],
223 | [0, 1, 1],
224 | [-1, 0, 1],
225 | [0, -1, 1],
226 | [-1, -1, math.sqrt(2)],
227 | [-1, 1, math.sqrt(2)],
228 | [1, -1, math.sqrt(2)],
229 | [1, 1, math.sqrt(2)]]
230 |
231 | return motion
232 |
233 |
234 | def main():
235 | print(__file__ + " start!!")
236 |
237 | # start and goal position
238 | sx = 10.0 # [m]
239 | sy = 10.0 # [m]
240 | gx = 50.0 # [m]
241 | gy = 50.0 # [m]
242 | grid_size = 2.0 # [m]
243 | robot_radius = 1.0 # [m]
244 |
245 | # set obstacle positions
246 | ox, oy = [], []
247 | for i in range(-10, 60):
248 | ox.append(i)
249 | oy.append(-10.0)
250 | for i in range(-10, 60):
251 | ox.append(60.0)
252 | oy.append(i)
253 | for i in range(-10, 61):
254 | ox.append(i)
255 | oy.append(60.0)
256 | for i in range(-10, 61):
257 | ox.append(-10.0)
258 | oy.append(i)
259 | for i in range(-10, 40):
260 | ox.append(20.0)
261 | oy.append(i)
262 | for i in range(0, 40):
263 | ox.append(40.0)
264 | oy.append(60.0 - i)
265 |
266 | if show_animation:  # pragma: no cover
267 | plt.plot(ox, oy, ".k")
268 | plt.plot(sx, sy, "og")
269 | plt.plot(gx, gy, "xb")
270 | plt.grid(True)
271 | plt.axis("equal")
272 |
273 |
274 | start_time = time.time()
275 |
276 | a_star = AStarPlanner(ox, oy, grid_size, robot_radius)
277 | rx, ry = a_star.planning(sx, sy, gx, gy)
278 | end_time = time.time()
279 | dtime = end_time - start_time
280 | print("running time: %.8s s" % dtime)
281 |
282 | if show_animation: # pragma: no cover
283 | plt.plot(rx, ry, "-r")
284 | plt.pause(0.001)
285 | plt.show()
286 |
287 |
288 | if __name__ == '__main__':
289 | main()
290 |
291 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/grid_with_circles.txt:
--------------------------------------------------------------------------------
1 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0
3 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0
4 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0
5 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0
6 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0
7 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0
8 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0
9 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
10 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
11 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
12 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
13 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
14 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
15 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
16 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
17 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
18 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
19 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
20 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
21 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
22 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
23 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
24 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
25 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
26 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
27 | 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
28 | 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
29 | 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
30 | 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
31 | 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
32 | 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
33 | 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
34 | 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
35 | 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
36 | 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
37 | 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
38 | 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
39 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
40 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
41 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
42 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
43 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
44 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
45 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
46 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
47 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
48 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
49 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
50 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
51 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
52 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
53 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
54 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
55 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
56 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
57 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
58 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
59 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
60 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
61 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
62 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
63 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
64 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
65 | 0 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
66 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
67 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
68 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
69 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
70 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
71 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
72 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
73 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
74 | 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
75 | 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
76 | 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
77 | 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
78 | 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
79 | 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
80 | 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0
81 | 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
82 |
--------------------------------------------------------------------------------
/Image processing module/step1_ECDIS.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import matplotlib.ticker as mticker
4 | import matplotlib as mpl
5 | import pickle
6 | import cartopy.crs as ccrs
7 | import geopandas as gpd
8 | import shapefile
9 | from rasterio.features import rasterize
10 | import rasterio
11 |
12 |
13 | from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
14 | from matplotlib import pyplot as plt
15 | from shapely.geometry import shape, Polygon
16 |
17 | colour_dic = {'1': "white", '2': "black", '3': "red", '4': "green", '5': "blue", '6': "yellow", '7': "grey",
18 | '8': "brown", '9': "amber", '10': "violet", '11': "orange", '12': "magenta", '13': "pink"}
19 | colour_luminance_dic = {'0': "black", '1': "darkgray", '2': "mediumgray", '3': "lightgray", '4': "palegray", '5': "white"}
20 | pattern_dic = {'1': "-----", '2': "|||||", '3': r"\\" + "\\", '4': "xxxxx", '5': ".....", '6': "-----+"}
21 | boyshp_dic = {'1': "^", '2': "v", '3': "o", '4': "|", '5': "s", '6': "D", '7': "P", '8': "X"}
22 |
23 |
24 | mpl.rcParams["font.family"] = 'Arial'
25 | mpl.rcParams["mathtext.fontset"] = 'cm'
26 | mpl.rcParams["font.size"] = 12
27 | mpl.rcParams["axes.linewidth"] = 1
28 |
29 |
30 | class map_enc_charts:
31 | def __init__(self, region, pixel_size, folder_path):
32 | self.region = region
33 | self.pixel_size = pixel_size
34 | self.folder_path = folder_path
35 |
36 | def enc_2d_init(self):
37 | shape_reader_files, gpd_read_files = self.enc_file_layer()
38 | self.enc_obstacle_areas_analysis_layer(gpd_read_files)
39 | # surfaces_dict, features_dict = self.enc_data_analysis_layer(shape_reader_files)
40 | # ax = self.enc_render_layer(surfaces_dict, features_dict)
41 | # obstacle_mask, geo_polygons = self.enc_obstacle_areas_analysis_layer(gpd_read_files)
42 | # return ax, obstacle_mask, geo_polygons
43 |
44 | def enc_file_layer(self):
45 | shape_reader_files = []
46 | gpd_read_files = []
47 | for file_name in os.listdir(self.folder_path):
48 | if file_name.endswith('.shp'):
49 | file_path = os.path.join(self.folder_path, file_name)
50 | shape_reader_files.append(shapefile.Reader(file_path))
51 | gpd_read_files.append(gpd.read_file(file_path))
52 |
53 | return shape_reader_files, gpd_read_files
54 |
55 | @staticmethod
56 | def enc_data_analysis_layer(shape_reader_files):
57 | surface_dict = {}
58 | features_dict = {}
59 |
60 | for shape_reader_file in shape_reader_files:
61 | field_names = [field[0] for field in shape_reader_file.fields[1:]]
62 | for shape_record in shape_reader_file.iterShapeRecords():
63 | if shape_record.shape.shapeType == 0:
64 | continue
65 | shape_geom = shape(shape_record.shape.__geo_interface__)
66 | if shape_geom.is_valid:
67 | attributes = dict(zip(field_names, shape_record.record))
68 | feature = (shape_geom, attributes)
69 | grup = attributes.get('GRUP')
70 | layer = attributes.get('OBJL')
71 | if grup == 1:
72 | if layer not in surface_dict:
73 | surface_dict[layer] = []
74 | surface_dict[layer].append(feature)
75 | elif grup == 2:
76 | if layer not in features_dict:
77 | features_dict[layer] = {}
78 | feature_type = attributes.get('OBJNAM', 'unknown')
79 | if feature_type not in features_dict[layer]:
80 | features_dict[layer][feature_type] = []
81 | features_dict[layer][feature_type].append(feature)
82 |
83 | return surface_dict, features_dict
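# Note: in S-57 ENC data, GRUP 1 marks the "skin of the earth" coverage
# polygons (land and sea surfaces) and GRUP 2 marks the features laid on top;
# the two dictionaries above mirror that split, keyed by object class (OBJL).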
84 |
85 | def enc_render_layer(self, surfaces_dict, features_dict):
86 | ax = plt.axes(projection=ccrs.PlateCarree())
87 | for surface_layer in surfaces_dict:
88 | for shape_geom, attributes in surfaces_dict[surface_layer]:
89 | if shape_geom.geom_type == "Polygon":
90 | if surface_layer == 71:
91 | ax.add_geometries([shape_geom], crs=ccrs.PlateCarree(), edgecolor="black", alpha=0.8,
92 | facecolor='navajowhite')
93 | else:
94 | ax.add_geometries([shape_geom], crs=ccrs.PlateCarree(), edgecolor="black", alpha=0.5,
95 | facecolor='lightskyblue')
96 | else:
97 | print("contains other surface info, but do not figure. please optimize the code!")
98 | return
99 |
100 | for features_layer in features_dict:
101 | for features in features_dict[features_layer]:
102 | for shape_geom, attributes in features_dict[features_layer][features]:
103 | color, luminance, pattern = self.get_colour(attributes.get("COLOUR", ""),
104 | attributes.get("COLPAT", ""))
105 | if shape_geom.geom_type == "Polygon":
106 | ax.add_geometries([shape_geom], crs=ccrs.PlateCarree(),
107 | edgecolor="black", facecolor=color, alpha=0.5)
108 | elif shape_geom.geom_type == "LineString":
109 | ax.add_geometries([shape_geom], crs=ccrs.PlateCarree(),
110 | edgecolor=color, facecolor="None", alpha=1)
111 | elif shape_geom.geom_type == "Point":
112 | marker = attributes.get('BOYSHP', "")
113 | if marker != "":
114 | marker = boyshp_dic[str(marker)]
115 | else:
116 | marker = "o"
117 | point = shape_geom.coords[0]
118 | ax.plot(point[0], point[1], marker=marker, markersize=3, c=color, alpha=1,
119 | transform=ccrs.PlateCarree())
120 |
121 |
122 |
123 | # -----------Add latitude and longitude---------------------------------------
124 | ax.coastlines(resolution='50m')
125 |
126 | gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=0.8, color='k', alpha=0.3, linestyle='--')
127 | gl.top_labels = False
128 | gl.right_labels = False
129 | gl.xformatter = LONGITUDE_FORMATTER
130 | gl.yformatter = LATITUDE_FORMATTER
131 | gl.xlocator = mticker.FixedLocator(np.arange(self.region[0] - 0.5, self.region[1] + 0.5, 1))
132 | gl.ylocator = mticker.FixedLocator(np.arange(self.region[2] - 0.5, self.region[3] + 0.5, 1))
133 | gl.xlabel_style = {'size': 9}
134 | gl.ylabel_style = {'size': 9}
135 | ax.set_extent(self.region)
136 | ax.patch.set_facecolor('#f0f0f0')
137 | # plt.show()
138 |
139 | return ax
140 |
141 | @staticmethod
142 | def get_colour(colour, pattern):
143 | color, luminance, patt = "None", "lightgray", "....."
144 | if len(colour) == 1:
145 | color = colour_dic[colour[0]]
146 | elif len(colour) > 1:
147 | colour = colour.split(',')
148 | color = colour_dic[colour[0]]
149 | luminance = colour_luminance_dic[colour[1]]
150 |
151 | if pattern != '':
152 | patt = pattern_dic[pattern]
153 |
154 | return color, luminance, patt
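# Example of the decoding above (illustrative values): a feature with
# COLOUR "3,1" and COLPAT "4" gives
#   get_colour("3,1", "4") -> ("red", "darkgray", "xxxxx")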
155 |
156 | # +++++++++++++++++++++++++ build env including obstacle and bound +++++++++++++++++++++++++++++++++++
157 | def enc_obstacle_areas_analysis_layer(self, gpd_read_files):
158 | map_height = int((self.region[3] - self.region[2]) / self.pixel_size)
159 | map_width = int((self.region[1] - self.region[0]) / self.pixel_size)
160 | mask = np.zeros([map_height, map_width], dtype=np.uint8)  # rows index latitude, columns index longitude
161 |
162 | # Rasterization converts vector geometries (points, lines, polygons) into a raster grid: a point marks the pixel containing it, a line fills the pixels it crosses, and a polygon fills every pixel it covers.
163 | polygons = []
164 | max_polygon = None
165 | for read_file in gpd_read_files:
166 | if read_file.geometry.type[0] not in ['Polygon', 'Point']:
167 | continue
168 | for index, row in read_file[read_file.geometry.type == 'Polygon'].iterrows():
169 | if row['GRUP'] != 1 or row['OBJL'] == 42:
170 | continue
171 |
172 |
173 |
174 | polygons.append(row['geometry'])
175 |
176 | for index, row in read_file[read_file.geometry.type == 'Point'].iterrows():
177 | obj_name = row.get("OBJNAM", "")
178 | if row['GRUP'] != 2 or obj_name is None or obj_name == "":
179 | continue
180 | lat, lon = int((row['geometry'].y - self.region[2]) / self.pixel_size), \
181 | int((row['geometry'].x - self.region[0]) / self.pixel_size)
182 | try:
183 | mask[lat, lon] = 1  # mark the point obstacle in the uint8 mask
184 | except IndexError:
185 | pass
186 |
187 |
188 | # Convert the collected polygon geometries to raster data and merge them into the 2-D mask array
189 | shapes = [(geom, 1) for geom in polygons]
190 | mask = np.maximum(mask, rasterize(shapes, out_shape=(map_height, map_width),
191 | transform=rasterio.transform.from_bounds(self.region[0], self.region[3], self.region[1],
192 | self.region[2], map_width, map_height)))  # np.maximum keeps the point obstacles marked above instead of overwriting them
193 | mask = mask[::-1, :]
194 |
195 |
196 | # print(f"Binary grid data saved to {OUTPUT_FILE_PATH}")
197 | plt.xticks([])
198 | plt.yticks([])
199 | plt.imshow(mask, cmap='binary')
200 | plt.show()
201 |
202 | return mask, polygons
203 |
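To make the rasterization comment above concrete, here is a self-contained toy example (ours; the square and grid values are hypothetical) using the same rasterio API as enc_obstacle_areas_analysis_layer():

import rasterio
from rasterio.features import rasterize
from shapely.geometry import Polygon

# burn one square into a 10x10 grid covering [0, 10] x [0, 10] (pixel size 1)
square = Polygon([(2, 2), (6, 2), (6, 6), (2, 6)])
transform = rasterio.transform.from_bounds(0, 0, 10, 10, 10, 10)  # west, south, east, north, width, height
toy_mask = rasterize([(square, 1)], out_shape=(10, 10), transform=transform)
print(toy_mask.sum())  # 16: the 4x4 block of cells whose centers fall inside the square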
204 |
205 | def main():
206 |
207 | my_instance = map_enc_charts([105, 108, -4, -1], 0.01, r"D:\Desktop\China_South_Sea\China_South_Sea")
208 | # We have no permission to disclose this chart file, so test data is used in the subsequent experiments (results may be slightly biased)
209 | # It can be replaced with electronic charts of other regions
210 | my_instance.enc_2d_init()
211 | shape_reader_files, gpd_read_files = my_instance.enc_file_layer()
212 | my_instance.enc_obstacle_areas_analysis_layer(gpd_read_files)  # this method requires the GeoDataFrames read above
213 |
214 | if __name__ == "__main__":
215 | main()
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star.py:
--------------------------------------------------------------------------------
1 | """
2 | original A* grid planning
3 | author: Atsushi Sakai(@Atsushi_twi)
4 | Nikos Kanargias (nkana@tee.gr)
5 | See Wikipedia article (https://en.wikipedia.org/wiki/A*_search_algorithm)
6 |
7 | We make slight changes to the A* algorithm as a benchmark for comparison.
8 | 1. Specifically, we introduce a new obstacle environment and action space.
9 | 2. However, it should be noted that Multimodal_characteristics_Marine_environment.py is a continuous environment,
10 | while A_star.py is a discrete grid, so we need to adjust the speed of the agent and the map resolution (grid size) appropriately.
11 | 3. The influence of meteorological factors should be considered and superimposed when the A* path is visualized (in demo1_3_algorithm_comparison.py).
12 |
13 | """
14 | import pandas as pd
15 | import math
16 | import time
17 | import matplotlib.pyplot as plt
18 | import numpy as np
19 | show_animation = True
20 |
21 |
22 | #The following data is the meteorological information corresponding to the test area and can be replaced
23 | excel_file_path1 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
24 | excel_file_path2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
25 | df1 = pd.read_excel(excel_file_path1)
26 | df2 = pd.read_excel(excel_file_path2)
27 | array1 = df1.iloc[4:16, 12:24].values#21*37 5*5 12*12
28 | array2 = df2.iloc[4:16, 12:24].values
29 | list_u2 = [[round(num, 5) for num in row] for row in array1]
30 | list_v2 = [[round(num, 5) for num in row] for row in array2]
31 | u2 = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\u2.txt")
32 | v2 = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\v2.txt")
33 |
34 | #Vector field - ocean current field
35 | #The following data is the meteorological information corresponding to the test area and can be replaced
36 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
37 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
38 | df_u = pd.read_excel(excel_file_path_u)
39 | df_v = pd.read_excel(excel_file_path_v)
40 | array_u = df_u.iloc[4:16, 12:24].values
41 | array_v = df_v.iloc[4:16, 12:24].values
42 | list_u = [[round(num, 5) for num in row] for row in array_u]
43 | list_v = [[round(num, 5) for num in row] for row in array_v]
44 | u = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\u.txt")
45 | v = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\v.txt")
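# Note: the Excel reads above appear to document the raw data source only; the
# planner itself uses the pre-cropped txt copies loaded via np.loadtxt
# (list_u2, list_v2, list_u, list_v are not referenced below).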
46 |
47 | class AStarPlanner:
48 |
49 | def __init__(self, ox, oy, resolution, rr, angle_threshold=36):
50 | """
51 | Initialize grid map for a star planning
52 |
53 | ox: x position list of Obstacles [m]
54 | oy: y position list of Obstacles [m]
55 | resolution: grid resolution [m]
56 | rr: robot radius [m]
57 | """
58 |
59 | self.resolution = resolution
60 | self.rr = rr
61 | self.angle_threshold = angle_threshold
62 | self.min_x, self.min_y = 0, 0
63 | self.max_x, self.max_y = 0, 0
64 | self.obstacle_map = None
65 | self.x_width, self.y_width = 0, 0
66 | self.motion = self.get_motion_model()
67 | self.calc_obstacle_map(ox, oy)
68 |
69 | class Node:
70 | def __init__(self, x, y, cost, parent_index):
71 | self.x = x # index of grid
72 | self.y = y # index of grid
73 | self.cost = cost
74 | self.parent_index = parent_index
75 |
76 | def __str__(self):
77 | return str(self.x) + "," + str(self.y) + "," + str(
78 | self.cost) + "," + str(self.parent_index)
79 |
80 | def planning(self, sx, sy, gx, gy):
81 | """
82 | A star path search
83 | input:
84 | s_x: start x position [m]
85 | s_y: start y position [m]
86 | gx: goal x position [m]
87 | gy: goal y position [m]
88 |
89 | output:
90 | rx: x position list of the final path
91 | ry: y position list of the final path
92 | """
93 |
94 | start_node = self.Node(self.calc_xy_index(sx, self.min_x),
95 | self.calc_xy_index(sy, self.min_y), 0.0, -1)
96 | goal_node = self.Node(self.calc_xy_index(gx, self.min_x),
97 | self.calc_xy_index(gy, self.min_y), 0.0, -1)
98 |
99 | open_set, closed_set = dict(), dict()
100 | open_set[self.calc_grid_index(start_node)] = start_node
101 | prev_angle = math.degrees(math.atan2(gy - sy, gx - sx))  # atan2 handles gx == sx and gives the correct quadrant
102 | angle = prev_angle
103 |
104 | while 1:
105 | if len(open_set) == 0:
106 | print("Open set is empty..")
107 | break
108 |
109 | c_id = min(
110 | open_set,
111 | key=lambda o: open_set[o].cost + self.calc_heuristic(goal_node, open_set[o]))
112 | current = open_set[c_id]
113 |
114 | # # show graph
115 | # if show_animation: # pragma: no cover
116 | # plt.plot(self.calc_grid_position(current.x, self.min_x),
117 | # self.calc_grid_position(current.y, self.min_y), "xc")
118 | # # for stopping simulation with the esc key.
119 | # plt.gcf().canvas.mpl_connect('key_release_event', lambda event: [exit(0) if event.key == 'escape' else None])
120 | # if len(closed_set.keys()) % 10 == 0:
121 | # plt.pause(0.001)
122 |
123 | if math.hypot(current.x - goal_node.x, current.y - goal_node.y) <= 10:
124 | print("Find goal")
125 | goal_node.parent_index = current.parent_index
126 | goal_node.cost = current.cost
127 | break
128 |
129 | # Remove the item from the open set
130 | del open_set[c_id]
131 |
132 | # Add it to the closed set
133 | closed_set[c_id] = current
134 |
135 | # expand_grid search grid based on motion model
136 | for i, _ in enumerate(self.motion):
137 | radian = math.radians(self.motion[i][1]+angle)
138 | dx = math.cos(radian)*7  # converted speed; adjust based on the grid size
139 | dy = math.sin(radian)*7
140 | node = self.Node(round(current.x + dx + u2[current.x, 66-current.y] + u[current.x, 66-current.y]),
141 | round(current.y + dy + v2[current.x, 66-current.y] + v[current.x, 66-current.y]),
142 | current.cost + self.motion[i][0], c_id)
143 |
144 | n_id = self.calc_grid_index(node)
145 |
146 | # If the node is not safe, do nothing
147 | if not self.verify_node(node):
148 | continue
149 | # Check for turning angle constraint
150 |
151 |
152 | if n_id in closed_set:
153 | continue
154 |
155 | if n_id not in open_set:
156 | open_set[n_id] = node # discovered a new node
157 | else:
158 | if open_set[n_id].cost > node.cost:
159 | # This path is the best until now. record it
160 | open_set[n_id] = node
161 | angle = (self.motion[i][1]+angle) % 360  # motion[i][1] is the steering angle; motion[i][0] is the cost
162 |
163 | rx, ry = self.calc_final_path(goal_node, closed_set)
164 |
165 | return rx, ry
166 |
167 | def calc_final_path(self, goal_node, closed_set):
168 | # generate final course
169 | rx, ry = [self.calc_grid_position(goal_node.x, self.min_x)], [
170 | self.calc_grid_position(goal_node.y, self.min_y)]
171 | parent_index = goal_node.parent_index
172 | while parent_index != -1:
173 | n = closed_set[parent_index]
174 | rx.append(self.calc_grid_position(n.x, self.min_x))
175 | ry.append(self.calc_grid_position(n.y, self.min_y))
176 | parent_index = n.parent_index
177 |
178 | return rx, ry
179 |
180 | @staticmethod
181 | def calc_heuristic(n1, n2):
182 | """
183 | Args:
184 | n1 (Node): first grid node
185 | n2 (Node): second grid node
186 |
187 | Returns:
188 | float: weighted Euclidean distance between the two nodes
189 | """
190 | w = 1.0 # weight of heuristic
191 | d = w * math.hypot(n1.x - n2.x, n1.y - n2.y)
192 | return d
193 |
194 | def calc_grid_position(self, index, min_position):
195 | """
196 | calc grid position
197 | :param index:
198 | :param min_position:
199 | :return:
200 | """
201 | pos = index * self.resolution + min_position
202 | return pos
203 |
204 | def calc_xy_index(self, position, min_pos):
205 | return round((position - min_pos) / self.resolution)
206 |
207 | def calc_grid_index(self, node):
208 | return (node.y - self.min_y) * self.x_width + (node.x - self.min_x)
209 |
210 | def verify_node(self, node):
211 | px = self.calc_grid_position(node.x, self.min_x)
212 | py = self.calc_grid_position(node.y, self.min_y)
213 |
214 | if px < self.min_x:
215 | return False
216 | elif py < self.min_y:
217 | return False
218 | elif px >= self.max_x:
219 | return False
220 | elif py >= self.max_y:
221 | return False
222 |
223 | # collision check
224 | if self.obstacle_map[node.x][node.y]:
225 | return False
226 |
227 | return True
228 |
229 | def calc_obstacle_map(self, ox, oy):
230 |
231 | self.min_x = round(min(ox))
232 | self.min_y = round(min(oy))
233 | self.max_x = round(max(ox))
234 | self.max_y = round(max(oy))
235 | # print("min_x:", self.min_x)
236 | # print("min_y:", self.min_y)
237 | # print("max_x:", self.max_x)
238 | # print("max_y:", self.max_y)
239 |
240 | self.x_width = round((self.max_x - self.min_x) / self.resolution)
241 | self.y_width = round((self.max_y - self.min_y) / self.resolution)
242 | # print("x_width:", self.x_width)
243 | # print("y_width:", self.y_width)
244 |
245 | # obstacle map generation
246 | self.obstacle_map = [[False for _ in range(self.y_width)]
247 | for _ in range(self.x_width)]
248 | for ix in range(self.x_width):
249 | x = self.calc_grid_position(ix, self.min_x)
250 | for iy in range(self.y_width):
251 | y = self.calc_grid_position(iy, self.min_y)
252 | for iox, ioy in zip(ox, oy):
253 | d = math.hypot(iox - x, ioy - y)
254 | if d <= self.rr:
255 | self.obstacle_map[ix][iy] = True
256 | break
257 |
258 | @staticmethod
259 | def get_motion_model():
260 | # dx, dy, cost
261 | # motion = [[1, 0, 1],
262 | # [0, 1, 1],
263 | # [-1, 0, 1],
264 | # [0, -1, 1],
265 | # [-1, -1, math.sqrt(2)],
266 | # [-1, 1, math.sqrt(2)],
267 | # [1, -1, math.sqrt(2)],
268 | # [1, 1, math.sqrt(2)]]
269 | motion = []
270 | for a in [-36,-18,0,18,36]:
271 | cost = 1
272 | angle_motion = a
273 | motion.append([cost, angle_motion])
274 |
275 | return motion
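# Note: unlike the original [dx, dy, cost] entries kept commented out above,
# each entry here is [cost, steering_angle_deg]; planning() converts the
# steering angle into a displacement at the converted speed.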
276 |
277 |
278 |
279 | def main():
280 | print(__file__ + " start!!")
281 |
282 | # start and goal position
283 | sx = 4 # [m] # sx, sy: the position of the starting point
284 | sy = 7 # [m]
285 | gx = 60 # [m] #gx,gy the position of the goal point
286 | gy = 73 # [m]
287 | grid_size = 1 # [m] we need to adjust the map resolution appropriately.
288 | robot_radius = 2.0 # [m]
289 |
290 | # # set obstacle positions
291 | # ox, oy = [], []
292 | # for i in range(-10, 60):
293 | # ox.append(i)
294 | # oy.append(-10.0)
295 | # for i in range(-10, 60):
296 | # ox.append(60.0)
297 | # oy.append(i)
298 | # for i in range(-10, 61):
299 | # ox.append(i)
300 | # oy.append(60.0)
301 | # for i in range(-10, 61):
302 | # ox.append(-10.0)
303 | # oy.append(i)
304 | # for i in range(-10, 40):
305 | # ox.append(20.0)
306 | # oy.append(i)
307 | # for i in range(0, 40):
308 | # ox.append(40.0)
309 | # oy.append(60.0 - i)
310 | output_file_path = r'Path planning module\Experiments for Comparison\A_star\grid_with_circles.txt'
311 | grid_with_circles = np.loadtxt(output_file_path, dtype=int)
312 | # print(grid_with_circles.shape[0])
313 | # print(grid_with_circles.shape[1])
314 | obstacle_x_coordinates = []
315 | obstacle_y_coordinates = []
316 | for y in range(grid_with_circles.shape[0]):
317 | for x in range(grid_with_circles.shape[1]):
318 | if grid_with_circles[y, x] == 0:
319 | obstacle_x_coordinates.append(x)
320 | obstacle_y_coordinates.append(80-y)
321 | ox = obstacle_x_coordinates
322 | oy = obstacle_y_coordinates
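# Note: grid_with_circles.txt stores 0 = obstacle and 1 = navigable water; the
# row flip (y -> 80 - y) maps the first text row to the top of the plot.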
323 |
324 | if show_animation: # pragma: no cover
325 | plt.plot(ox, oy, ".k")
326 | plt.plot(sx, sy, "og")
327 | plt.plot(gx, gy, "xb")
328 | plt.grid(True)
329 | plt.axis("equal")
330 |
331 |
332 |
333 | # start_time = time.time()
334 | a_star = AStarPlanner(ox, oy, grid_size, robot_radius)
335 | rx, ry = a_star.planning(sx, sy, gx, gy)
336 | end_time = time.time()
337 | # dtime = end_time - start_time
338 | # print("running time: %.8s s" % dtime)
339 |
340 |
341 | if show_animation: # pragma: no cover
342 | plt.plot(rx, ry, "-r")
343 | plt.pause(0.001)
344 | plt.show()
345 |
346 |
347 | if __name__ == '__main__':
348 | main()
349 |
350 |
--------------------------------------------------------------------------------
/Path planning module/Experiments for Comparison/A_star/A_star_MLP (additional test)/A_star_MLP.py:
--------------------------------------------------------------------------------
1 | """
2 | ## Additional supplementary experiments
3 | We also make an additional attempt to integrate the idea of multimodal environment information with the A* algorithm,
4 | where a multi-layer perceptron network learns from historical optimal path data and replaces the original heuristic function h.
5 | There is a slight improvement in the results.
6 | This data-driven approach can also combine multimodal environment information with other path planning algorithms (such as A*).
7 | """
8 | import pandas as pd
9 | import math
10 | import time
11 | import matplotlib.pyplot as plt
12 | import numpy as np
13 | import torch
14 | import torch.nn as nn
15 | import pandas as pd
16 | show_animation = True
17 |
18 |
19 | #The following data is the meteorological information corresponding to the test area and can be replaced
20 | excel_file_path1 = r'Meteorological analysis module\Meterorological Data\u2_new.xlsx'
21 | excel_file_path2 = r'Meteorological analysis module\Meterorological Data\v2_new.xlsx'
22 | df1 = pd.read_excel(excel_file_path1)
23 | df2 = pd.read_excel(excel_file_path2)
24 | array1 = df1.iloc[4:16, 12:24].values#21*37 5*5 12*12
25 | array2 = df2.iloc[4:16, 12:24].values
26 | list_u2 = [[round(num, 5) for num in row] for row in array1]
27 | list_v2 = [[round(num, 5) for num in row] for row in array2]
28 | u2 = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\u2.txt")
29 | v2 = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\v2.txt")
30 |
31 | #Vector field - ocean current field
32 | #The following data is the meteorological information corresponding to the test area and can be replaced
33 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
34 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
35 | df_u = pd.read_excel(excel_file_path_u)
36 | df_v = pd.read_excel(excel_file_path_v)
37 | array_u = df_u.iloc[4:16, 12:24].values
38 | array_v = df_v.iloc[4:16, 12:24].values
39 | list_u = [[round(num, 5) for num in row] for row in array_u]
40 | list_v = [[round(num, 5) for num in row] for row in array_v]
41 | u = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\u.txt")
42 | v = np.loadtxt(r"Path planning module\Experiments for Comparison\A_star\meteorological data\v.txt")
43 |
44 |
45 |
46 | class MLP(nn.Module):
47 | def __init__(self):
48 | super(MLP, self).__init__()
49 | self.fc1 = nn.Linear(7, 64)
50 | self.relu = nn.ReLU()
51 | self.fc2 = nn.Linear(64, 32)
52 | self.fc3 = nn.Linear(32, 1)
53 |
54 | def forward(self, x):
55 | x = self.fc1(x)
56 | x = self.relu(x)
57 | x = self.fc2(x)
58 | x = self.relu(x)
59 | x = self.fc3(x)
60 | return x
61 | model = MLP()
62 | model.load_state_dict(torch.load(r'Path planning module\Experiments for Comparison\A_star\mlp_model.pth'))
63 |
64 | class AStarPlanner:
65 |
66 | def __init__(self, ox, oy, resolution, rr, angle_threshold=36):
67 | """
68 | Initialize grid map for a star planning
69 |
70 | ox: x position list of Obstacles [m]
71 | oy: y position list of Obstacles [m]
72 | resolution: grid resolution [m]
73 | rr: robot radius [m]
74 | """
75 |
76 | self.resolution = resolution
77 | self.rr = rr
78 | self.angle_threshold = angle_threshold
79 | self.min_x, self.min_y = 0, 0
80 | self.max_x, self.max_y = 0, 0
81 | self.obstacle_map = None
82 | self.x_width, self.y_width = 0, 0
83 | self.motion = self.get_motion_model()
84 | self.calc_obstacle_map(ox, oy)
85 |
86 | class Node:
87 | def __init__(self, x, y, cost, parent_index):
88 | self.x = x # index of grid
89 | self.y = y # index of grid
90 | self.cost = cost
91 | self.parent_index = parent_index
92 |
93 | def __str__(self):
94 | return str(self.x) + "," + str(self.y) + "," + str(
95 | self.cost) + "," + str(self.parent_index)
96 |
97 | def planning(self, sx, sy, gx, gy):
98 | """
99 | A star path search
100 | input:
101 | s_x: start x position [m]
102 | s_y: start y position [m]
103 | gx: goal x position [m]
104 | gy: goal y position [m]
105 |
106 | output:
107 | rx: x position list of the final path
108 | ry: y position list of the final path
109 | """
110 |
111 | start_node = self.Node(self.calc_xy_index(sx, self.min_x),
112 | self.calc_xy_index(sy, self.min_y), 0.0, -1)
113 | goal_node = self.Node(self.calc_xy_index(gx, self.min_x),
114 | self.calc_xy_index(gy, self.min_y), 0.0, -1)
115 |
116 | open_set, closed_set = dict(), dict()
117 | open_set[self.calc_grid_index(start_node)] = start_node
118 | prev_angle = math.degrees(math.atan2(gy - sy, gx - sx))  # atan2 handles gx == sx and gives the correct quadrant
119 | angle = prev_angle
120 |
121 | while 1:
122 | if len(open_set) == 0:
123 | print("Open set is empty..")
124 | break
125 |
126 | c_id = min(
127 | open_set,
128 | key=lambda o: open_set[o].cost + self.calc_heuristic(open_set[o].x, open_set[o].y,
129 | goal_node.x, goal_node.y, 0,
130 | u[goal_node.x, 66-goal_node.y],
131 | v[goal_node.x, 66-goal_node.y],
132 | u2[goal_node.x, 66-goal_node.y],
133 | v2[goal_node.x, 66-goal_node.y]))
134 | current = open_set[c_id]
135 |
136 | # # show graph
137 | # if show_animation: # pragma: no cover
138 | # plt.plot(self.calc_grid_position(current.x, self.min_x),
139 | # self.calc_grid_position(current.y, self.min_y), "xc")
140 | # # for stopping simulation with the esc key.
141 | # plt.gcf().canvas.mpl_connect('key_release_event', lambda event: [exit(0) if event.key == 'escape' else None])
142 | # if len(closed_set.keys()) % 10 == 0:
143 | # plt.pause(0.001)
144 |
145 | if math.hypot(current.x - goal_node.x, current.y - goal_node.y) <= 10:
146 | print("Find goal")
147 | goal_node.parent_index = current.parent_index
148 | goal_node.cost = current.cost
149 | break
150 |
151 | # Remove the item from the open set
152 | del open_set[c_id]
153 |
154 | # Add it to the closed set
155 | closed_set[c_id] = current
156 |
157 | # expand_grid search grid based on motion model
158 | for i, _ in enumerate(self.motion):
159 | radian = math.radians(self.motion[i][1]+angle)
160 | dx = math.cos(radian)*7  # converted speed; adjust based on the grid size
161 | dy = math.sin(radian)*7
162 | node = self.Node(round(current.x + dx + u2[current.x, 66-current.y] + u[current.x, 66-current.y]),
163 | round(current.y + dy + v2[current.x, 66-current.y] + v[current.x, 66-current.y]),
164 | current.cost + self.motion[i][0], c_id)
165 |
166 | n_id = self.calc_grid_index(node)
167 |
168 | # If the node is not safe, do nothing
169 | if not self.verify_node(node):
170 | continue
171 | # Check for turning angle constraint
172 |
173 |
174 | if n_id in closed_set:
175 | continue
176 |
177 | if n_id not in open_set:
178 | open_set[n_id] = node # discovered a new node
179 | else:
180 | if open_set[n_id].cost > node.cost:
181 | # This path is the best until now. record it
182 | open_set[n_id] = node
183 | angle = (self.motion[i][1]+angle) % 360  # motion[i][1] is the steering angle; motion[i][0] is the cost
184 |
185 | rx, ry = self.calc_final_path(goal_node, closed_set)
186 |
187 | return rx, ry
188 |
189 | def calc_final_path(self, goal_node, closed_set):
190 | # generate final course
191 | rx, ry = [self.calc_grid_position(goal_node.x, self.min_x)], [
192 | self.calc_grid_position(goal_node.y, self.min_y)]
193 | parent_index = goal_node.parent_index
194 | while parent_index != -1:
195 | n = closed_set[parent_index]
196 | rx.append(self.calc_grid_position(n.x, self.min_x))
197 | ry.append(self.calc_grid_position(n.y, self.min_y))
198 | parent_index = n.parent_index
199 |
200 | return rx, ry
201 |
202 | @staticmethod
203 | def calc_heuristic(gx, gy, x, y, angle, u, v, u2, v2):
204 | w = 0.9 # weight of multimodal environmental information
205 | d = (1-w)* math.hypot(gx - x, gy - y) + w * model(torch.tensor([x, y, angle, u, v, u2, v2], dtype=torch.float32)).item()
206 | #d = model(torch.tensor([x, y, angle, u, v, u2, v2], dtype=torch.float32)).item()
207 | return d
208 |
209 | def calc_grid_position(self, index, min_position):
210 | """
211 | calc grid position
212 | :param index:
213 | :param min_position:
214 | :return:
215 | """
216 | pos = index * self.resolution + min_position
217 | return pos
218 |
219 | def calc_xy_index(self, position, min_pos):
220 | return round((position - min_pos) / self.resolution)
221 |
222 | def calc_grid_index(self, node):
223 | return (node.y - self.min_y) * self.x_width + (node.x - self.min_x)
224 |
225 | def verify_node(self, node):
226 | px = self.calc_grid_position(node.x, self.min_x)
227 | py = self.calc_grid_position(node.y, self.min_y)
228 |
229 | if px < self.min_x:
230 | return False
231 | elif py < self.min_y:
232 | return False
233 | elif px >= self.max_x:
234 | return False
235 | elif py >= self.max_y:
236 | return False
237 |
238 | # collision check
239 | if self.obstacle_map[node.x][node.y]:
240 | return False
241 |
242 | return True
243 |
244 | def calc_obstacle_map(self, ox, oy):
245 |
246 | self.min_x = round(min(ox))
247 | self.min_y = round(min(oy))
248 | self.max_x = round(max(ox))
249 | self.max_y = round(max(oy))
250 | # print("min_x:", self.min_x)
251 | # print("min_y:", self.min_y)
252 | # print("max_x:", self.max_x)
253 | # print("max_y:", self.max_y)
254 |
255 | self.x_width = round((self.max_x - self.min_x) / self.resolution)
256 | self.y_width = round((self.max_y - self.min_y) / self.resolution)
257 | # print("x_width:", self.x_width)
258 | # print("y_width:", self.y_width)
259 |
260 | # obstacle map generation
261 | self.obstacle_map = [[False for _ in range(self.y_width)]
262 | for _ in range(self.x_width)]
263 | for ix in range(self.x_width):
264 | x = self.calc_grid_position(ix, self.min_x)
265 | for iy in range(self.y_width):
266 | y = self.calc_grid_position(iy, self.min_y)
267 | for iox, ioy in zip(ox, oy):
268 | d = math.hypot(iox - x, ioy - y)
269 | if d <= self.rr:
270 | self.obstacle_map[ix][iy] = True
271 | break
272 |
273 | @staticmethod
274 | def get_motion_model():
275 | # dx, dy, cost
276 | # motion = [[1, 0, 1],
277 | # [0, 1, 1],
278 | # [-1, 0, 1],
279 | # [0, -1, 1],
280 | # [-1, -1, math.sqrt(2)],
281 | # [-1, 1, math.sqrt(2)],
282 | # [1, -1, math.sqrt(2)],
283 | # [1, 1, math.sqrt(2)]]
284 | motion = []
285 | for a in [-36,-18,0,18,36]:
286 | cost = 1
287 | angle_motion = a
288 | motion.append([cost, angle_motion])
289 |
290 | return motion
291 |
292 |
293 |
294 | def main():
295 | print(__file__ + " start!!")
296 |
297 | # start and goal position
298 | sx = 4 # [m] # sx, sy: the position of the starting point
299 | sy = 7 # [m]
300 | gx = 60 # [m] #gx,gy the position of the goal point
301 | gy = 73 # [m]
302 | grid_size = 1 # [m] we need to adjust the map resolution appropriately.
303 | robot_radius = 2.0 # [m]
304 |
305 | # # set obstacle positions
306 | # ox, oy = [], []
307 | # for i in range(-10, 60):
308 | # ox.append(i)
309 | # oy.append(-10.0)
310 | # for i in range(-10, 60):
311 | # ox.append(60.0)
312 | # oy.append(i)
313 | # for i in range(-10, 61):
314 | # ox.append(i)
315 | # oy.append(60.0)
316 | # for i in range(-10, 61):
317 | # ox.append(-10.0)
318 | # oy.append(i)
319 | # for i in range(-10, 40):
320 | # ox.append(20.0)
321 | # oy.append(i)
322 | # for i in range(0, 40):
323 | # ox.append(40.0)
324 | # oy.append(60.0 - i)
325 | output_file_path = r'Path planning module\Experiments for Comparison\A_star\grid_with_circles.txt'
326 | grid_with_circles = np.loadtxt(output_file_path, dtype=int)
327 | # print(grid_with_circles.shape[0])
328 | # print(grid_with_circles.shape[1])
329 | obstacle_x_coordinates = []
330 | obstacle_y_coordinates = []
331 | for y in range(grid_with_circles.shape[0]):
332 | for x in range(grid_with_circles.shape[1]):
333 | if grid_with_circles[y, x] == 0:
334 | obstacle_x_coordinates.append(x)
335 | obstacle_y_coordinates.append(80-y)
336 | ox = obstacle_x_coordinates
337 | oy = obstacle_y_coordinates
338 |
339 | if show_animation: # pragma: no cover
340 | plt.plot(ox, oy, ".k")
341 | plt.plot(sx, sy, "og")
342 | plt.plot(gx, gy, "xb")
343 | plt.grid(True)
344 | plt.axis("equal")
345 |
346 |
347 |
348 | # start_time = time.time()
349 | a_star = AStarPlanner(ox, oy, grid_size, robot_radius)
350 | rx, ry = a_star.planning(sx, sy, gx, gy)
351 | end_time = time.time()
352 | # dtime = end_time - start_time
353 | # print("running time: %.8s s" % dtime)
354 |
355 |
356 | if show_animation: # pragma: no cover
357 | plt.plot(rx, ry, "-r")
358 | plt.pause(0.001)
359 | plt.show()
360 |
361 |
362 | if __name__ == '__main__':
363 | main()
364 |
365 |
--------------------------------------------------------------------------------
/Path planning module/.idea/workspace.xml:
--------------------------------------------------------------------------------
(PyCharm workspace.xml: IDE-state XML with no project substance; content omitted.)
--------------------------------------------------------------------------------
/Path planning module/Multimodal_characteristics_Marine_environment.py:
--------------------------------------------------------------------------------
1 | """
2 | @File : fusion_DQN.py
3 | @Author: DavidLin
4 | @Date : 2024/12/7
5 | @Contact : davidlin659562@gmail.com
6 | @Description :
7 | This is the environment construction file for this article "RL-based USV Path Planning Under the Marine Multimodal Features Considerations"
8 | state space = [ppx, ppy, angle, tx, ty, u2_wind, v2_wind, u_ocean, v_ocean]
9 | action space = [-36°, -18°, 0°, 18°, 36°]
10 | """
11 | import matplotlib.pyplot as plt
12 | import math
13 | import gym
14 | from gym import spaces
15 | from gym.utils import seeding
16 | import numpy as np
17 | from gym.envs.classic_control import rendering
18 | from scipy.spatial import ConvexHull
19 | import netCDF4 as nc
20 | import pandas as pd
21 | from txt_to_matrix import read_binary_txt
22 | # from opencv import center_change, radii
23 | import pandas as pd
24 | import torch
25 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
26 | #The following random obstacle data can be replaced by Edge detection and Contour extraction results
27 | # The position of obstacles is theoretically randomly generated
28 | center_change = [(26, 175), (122, 102), (140, 161), (129, 56), (106, 10), (215, 154), (200, 16), (105, 33)]
29 | radii = [2, 4, 2, 3, 2, 2, 4, 3]
30 |
31 |
32 | RAD2DEG = 57.29577951308232
33 | CHANGE = False
34 |
35 |
36 |
37 | #----------------------------Multimodal environment information fusion---------------------------------------------
38 | #The images below are test images and can be replaced with maps of different areas
39 | file_path = r'Image processing module\Geographic Data\testmap_105108.txt'
40 | binary_grid_reverse = read_binary_txt(file_path)
41 | binary_array_reverse = np.array(binary_grid_reverse)
42 | binary_grid = np.flipud(binary_array_reverse)
43 |
44 |
45 | #Vector field - wind field
46 | #The following data is the meteorological information corresponding to the test area and can be replaced
47 | excel_file_path1 = r'Meteorological analysis module\Meterorological Data\u10_new.xlsx'
48 | excel_file_path2 = r'Meteorological analysis module\Meterorological Data\v10_new.xlsx'
49 | df1 = pd.read_excel(excel_file_path1)
50 | df2 = pd.read_excel(excel_file_path2)
51 | array1 = df1.iloc[4:16, 12:24].values#21*37 5*5 12*12
52 | array2 = df2.iloc[4:16, 12:24].values
53 | list_u10 = [[round(num, 5) for num in row] for row in array1]
54 | list_v10 = [[round(num, 5) for num in row] for row in array2]
55 |
56 |
57 |
58 | #Vector field - ocean current field
59 | #The following data is the meteorological information corresponding to the test area and can be replaced
60 | excel_file_path_u = r'Meteorological analysis module\Meterorological Data\u_interpolated.xlsx'
61 | excel_file_path_v = r'Meteorological analysis module\Meterorological Data\v_interpolated.xlsx'
62 | df_u = pd.read_excel(excel_file_path_u)
63 | df_v = pd.read_excel(excel_file_path_v)
64 | array_u = df_u.iloc[4:16, 12:24].values
65 | array_v = df_v.iloc[4:16, 12:24].values
66 | list_u = [[round(num, 5) for num in row] for row in array_u]
67 | list_v = [[round(num, 5) for num in row] for row in array_v]
68 |
69 |
70 |
71 |
72 |
73 | class PuckWorldEnv(gym.Env):
74 | metadata = {
75 | 'render.modes': ['human', 'rgb_array'],
76 | 'video.frames_per_second': 30
77 | }
78 | #The following hyperparameters need to be adjusted based on the selected scenario.
79 | def __init__(self):
80 | self.width = 300 #screen width
81 | self.length = 300 #screen length
82 | self.speed = 7
83 | self.accel_x = 1 # agent acceleration
84 | self.accel_y = 1
85 | self.interval = 400
86 | self.goal_dis = 10 # expected goal distance
87 | self.t = 0 # puck world clock
88 | self.update_time = 1 # time for target randomize its position
89 | self.circles_this = center_change
90 | self.radius_this = radii
91 | self.low = np.array([0, # agent position x
92 | 0,
93 | -np.inf,
94 | 0, # target position x
95 | 0,
96 | -np.inf,
97 | -np.inf,
98 | -np.inf,
99 | -np.inf
100 | ])
101 | self.high = np.array([300,
102 | 300,
103 | np.inf,
104 | 300,
105 | 300,
106 | np.inf,
107 | np.inf,
108 | np.inf,
109 | np.inf
110 | ])
111 |
112 | self.reward = 0 # for rendering
113 | self.collision = 0
114 | self.action = None # for rendering
115 | self.viewer = None
116 | self.done = None
117 | self.action_space = spaces.Discrete(5)
118 | self.observation_space = spaces.Box(self.low, self.high)#state space
119 | self.reset()
120 |
121 |
122 | def step(self, action):
123 |
124 |
125 | action = int(action)  # cast the (possibly GPU) tensor action to a plain int
126 | assert self.action_space.contains(action), \
127 | "%r (%s) invalid" % (action, type(action))
128 | self.reward = 0
129 | self.collision = 0
130 |
131 |
132 | self.action = action # action for rendering
133 | ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right= self.state
134 |
135 | angle_last = angle
136 | angle_last = round(angle_last,5)
137 | #action space--------------------------------
138 | if action == 0:
139 | angle -= 36
140 | angle = angle % 360
141 | if action == 1:
142 | angle -= 18
143 | angle = angle % 360
144 | if action == 2:
145 | angle = angle
146 | angle = angle % 360
147 | if action == 3:
148 | angle += 18
149 | angle = angle % 360
150 | if action == 4:
151 | angle +=36
152 | angle = angle % 360
153 |
154 |
155 | #Original position
156 | ppx_last = ppx
157 | ppx_last = round(ppx_last,5)
158 | ppy_last = ppy
159 | ppy_last = round(ppy_last,5)
160 |
161 |
162 |
163 |
164 | #update agent position
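# Reading of the constants below (our interpretation): speed [m/s] * interval [s]
# is the distance travelled per step in metres; dividing by the map extent in
# metres (~333 km per side is assumed) and multiplying by the 300-cell grid
# width converts that displacement into grid cells per step.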
165 | pvx = self.speed * self.interval / 333000 * 300 * math.cos(math.radians(angle))#Converted to grid velocity
166 | pvx = round(pvx,5)
167 | pvy = self.speed * self.interval / 333000 * 300 * math.sin(math.radians(angle))
168 | pvy = round(pvy,5)
169 | ppx += pvx + u_right + u2_right
170 | ppx = round(ppx,5)
171 | ppy += pvy + v_right + v2_right # update agent position
172 | ppy = round(ppy,5)
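        # Unit-conversion sketch (assuming the 300-cell grid spans a 333,000 m swath of real
        # terrain, as the constant above suggests): `speed` (m/s) times `interval` (s) gives
        # metres travelled per step, and multiplying by 300/333000 converts metres into grid
        # cells. The wind drift (u2_right, v2_right) and current drift (u_right, v_right)
        # added above were converted the same way, so all displacement terms share grid units.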
173 |
174 |
175 | dx, dy = ppx - tx, ppy - ty # calculate distance from
176 | dis = self.compute_dis(abs(dx), abs(dy)) # agent to target
177 |         self.reward += round((self.goal_dis - dis)/100, 5) # distance-based shaping reward
178 |
179 |
180 |
181 | self.done = bool(dis <= self.goal_dis)
182 | if self.done :
183 | self.reward += 100
184 | return self.state, self.reward, self.done, self.collision
185 |
186 |
187 |
188 | #Detect collisions with edges
189 |         if ppx <= 90: # left bound; adjust according to task requirements
190 |             ppx = ppx_last
191 |             ppy = ppy_last
192 |             angle = 540 - angle_last  # reflect the heading off the vertical wall
193 |             angle = angle % 360
194 |             self.reward -= 10
195 |             # Penalty value (0.1~10) can be adjusted based on the selected scenario and task requirements
196 |             self.collision = 1
197 |             self.state = (ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right)  # persist the reverted position and reflected heading
198 |             return self.state, self.reward, self.done, self.collision
199 |
200 |         if ppx >= 155: # right bound; adjust according to task requirements
201 |             ppx = ppx_last
202 |             ppy = ppy_last
203 |             angle = 540 - angle_last  # reflect the heading off the vertical wall
204 |             angle = angle % 360
205 |             self.reward -= 10
206 |             # Penalty value (0.1~10) can be adjusted based on the selected scenario and task requirements
207 |             self.collision = 1
208 |             self.state = (ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right)  # persist the reverted position and reflected heading
209 |
210 |             return self.state, self.reward, self.done, self.collision
211 |
212 |         if ppy <= 17: # bottom bound; adjust according to task requirements
213 |             ppx = ppx_last
214 |             ppy = ppy_last
215 |             angle = 360 - angle_last  # reflect the heading off the horizontal wall
216 |             angle = angle % 360
217 |             self.reward -= 10
218 |             # Penalty value (0.1~10) can be adjusted based on the selected scenario and task requirements
219 |             self.collision = 1
220 |             self.state = (ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right)  # persist the reverted position and reflected heading
221 |
222 |             return self.state, self.reward, self.done, self.collision
223 |
224 |         if ppy >= 97: # top bound; adjust according to task requirements
225 |             ppx = ppx_last
226 |             ppy = ppy_last
227 |             angle = 360 - angle_last  # reflect the heading off the horizontal wall
228 |             angle = angle % 360
229 |             self.reward -= 10
230 |             self.collision = 1
231 |             # Penalty value (0.1~10) for edge collisions can be adjusted based on the selected scenario and task requirements
232 |             self.state = (ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right)  # persist the reverted position and reflected heading
233 |             return self.state, self.reward, self.done, self.collision
234 |
235 | def point_to_line_distance(point, line_coefficients):
236 | x0, y0 = point
237 | a, b, c = line_coefficients
238 | numerator = abs(a * x0 + b * y0 + c)
239 | denominator = math.sqrt(a**2 + b**2)
240 | distance = numerator / denominator
241 | return distance
242 |         def line_coefficients(point1, point2):
243 |             x1, y1 = point1
244 |             x2, y2 = point2
245 |             if x2 == x1:  # vertical segment: x = x1  <=>  1*x + 0*y - x1 = 0
246 |                 return 1, 0, -x1
247 |             m = (y2 - y1) / (x2 - x1)
248 |             b = -m * x1 + y1
249 |             return -m, 1, -b
250 | def linear_interpolation(point1, point2, num_points):
251 | x1, y1 = point1
252 | x2, y2 = point2
253 | x_values = [x1 + (x2 - x1) * i / (num_points - 1) for i in range(num_points)]
254 | y_values = [y1 + (y2 - y1) * i / (num_points - 1) for i in range(num_points)]
255 | interpolated_points = list(zip(x_values, y_values))
256 | return interpolated_points
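        # A minimal usage sketch of the three helpers above (illustrative values, assuming the
        # helpers were callable at module level):
        #   >>> line_coefficients((1, 1), (3, 5))        # line y = 2x - 1  ->  -2x + y + 1 = 0
        #   (-2.0, 1, 1.0)
        #   >>> point_to_line_distance((0, 0), (-2.0, 1, 1.0))
        #   0.4472135954999579
        #   >>> linear_interpolation((0, 0), (3, 3), 4)
        #   [(0.0, 0.0), (1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
        # step() uses them below to sample points along the last move and test each against the
        # binary grid and the detected obstacle circles.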
257 |
258 |
259 | #Detect collisions with shorelines
260 | a,b,c = line_coefficients((ppx,ppy), (ppx_last,ppy_last))
261 | num_points = 10
262 | interpolated_points = linear_interpolation((ppx,ppy), (ppx_last,ppy_last), num_points)
263 |         for p in interpolated_points:
264 |             if binary_grid[math.ceil(p[1])][math.ceil(p[0])] == 0:  # the sampled point falls on an obstacle (land) cell
265 |                 ppx = ppx_last
266 |                 ppy = ppy_last
267 |                 angle = angle_last
268 |                 self.collision = 1
269 |                 self.reward -= 10
270 |                 # Penalty value (0.1~10) for shoreline collisions can be adjusted
271 |                 # based on the selected scenario and task requirements
272 |                 return self.state, self.reward, self.done, self.collision
273 |
274 |
275 | #Detect collisions with circle obstacles-----------------------------------------------------------------------
276 |         for idx, centre in enumerate(self.circles_this):
277 |             distance_ = point_to_line_distance((centre[0], centre[1]), (a, b, c))  # note: distance to the infinite line through the step, not only the segment
278 |             radius_ = self.radius_this[idx]
279 |             if distance_ <= radius_:
280 |                 ppx = ppx_last
281 |                 ppy = ppy_last
282 |                 angle = angle_last
283 |                 self.collision = 1
284 |                 self.reward -= 10
285 |                 # Penalty value for circle-obstacle collisions can be adjusted based on the selected scenario and task requirements
286 |                 return self.state, self.reward, self.done, self.collision
287 |
288 |
289 |
290 |
291 |
292 | row_index_10 = int(3*(1-ppy/300)//0.25)
293 | column_index_10 = int(ppx/300*3//0.25)
294 |
295 | u10 = list_u10[row_index_10][column_index_10]#x
296 | v10 = list_v10[row_index_10][column_index_10]#y
297 |         u2 = u10 * (math.log(2/0.003) / math.log(10/0.003))  # log wind profile, consistent with reset(); the von Karman constant cancels in the ratio
298 |         v2 = v10 * (math.log(2/0.003) / math.log(10/0.003))
299 |         u2_right = round(u2*self.interval/333000*300, 5)  # convert m/s into grid cells per step
300 |         v2_right = round(v2*self.interval/333000*300, 5)
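        # The 10 m wind is scaled down to 2 m height via the logarithmic wind profile
        # u(z) ~ ln(z / z0):
        #     u2 / u10 = ln(2 / z0) / ln(10 / z0) ~= 0.802,  with roughness length z0 = 0.003 m.
        # The row/column lookup above assumes the 12x12 weather field covers the 300x300 scene
        # at 0.25-degree spacing over a 3-degree extent, hence index = (3 * fraction) // 0.25.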
301 |
302 |
303 |
304 | row_index = int(3*(1-ppy/300)//0.25)
305 | column_index = int(ppx/300*3//0.25)
306 | u = list_u[row_index][column_index]#x
307 | v = list_v[row_index][column_index]#y
308 | u_right = round(u*self.interval/333000*300, 5)
309 | v_right = round(v*self.interval/333000*300, 5)
310 |
311 |
312 |         self.state = (ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right)  # 9-dimensional state space
313 |
314 | # print(self.collision)
315 | return self.state, self.reward, self.done, self.collision
316 |
317 |
318 | def compute_dis(self, dx, dy):
319 | return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))
320 |
321 |
322 | def reset(self):
323 | ppx = 95
324 | ppy = 22
325 | tx = 149
326 | ty = 92
327 |         angle = math.degrees(math.atan2(ty - ppy, tx - ppx))  # initial heading pointing at the target
328 | row_index_10 = int(3*(1-ppy/300)//0.25)
329 | column_index_10 = int(ppx/300*3//0.25)
330 | u10 = list_u10[row_index_10][column_index_10]#x direction
331 | v10 = list_v10[row_index_10][column_index_10]#y direction
332 | u2 = u10 * (math.log(2/0.003) / math.log(10/0.003))
333 | v2 = v10 * (math.log(2/0.003) / math.log(10/0.003))
334 | u2_right = round(u2*self.interval/333000*300, 5)
335 | v2_right = round(v2*self.interval/333000*300, 5)
336 | row_index = int(3*(1-ppy/300)//0.25)
337 | column_index = int(ppx/300*3//0.25)
338 | u = list_u[row_index][column_index]#x direction
339 | v = list_v[row_index][column_index]#y direction
340 | u_right = round(u*self.interval/333000*300, 5)
341 | v_right = round(v*self.interval/333000*300, 5)
342 | self.state = np.array([ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right])
343 | #self.state = [330, 330, angle, 805, 575].clone().detach().to(device, dtype=torch.float32)
344 | # self.state = torch.tensor([ppx, ppy, pvx, pvy, tx, ty], device=device, dtype=torch.float32)
345 | return self.state
346 |
347 | def render(self,x,y, mode='human', close=False,DRAW=False):
348 | if close:
349 | if self.viewer is not None:
350 | self.viewer.close()
351 | self.viewer = None
352 | return
353 |
354 | rad = 4 # agent
355 | t_rad = 10 # target
356 | # obs_rad_list = self.radius_this #the radius of obstacle circles
357 |
358 |
359 | if self.viewer is None:
360 | from gym.envs.classic_control import rendering
361 | self.viewer = rendering.Viewer(self.length, self.width)
362 |
363 |
364 | #render the ECDIS---------------------------------------------------------------------
365 | for i in range(len(binary_grid)):
366 | for j in range(len(binary_grid[0])):
367 | if binary_grid[i][j] == 1:
368 | color = (25/255, 101/255, 149/255) # ocean
369 | else:
370 | color = (255/255, 228/255,181/255) # shoreline
371 | rect = rendering.FilledPolygon([(j, i),((j + 1), i),((j + 1), (i + 1)), (j, (i + 1))])
372 | rect.set_color(*color)
373 | self.viewer.add_geom(rect)
374 |
375 |
376 | # render the detected circles----------------------------------------------------------------------------------
377 | numb=0
378 | global circle_points
379 | circle_points={}
380 | global circle_trans
381 | circle_trans={}
382 | for number in radii:
383 | from gym.envs.classic_control import rendering
384 | key="circle_point"+str(numb)
385 | circle_points[key]= rendering.make_circle(number, 30, True)
386 | circle_points[key].set_color(119/255, 136/255, 153/255)
387 | self.viewer.add_geom(circle_points[key])
388 | circle_trans[key] = rendering.Transform()
389 | circle_points[key].add_attr(circle_trans[key])
390 | numb += 1
391 |
392 | target = rendering.make_circle(t_rad, 30, True)
393 | target.set_color(255/255, 215/255, 0)
394 | self.viewer.add_geom(target)
395 | target_circle = rendering.make_circle(t_rad, 30, False)
396 | target_circle.set_color(0, 0, 0)
397 | self.viewer.add_geom(target_circle)
398 | self.target_trans = rendering.Transform()
399 | target.add_attr(self.target_trans)
400 | target_circle.add_attr(self.target_trans)
401 |
402 | self.agent = rendering.make_circle(rad, 30, True)
403 | self.agent.set_color(0, 1, 0)
404 | self.viewer.add_geom(self.agent)
405 | self.agent_trans = rendering.Transform()
406 | self.agent.add_attr(self.agent_trans)
407 | agent_circle = rendering.make_circle(rad, 30, False)
408 | agent_circle.set_color(0, 0, 0)
409 | agent_circle.add_attr(self.agent_trans)
410 | self.viewer.add_geom(agent_circle)
411 |
412 |
413 | ppx, ppy, angle, tx, ty, u2_right, v2_right, u_right, v_right= self.state
414 | self.target_trans.set_translation(tx, ty)
415 | self.agent_trans.set_translation(ppx, ppy)
416 |
417 |
418 | # Render obstacles---------------------------------------------------------------------------
419 | nuum=0
420 | for k in center_change:
421 | key="circle_point"+str(nuum)
422 | circle_trans[key].set_translation(k[0], k[1])
423 | nuum+=1
424 |
425 |         if DRAW:
426 |
427 | points2 = [(x[k], y[k]) for k in range(len(x))]
428 | self.viewer.draw_polyline(points2, color=(0, 0, 255), linewidth=5)
429 |
430 | return self.viewer.render(return_rgb_array=mode == 'rgb_array')
431 |
432 |
433 |
434 |
435 | if __name__ == "__main__":
436 | env = PuckWorldEnv()
437 | print("hello")
438 | nfs = env.observation_space.shape[0]
439 | nfa = env.action_space.n
440 | print("nfs:%d; nfa:%d" % (nfs,nfa))
441 | print(env.observation_space)
442 | print(env.action_space)
443 |
444 |
445 |     for _ in range(10000):
446 |         done = False  # reset the termination flag each episode (otherwise only the first episode runs)
447 |         env.reset()
448 |         while not done:
449 |             env.render(0,0)
450 |             s, r, done, collision = env.step(env.action_space.sample())
451 |
452 |
453 |
454 |
455 | print("env closed")
456 |
457 |
458 |
--------------------------------------------------------------------------------
/Path planning module/fusion_DQN.py:
--------------------------------------------------------------------------------
1 | """
2 | @File : fusion_DQN.py
3 | @Author: DavidLin
4 | @Date : 2024/12/7
5 | @Contact : davidlin659562@gmail.com
6 | @Description :
7 |     This is the main program file for the article "RL-based USV Path Planning Under the Marine Multimodal Features Considerations".
8 |     1. The preprocessing steps should first be completed with the Image processing module and the Meteorological analysis module.
9 |        (Of course, the processed test data has also been prepared.)
10 |     2. The interactive environment file is Multimodal_characteristics_Marine_environment.py.
11 |     3. In every file, the data-file paths to replace and the adjustable parameters are marked.
12 |     If you have any questions or suggestions, please feel free to contact me.
13 | """
14 |
15 | import torch
16 | print(torch.__version__)
17 | import torch.nn as nn
18 | import torch.nn.functional as F
19 | import numpy as np
20 | import matplotlib.pyplot as plt
21 | import gym
22 | import time
23 | import pickle
24 | import datetime
25 | import Multimodal_characteristics_Marine_environment
26 | import os
27 | import pandas as pd
28 | # from torch.utils.tensorboard import SummaryWriter
29 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
30 |
31 |
32 | # render: online visualization
33 | # Double structure/Multivariate Weighted Dueling Network/Priority Sampling Mechanism : Ablation experiments
34 |
35 | RENDER = True
36 | LAST_RENDER = False
37 | OLD_NET= False
38 | '''
39 | For the ablation experiments:
40 |     fusion DQN: all three flags below are True.
41 |     No Double / No Dueling / No Priority: the corresponding flag is False, the others are True.
42 |     No Multimodal: remove the multimodal information fusion and learning parts (the extended state
43 |         space and the multivariate weighted Dueling network) from the Dueling head and from Multimodal_characteristics_Marine_environment.py.
44 | '''
45 | DOUBLE= False
46 | DUELING= False
47 | PRIORITIZED= False
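# A configuration sketch for the ablation variants described above (flag names are the module's own):
#   fusion DQN  : DOUBLE=True,  DUELING=True,  PRIORITIZED=True
#   No Double   : DOUBLE=False, DUELING=True,  PRIORITIZED=True
#   No Dueling  : DOUBLE=True,  DUELING=False, PRIORITIZED=True
#   No Priority : DOUBLE=True,  DUELING=True,  PRIORITIZED=False
# As set above (all False), the script runs the plain DQN baseline.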
48 |
49 | #The following hyperparameters can be adjusted
50 | MAX_EPISODES = 10000
51 | ZEROS_EPSILONS_NUMBER = 30  # reserved; not referenced below
52 | RENDER_NUMBER = 1           # number of final episodes to render when LAST_RENDER is True
53 | MAX_EPSILON = 1             # reserved; not referenced below
54 | BATCH_SIZE = 32
55 | LR = 0.001
56 | GAMMA = 0.9
57 | TARGET_REPLACE_ITER = 100
58 | MEMORY_CAPACITY = 10000
59 | HIDDEN_NUMBER=128
60 | env = Multimodal_characteristics_Marine_environment.PuckWorldEnv()
61 | N_ACTIONS = env.action_space.n
62 | N_OBSERVATION = env.observation_space.shape[0]
63 | '''
64 | puckworld_n_observation = 9,
65 | self.observation_space = spaces.Box(self.low, self.high)
66 | '''
67 |
68 | ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample().shape # to confirm the shape
69 |
70 | #record
71 | total_reward,mean_reward,steps,episode,mean_Loss,trainingtime,total_Loss,total_collision,mean_collision = [],[],[],[],[],[],[],[],[]
72 |
73 |
74 | class SumTree(object):  # sum tree: leaves hold transition priorities, internal nodes hold their children's sums; data holds (s,a,r,s_)
75 |
76 | data_pointer = 0
77 |
78 | def __init__(self, capacity):
79 | self.capacity = capacity # for all priority values
80 | self.tree = np.zeros(2 * capacity - 1)
81 | self.data = np.zeros(capacity, dtype=object) # for all transitions
82 |
83 |
84 | #update the value and priority
85 | def add(self, p, data): #
86 | tree_idx = self.data_pointer + self.capacity - 1
87 |
88 | self.data[self.data_pointer] = data
89 | self.update(tree_idx, p)
90 |
91 | self.data_pointer += 1
92 | if self.data_pointer >= self.capacity:
93 | self.data_pointer = 0
94 |
95 | #update the priority of the whole sumtree
96 | def update(self, tree_idx, p):
97 | change = p - self.tree[tree_idx]
98 | #update the priority
99 | self.tree[tree_idx] = p
100 | while tree_idx != 0: # this method is faster than the recursive loop in the reference code
101 | tree_idx = (tree_idx - 1) // 2
102 | # self.tree[tree_idx] += change
103 | self.tree[tree_idx] = np.add(self.tree[tree_idx], change)
104 |
105 | def get_leaf(self, v):
106 | parent_idx = 0
107 | while True: # the while loop is faster than the method in the reference code
108 | cl_idx = 2 * parent_idx + 1
109 | cr_idx = cl_idx + 1
110 | if cl_idx >= len(self.tree):
111 | leaf_idx = parent_idx
112 | break
113 | else: # downward search, always search for a higher priority node
114 | if v <= self.tree[cl_idx]:
115 | parent_idx = cl_idx
116 | else:
117 | v -= self.tree[cl_idx]
118 | parent_idx = cr_idx
119 |
120 | data_idx = leaf_idx - self.capacity + 1
121 | return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
122 |
123 |
124 |     def total_p(self):  # total priority mass
125 | return self.tree[0] # the root
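    # A worked example (capacity 4, leaf priorities [3, 1, 2, 4]); each internal node stores
    # the sum of its children, so self.tree becomes [10, 4, 6, 3, 1, 2, 4]:
    #   >>> t = SumTree(4)
    #   >>> for p in (3, 1, 2, 4): t.add(p, ('s', 'a', 'r', 's_'))
    #   >>> t.total_p()
    #   10.0
    # get_leaf(5): at the root, 5 > left child (4), so subtract 4 and descend right; then
    # 1 <= left child (2), so descend left and stop at the leaf with priority 2. Larger
    # priorities own larger slices of [0, total_p()), so they are sampled more often.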
126 |
127 |
128 | class Memory(object): #memory space
129 |     """
130 |     This SumTree code is a modified version of the original from:
131 |     https://github.com/jaara/AI-blog/blob/master/Seaquest-DDQN-PER.py
132 |     """
133 | epsilon = 0.01 # small amount to avoid zero priority
134 | alpha = 0.6 # [0~1] convert the importance of TD error to priority
135 | beta = 0.4 #importance-sampling, from initial value increasing to 1
136 | beta_increment_per_sampling = 0.001
137 | abs_err_upper = 1. # clipped abs error
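    # With the constants above, prioritized experience replay follows:
    #   p_i  = min(|TD error_i| + epsilon, abs_err_upper) ** alpha   (leaf priority, see batch_update)
    #   P(i) = p_i / sum_k p_k                                       (sampling probability)
    #   w_i  = (N * P(i)) ** (-beta) / max_j w_j                     (importance-sampling weight)
    # sample() computes w_i equivalently as (P(i) / min_j P(j)) ** (-beta), and beta anneals
    # from 0.4 toward 1 in increments of 0.001 per sampling call.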
138 |
139 | def __init__(self, capacity):
140 | self.tree = SumTree(capacity)
141 |
142 |     def store(self, transition):  # new transitions enter with the current max leaf priority
143 | max_p = np.max(self.tree.tree[-self.tree.capacity:])
144 | if max_p == 0:
145 | max_p = self.abs_err_upper
146 | self.tree.add(max_p, transition)
147 |
148 | def sample(self, n):
149 | b_idx = np.empty((n, 1), dtype=np.int32)
150 | b_memory = np.empty((n, self.tree.data[0].size))
151 | ISWeights = np.empty((n, 1))
152 | #importance sampling weights
153 | segment = self.tree.total_p() / n
154 | self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
155 | min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p()
156 |
157 |
158 | for i in range(n):
159 | a = segment * i
160 | b = segment * (i + 1)
161 | lower_bound = np.random.uniform(a, b)
162 | idx, p, data = self.tree.get_leaf(lower_bound)
163 | prob = p / self.tree.total_p()
164 | ISWeights[i,0]=np.power(prob/min_prob,-self.beta)
165 | b_idx[i],b_memory[i,:]=idx,data
166 |
167 | return b_idx, b_memory, ISWeights
168 |
169 | def batch_update(self, tree_idx, abs_errors):
170 | abs_errors += self.epsilon
171 | clipped_errors = np.minimum(abs_errors.data, self.abs_err_upper)
172 | ps = np.power(clipped_errors, self.alpha) #pi^α
173 | for ti, p in zip(tree_idx, ps):
174 | self.tree.update(ti, p)
175 |
176 |
177 |
178 | class Net(nn.Module):
179 | def __init__(self, ):
180 | super(Net, self).__init__()
181 | self.fc1 = nn.Linear(N_OBSERVATION, HIDDEN_NUMBER) #input: N_OBSERVATION
182 | self.fc1.weight.data.normal_(0, 0.1) # initialization
183 | self.out = nn.Linear(HIDDEN_NUMBER, N_ACTIONS) #output: N_ACTIONS
184 | self.out.weight.data.normal_(0, 0.1) # initialization
185 |
186 |         ## Dueling value streams (geographic / meteorological)
187 | self.fc2 = nn.Linear(N_OBSERVATION-4, HIDDEN_NUMBER)
188 | self.fc2.weight.data.normal_(0, 0.1)
189 | self.out2 = nn.Linear(HIDDEN_NUMBER, 1)
190 | self.out2.weight.data.normal_(0, 0.1)
191 |
192 | self.fc3 = nn.Linear(4, HIDDEN_NUMBER)
193 | self.fc3.weight.data.normal_(0, 0.1)
194 | self.out3 = nn.Linear(HIDDEN_NUMBER, 1)
195 | self.out3.weight.data.normal_(0, 0.1)
196 |
197 |     def forward(self, x):
198 |         if DUELING:
199 |             x0 = x  # raw observation, kept for the modality-specific value streams
200 |             x = F.relu(self.fc1(x))
201 |             #Multimodal_environmental_fusion_and_learn
202 |             x2 = self.fc2(x0[:, :5])   # geographic dims: (ppx, ppy, angle, tx, ty)
203 |             x2 = F.relu(x2)
204 |             x3 = self.fc3(x0[:, -4:])  # meteorological dims: (u2, v2, u, v) drifts
205 |             x3 = F.relu(x3)
206 |
207 |             a = 0.8  # weight 1: geographic characteristics
208 |             b = 0.2  # weight 2: meteorological characteristics
209 |             # The weights are adjusted based on the environment information and task requirements
210 |
211 | A = self.out(x)
212 | V = self.out2(x2)
213 | V_new = self.out3(x3)
214 | V_right = V * a + V_new * b
215 | actions_value = V_right.expand_as(A) + (A - torch.mean(A, dim=1,keepdim=True).expand_as(A))
216 | else:
217 | x = self.fc1(x)
218 | x = F.relu(x)
219 | actions_value = self.out(x)
220 | return actions_value
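    # In the DUELING branch the Q-values are assembled as
    #     Q(s, a) = [a * V_geo(s) + b * V_met(s)] + (A(s, a) - mean_a' A(s, a')),
    # a multivariate weighted dueling head: one value stream for the five geographic state
    # dimensions, one for the four meteorological drift dimensions, combined with fixed
    # weights (0.8 / 0.2 here) before the mean-centred advantages are added.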
221 |
222 |
223 | class DQN(object):
224 | def __init__(self):
225 | self.eval_net, self.target_net = Net().to(device), Net().to(device) #evaluate network
226 | if OLD_NET:
227 | self.eval_net.load_state_dict(torch.load('eval_net_params2_DQN_path1_x9522_y14992.pkl'))
228 | self.target_net.load_state_dict(torch.load('eval_net_params2_DQN_path1_x9522_y14992.pkl'))
229 |
230 | self.learn_step_counter = 0 # for target updating
231 | self.memory_counter = 0 # for storing memory
232 | if PRIORITIZED:
233 | self.memory = Memory(capacity=MEMORY_CAPACITY)
234 | else:
235 | self.memory = np.zeros((MEMORY_CAPACITY, N_OBSERVATION * 2 + 2))
236 | '''self.memory
237 | s(N_OBSERVATION) s_(N_OBSERVATION) a r
238 | 0
239 | 1
240 | 2
241 | 3
242 | ...
243 | 50000
244 |
245 | '''
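        # Each stored row is np.hstack((s, [a, r], s_)): N_OBSERVATION*2 + 2 = 20 columns
        # for the 9-dimensional state used here.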
246 | self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
247 |         self.loss_func = nn.MSELoss()  # loss for the non-prioritized branch (the PER branch weights the squared error itself)
248 |
249 |
250 | def choose_action(self, x, i_episode):
251 | x = torch.unsqueeze(torch.FloatTensor(x), 0)
252 | '''
253 | torch.FloatTensor()
254 | torch.unsqueeze(tensor)
255 | [1,2,3] -> [[1,2,3]]
256 | '''
257 | if i_episode >= MAX_EPISODES*0.3:
258 | epsilon=0
259 | else:
260 | epsilon = 0.01 + (1 - 0.01) * np.exp(-0.002 * i_episode)
261 | if np.random.uniform() < epsilon: # random
262 | action = np.random.randint(0, N_ACTIONS)
263 | action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)
264 |
265 | else: # greedy
266 | x = x.to(device)
267 | actions_value = self.eval_net.forward(x)
268 | action = torch.max(actions_value, 1)[1].data.cpu().numpy()
269 | action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE) # return the argmax index
270 | return action,epsilon
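    # The exploration schedule above is epsilon(i) = 0.01 + 0.99 * exp(-0.002 * i) for the
    # first 30% of episodes, then epsilon = 0 (pure greedy exploitation). For example:
    #   epsilon(0) = 1.0, epsilon(500) ~= 0.374, epsilon(1000) ~= 0.144.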
271 |
272 | def store_transition(self, s, a, r, s_):
273 | transition = np.hstack((s, [a, r], s_))
274 | if PRIORITIZED:
275 | self.memory.store(transition)
276 | self.memory_counter += 1
277 | else:
278 | # replace the old memory with new memory
279 | index = self.memory_counter % MEMORY_CAPACITY
280 | self.memory[index, :] = transition
281 | self.memory_counter += 1
282 |
283 |
284 | def learn(self):
285 | # target parameter update
286 | if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
287 | self.target_net.load_state_dict(self.eval_net.state_dict())#eval_net -> target_net
288 | self.learn_step_counter += 1
289 |
290 | # sample batch transitions
291 | if PRIORITIZED:
292 | tree_idx, batch_memory, ISWeights = self.memory.sample(BATCH_SIZE)
293 | b_memory = batch_memory
294 | else:
295 |             if self.memory_counter > MEMORY_CAPACITY:
296 |                 sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)  # sample BATCH_SIZE transitions uniformly from the full replay buffer
297 |             else:
298 |                 sample_index = np.random.choice(self.memory_counter, BATCH_SIZE)
299 |             b_memory = self.memory[sample_index, :]  # rows of (s, a, r, s_)
300 |         # unpack s, a, r, s_ from b_memory
301 |         b_s = torch.FloatTensor(b_memory[:, :N_OBSERVATION]).to(device)                            # s  (BATCH_SIZE x 9)
302 |         b_a = torch.LongTensor(b_memory[:, N_OBSERVATION:N_OBSERVATION+1].astype(int)).to(device)  # a  (BATCH_SIZE x 1)
303 |         b_r = torch.FloatTensor(b_memory[:, N_OBSERVATION+1:N_OBSERVATION+2]).to(device)           # r  (BATCH_SIZE x 1)
304 |         b_s_ = torch.FloatTensor(b_memory[:, -N_OBSERVATION:]).to(device)                          # s_ (BATCH_SIZE x 9)
305 | q_eval = self.eval_net(b_s).gather(1, b_a)
306 |
307 |         ## Double DQN
308 |         if DOUBLE:
309 |             q_eval_next = self.eval_net(b_s_)
310 |             max_action = torch.unsqueeze(torch.max(q_eval_next, 1)[1], 1)  # action selection by the online net
311 |             q_next = self.target_net(b_s_).gather(1, max_action).detach()  # action evaluation by the target net
312 |             q_target = b_r + GAMMA * q_next  # y = r + gamma * Q_target(s', argmax_a Q_eval(s', a))
313 | else:
314 | q_next = self.target_net(b_s_).detach()
315 | q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1)
316 | if PRIORITIZED:
317 | abs_errors = torch.sum(torch.abs(q_target - q_eval), dim=1)#abs_errors |δ|
318 | ISWeights = torch.Tensor(ISWeights).to(device)
319 | loss = torch.mean(torch.mean(ISWeights* (q_target - q_eval)**2,dim=1))
320 | self.memory.batch_update(tree_idx, abs_errors.cpu())
321 | else:
322 | loss = self.loss_func(q_eval, q_target)
323 | self.optimizer.zero_grad()
324 | loss.backward()
325 | self.optimizer.step()
326 |
327 |
328 | return loss
329 |
330 |
331 | #Save the RL training results-------------------------------------------
332 | def Save_results():
333 | episodeVSsteps = np.vstack((episode, steps))
334 | episodeVSmean_reward = np.vstack((episode, mean_reward))
335 | episodeVStotal_reward = np.vstack((episode, total_reward))
336 | episodeVSmean_Loss = np.vstack((episode, mean_Loss))
337 | episodeVStotal_Loss = np.vstack((episode, total_Loss))
338 | episodeVSmean_collision = np.vstack((episode,mean_collision))
339 | episodeVStotal_collision = np.vstack((episode,total_collision))
340 |
341 | output_folder = r'Path planning module\Experiments for Testing Generalization Ability' ## Replace it with your folder path
342 | os.makedirs(output_folder, exist_ok=True)
343 |
344 | file1_path = os.path.join(output_folder, 'episode-steps(fusion DQN).pickle')
345 | with open(file1_path, 'wb') as file1:
346 | pickle.dump(episodeVSsteps, file1)
347 |
348 | file2_path = os.path.join(output_folder, 'episode-mean_reward(fusion DQN).pickle')
349 | with open(file2_path, 'wb') as file2:
350 | pickle.dump(episodeVSmean_reward, file2)
351 |
352 | file3_path = os.path.join(output_folder, 'episode-total_reward(fusion DQN).pickle')
353 | with open(file3_path, 'wb') as file3:
354 | pickle.dump(episodeVStotal_reward, file3)
355 |
356 | file4_path = os.path.join(output_folder, 'episode-mean_Loss(fusion DQN).pickle')
357 | with open(file4_path, 'wb') as file4:
358 | pickle.dump(episodeVSmean_Loss, file4)
359 |
360 | file5_path = os.path.join(output_folder, 'episode-total_Loss(fusion DQN).pickle')
361 | with open(file5_path, 'wb') as file5:
362 | pickle.dump(episodeVStotal_Loss, file5)
363 |
364 | file6_path = os.path.join(output_folder, 'episode-mean_collision(fusion DQN).pickle')
365 | with open(file6_path, 'wb') as file6:
366 | pickle.dump(episodeVSmean_collision, file6)
367 |
368 | file7_path = os.path.join(output_folder, 'episode-total_collision(fusion DQN).pickle')
369 | with open(file7_path, 'wb') as file7:
370 | pickle.dump(episodeVStotal_collision, file7)
371 |
372 |
373 |     # files are closed automatically by the with-blocks above
374 |
375 |
376 |
377 |
378 |
379 |
380 |
381 |
382 |
383 |
384 |
385 |
386 |
387 |
388 |
389 | def main():
390 | dqn = DQN()
391 | print('\nlearning...')
392 | for i_episode in range(1,MAX_EPISODES+1):
393 | s = env.reset()
394 | episode_epsilon,episode_loss,episode_step, episode_reward, episode_collision=0,0,0,0,0
395 | x=[]
396 | y=[]
397 | angle=[]
398 | starttime = time.time()
399 | while True:
400 |
401 |             if RENDER:
402 |                 if LAST_RENDER:
403 |                     if i_episode >= MAX_EPISODES - RENDER_NUMBER + 1:  # only render the final RENDER_NUMBER episodes
404 |                         env.render(0,0)
405 |                 else:
406 |                     env.render(0,0)
407 | # take action
408 | a,episode_epsilon = dqn.choose_action(s, i_episode)#Epsilon-Greedy strategy
409 | s_, r, done, c = env.step(a)
410 | dqn.store_transition(s, a, r, s_)#store
411 | #record
412 | episode_reward += r
413 | episode_step += 1
414 | episode_collision += c
415 | x.append(s[0])
416 | y.append(s[1])
417 | angle.append(s[2])
418 | if dqn.memory_counter > MEMORY_CAPACITY:
419 | episode_loss += dqn.learn().item()
420 | if done or episode_step >= 200:
421 | break
422 | s = s_
423 | endtime = time.time()
424 |
425 |
426 |         #------------------------------training-process snapshots: first/second/third stage (episodes 3000 / 5000 / 8000)--------------------------------------------------
427 | if i_episode == 8000 :
428 | path_br_x_3 = x[:]
429 | path_br_y_3 = y[:]
430 | path_br_angle_3 = angle[:]
431 | data_3 = {'X_3': path_br_x_3, 'Y_3': path_br_y_3, 'angle_3': path_br_angle_3}
432 | df_3 = pd.DataFrame(data_3)
433 | folder_path_3 = r'Path planning module\Experiments for Testing Generalization Ability' #replace it with your file path
434 | excel_filename_3 = 'fusionDQN_trainning_process_third_stage.xlsx'
435 | excel_path_3 = os.path.join(folder_path_3, excel_filename_3)
436 | df_3.to_excel(excel_path_3, index=False)
437 |
438 | if i_episode == 5000 :
439 | path_br_x_2 = x[:]
440 | path_br_y_2 = y[:]
441 | path_br_angle_2 = angle[:]
442 | data_2 = {'X_2': path_br_x_2, 'Y_2': path_br_y_2, 'angle_2': path_br_angle_2}
443 | df_2 = pd.DataFrame(data_2)
444 | folder_path_2 = r'Path planning module\Experiments for Testing Generalization Ability' # Replace it with your folder path
445 | excel_filename_2 = 'fusionDQN_trainning_process_second_stage.xlsx'
446 | excel_path_2 = os.path.join(folder_path_2, excel_filename_2)
447 | df_2.to_excel(excel_path_2, index=False)
448 |
449 | if i_episode == 3000 :
450 | path_br_x_1 = x[:]
451 | path_br_y_1 = y[:]
452 | path_br_angle_1 = angle[:]
453 | data_1 = {'X_1': path_br_x_1, 'Y_1': path_br_y_1, 'angle_1': path_br_angle_1}
454 | df_1 = pd.DataFrame(data_1)
455 | folder_path_1 = r'Path planning module\Experiments for Testing Generalization Ability' # Replace it with your folder path
456 | excel_filename_1 = 'fusionDQN_trainning_process_first_stage.xlsx'
457 | excel_path_1 = os.path.join(folder_path_1, excel_filename_1)
458 | df_1.to_excel(excel_path_1, index=False)
459 |
460 |
461 |
462 |
463 | print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
464 |         print("episode:{0},steps:{1},m_reward:{2:3.5f},t_reward:{3:3.5f},m_loss:{4:3.5f},t_loss:{5:3.5f},epsilon:{6:3.5f},t_collision:{7:3.1f},m_collision:{8:3.5f}".
465 |               format(i_episode,episode_step,episode_reward/episode_step,episode_reward,episode_loss/episode_step,episode_loss,episode_epsilon,episode_collision,episode_collision/episode_step))
466 | dtime = endtime - starttime
467 | print("Run time of one epoch:%.8s s" % dtime)
468 | episode.append(i_episode)
469 | steps.append(episode_step)
470 | mean_reward.append(episode_reward / episode_step)
471 | total_reward.append(episode_reward)
472 | mean_Loss.append(episode_loss/episode_step)
473 | total_Loss.append(episode_loss)
474 | total_collision.append(episode_collision)
475 | mean_collision.append(episode_collision/episode_step)
476 |         max_value = max(total_reward)
477 |         max_index = total_reward.index(max_value)
478 |         print("Episode with the highest total reward:", max_index, "reward:", max_value, "steps:", steps[max_index])
479 | if i_episode % 500 == 0:
480 | Save_results()
481 | #torch.save(dqn.eval_net, r'eval_net.pkl') # save the total network
482 | # os.makedirs(os.path.dirname(save_path), exist_ok=True)
483 | # torch.save(dqn.eval_net.state_dict(), save_path) # only save the parameters
484 |
485 |     endtime = time.time()
486 |     dtime = endtime - starttime
487 |     print("Run time of the last episode: %.8s s" % dtime)
488 |
489 |
490 | return
491 | if __name__ == "__main__":
492 |     main()
493 |
494 |
495 |
--------------------------------------------------------------------------------