├── results_and_figures
├── baseline1_evaluation.mat
├── baseline3_evaluation.mat
├── Read me.txt
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_velocity.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_velocity.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_velocity.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_20dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_25dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_10dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_20dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_10dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_20dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_25dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_velocity.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_velocity.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_K=3_probability.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=3_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=4_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=5_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=6_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=7_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=8_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=9_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=3_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=9_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_10dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_20dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_25dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_10dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_20dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_25dBm_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=3_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=4_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=5_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=6_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=7_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=8_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=9_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=3_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=4_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=5_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=6_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=7_evaluation.mat
├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=8_evaluation.mat
└── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=9_evaluation.mat
├── supplementary_material
├── ASE
│ ├── averaged_results.mat
│ ├── Readme.txt
│ └── ASE_plot.m
├── Baseline3
│ └── baseline3_evaluation.mat
├── Beam_number
│ ├── baseline1_evaluation_32beam.mat
│ ├── baseline1_evaluation_64beam.mat
│ ├── baseline3_evaluation_32beam.mat
│ ├── baseline3_evaluation_64beam.mat
│ ├── baseline1_evaluation_128beam.mat
│ ├── baseline1_evaluation_256beam.mat
│ ├── baseline3_evaluation_128beam.mat
│ ├── baseline3_evaluation_256beam.mat
│ ├── TCOM_LOS_32beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_32beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_128beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_128beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat
│ ├── TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat
│ ├── TCOM_LOS_256beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_256beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat
│ ├── TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat
│ ├── TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat
│ ├── TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat
│ ├── plot_scalability.m
│ └── baseline3_beamnum.m
├── Model
│ ├── training_parameter
│ │ ├── learning_rate
│ │ │ ├── proposed1_lr0.001.mat
│ │ │ ├── proposed1_lr0.003.mat
│ │ │ ├── proposed2_lr0.001.mat
│ │ │ ├── proposed2_lr0.003.mat
│ │ │ ├── proposed1_lr0.0001.mat
│ │ │ ├── proposed1_lr0.0003.mat
│ │ │ ├── proposed2_lr0.0001.mat
│ │ │ ├── proposed2_lr0.0003.mat
│ │ │ ├── proposed3(basic)_MPC_k=7_lr0.001.mat
│ │ │ ├── proposed3(basic)_ONC_k=7_lr0.001.mat
│ │ │ ├── proposed3(basic)_MPC_k=7_lr0.00003.mat
│ │ │ ├── proposed3(basic)_MPC_k=7_lr0.0001.mat
│ │ │ ├── proposed3(basic)_MPC_k=7_lr0.0003.mat
│ │ │ ├── proposed3(basic)_ONC_k=7_lr0.00003.mat
│ │ │ ├── proposed3(basic)_ONC_k=7_lr0.0001.mat
│ │ │ ├── proposed3(basic)_ONC_k=7_lr0.0003.mat
│ │ │ ├── proposed3(enhanced)_MPC_k=7_lr0.001.mat
│ │ │ ├── proposed3(enhanced)_ONC_k=7_lr0.001.mat
│ │ │ ├── proposed3(enhanced)_MPC_k=7_lr0.00003.mat
│ │ │ ├── proposed3(enhanced)_MPC_k=7_lr0.0001.mat
│ │ │ ├── proposed3(enhanced)_MPC_k=7_lr0.0003.mat
│ │ │ ├── proposed3(enhanced)_ONC_k=7_lr0.00003.mat
│ │ │ ├── proposed3(enhanced)_ONC_k=7_lr0.0001.mat
│ │ │ └── proposed3(enhanced)_ONC_k=7_lr0.0003.mat
│ │ └── mu
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.3.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.4.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.5.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.6.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.7.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.8.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.9.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.3.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.4.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.5.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.6.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.7.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.8.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.9.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.3.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.4.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.5.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.3.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.4.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.5.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.6.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.7.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.8.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.9.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.3.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.4.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.5.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.6.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.7.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.8.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.9.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.1.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.2.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.3.mat
│ │ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.4.mat
│ │ │ └── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.5.mat
│ ├── complexity
│ │ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_testtime.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_testtime.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_k=7_testtime.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_K=7_execution_time.mat
│ │ └── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(enhanced)_k=7_testtime.mat
│ └── result
│ │ └── plot_model.m
└── Readme.txt
├── deep_learning_model
├── baseline1
│ ├── baseline1_evaluation.mat
│ └── Read me.txt
├── Read me.txt
├── basic_model
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_velocity.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_velocity.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_velocity.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_20dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_25dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_10dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_20dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_10dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_20dBm_evaluation.mat
│ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_25dBm_evaluation.mat
│ ├── Read me.txt
│ ├── model_3Dcov_basic.py
│ ├── beam_evaluation_velocity.py
│ ├── beam_evaluation.py
│ ├── beam_evaluation_baseline2.py
│ ├── dataloader.py
│ └── train_beam_basic.py
├── adaptive_model
│ ├── Enhanced
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_velocity.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_velocity.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=3_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=4_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=5_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=6_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=7_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=8_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=9_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=3_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=9_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_10dBm_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_20dBm_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_25dBm_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_10dBm_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_20dBm_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_25dBm_evaluation.mat
│ │ ├── beam_evaluation_velocity.py
│ │ ├── beam_evaluation.py
│ │ ├── beam_evaluation_MPC_K=3.py
│ │ ├── train_beam_basic.py
│ │ ├── dataloader.py
│ │ └── model_3Dcov_basic.py
│ ├── Basic
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=3_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=4_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=5_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=6_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=7_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=8_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=9_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=3_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=4_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=5_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=6_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=7_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=8_evaluation.mat
│ │ ├── TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=9_evaluation.mat
│ │ ├── beam_evaluation.py
│ │ ├── dataloader.py
│ │ ├── train_beam_basic.py
│ │ └── model_3Dcov_basic.py
│ └── Read me.txt
└── evaluation_model
│ ├── Read me.txt
│ ├── beam_evaluation_velocity.py
│ ├── beam_evaluation.py
│ └── beam_evaluation_baseline2.py
└── Read me.txt
/results_and_figures/baseline1_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/baseline1_evaluation.mat
--------------------------------------------------------------------------------
/results_and_figures/baseline3_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/baseline3_evaluation.mat
--------------------------------------------------------------------------------
/supplementary_material/ASE/averaged_results.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/ASE/averaged_results.mat
--------------------------------------------------------------------------------
/deep_learning_model/baseline1/baseline1_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/baseline1/baseline1_evaluation.mat
--------------------------------------------------------------------------------
/supplementary_material/Baseline3/baseline3_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Baseline3/baseline3_evaluation.mat
--------------------------------------------------------------------------------
/results_and_figures/Read me.txt:
--------------------------------------------------------------------------------
1 | The folder includes training results and the program for plotting figures.
2 | Note:
3 | 1. Environment: MATLAB R2019a.
4 | 2. Run figure_plot.m to obtain simulation results.
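As a quick check before running figure_plot.m, any of the evaluation .mat files above can be inspected directly in MATLAB. The snippet below is only a minimal sketch: the variable names stored inside these .mat files are not documented here, so it simply lists whatever a file contains instead of assuming particular fields.
% Minimal MATLAB sketch (R2019a); run it from results_and_figures/.
matFile = 'baseline1_evaluation.mat';   % any of the *_evaluation.mat files works
whos('-file', matFile)                  % list the variables stored in the file
results = load(matFile);                % load every stored variable into a struct
disp(fieldnames(results))               % the fields available for plotting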
-------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline1_evaluation_32beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline1_evaluation_32beam.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline1_evaluation_64beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline1_evaluation_64beam.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline3_evaluation_32beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline3_evaluation_32beam.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline3_evaluation_64beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline3_evaluation_64beam.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline1_evaluation_128beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline1_evaluation_128beam.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline1_evaluation_256beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline1_evaluation_256beam.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline3_evaluation_128beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline3_evaluation_128beam.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline3_evaluation_256beam.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/baseline3_evaluation_256beam.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.001.mat 
--------------------------------------------------------------------------------
/supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.003.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.003.mat
--------------------------------------------------------------------------------
/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.001.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.001.mat
--------------------------------------------------------------------------------
/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.003.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.003.mat
--------------------------------------------------------------------------------
/deep_learning_model/Read me.txt:
--------------------------------------------------------------------------------
1 | The folder includes the source codes of the baselines and our proposed schemes.
2 | Reference: K. Ma et al., Deep Learning Assisted Calibrated Beam Training for Millimeter-Wave Communication Systems, to be published on arXiv.
--------------------------------------------------------------------------------
/supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.0001.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.0001.mat
--------------------------------------------------------------------------------
/supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.0003.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed1_lr0.0003.mat
--------------------------------------------------------------------------------
/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.0001.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.0001.mat
--------------------------------------------------------------------------------
/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.0003.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed2_lr0.0003.mat
--------------------------------------------------------------------------------
/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_velocity.mat:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_velocity.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_velocity.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_velocity.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.001.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.001.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.00003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.00003.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.0001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.0001.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.0003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_MPC_k=7_lr0.0003.mat -------------------------------------------------------------------------------- 
/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.00003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.00003.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.0001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.0001.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.0003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(basic)_ONC_k=7_lr0.0003.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.001.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.001.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_20dBm_evaluation.mat 
-------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_25dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_10dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_20dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_10dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_20dBm_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_20dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_25dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_velocity.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_velocity.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.00003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.00003.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.0001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.0001.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.0003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_MPC_k=7_lr0.0003.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.00003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.00003.mat -------------------------------------------------------------------------------- 
/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.0001.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.0001.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.0003.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/learning_rate/proposed3(enhanced)_ONC_k=7_lr0.0003.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_K=3_probability.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_K=3_probability.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=3_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=3_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=4_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=4_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=6_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=6_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=8_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=8_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=9_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=3_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=3_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=9_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_velocity.mat -------------------------------------------------------------------------------- 
/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_velocity.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_velocity.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_10dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_20dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_25dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_10dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_20dBm_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_25dBm_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_25dBm_evaluation.mat
--------------------------------------------------------------------------------
/deep_learning_model/baseline1/Read me.txt:
--------------------------------------------------------------------------------
1 | The folder includes the source codes of baseline 1.
2 | Note:
3 | 1. Environment: MATLAB R2019a.
4 | 2. Run baseline1.m to start simulations.
5 | 3. Reference of baseline 1: Xingyi Luo et al., Calibrated Beam Training for Millimeter-Wave Massive MIMO Systems.
6 |
--------------------------------------------------------------------------------
/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=3_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=3_evaluation.mat
--------------------------------------------------------------------------------
/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=4_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=4_evaluation.mat
--------------------------------------------------------------------------------
/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=5_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=5_evaluation.mat
--------------------------------------------------------------------------------
/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=6_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=6_evaluation.mat
--------------------------------------------------------------------------------
/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=7_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=7_evaluation.mat
--------------------------------------------------------------------------------
/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=8_evaluation.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=8_evaluation.mat
-------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=9_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=3_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=3_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=4_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=4_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=6_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=6_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=8_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=8_evaluation.mat -------------------------------------------------------------------------------- /results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=9_evaluation.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/results_and_figures/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_testtime.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_testtime.mat -------------------------------------------------------------------------------- /supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_testtime.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_testtime.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_20dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_25dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_10dBm_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_10dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_20dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_10dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_20dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_25dBm_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/basic_model/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_25dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_128beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_256beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_32beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Beam_number/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_k=7_testtime.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_k=7_testtime.mat -------------------------------------------------------------------------------- /supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_K=7_execution_time.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_K=7_execution_time.mat -------------------------------------------------------------------------------- /supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(enhanced)_k=7_testtime.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/complexity/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(enhanced)_k=7_testtime.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_velocity.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_velocity.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_velocity.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_velocity.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.2.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.3.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.4.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.4.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.5.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.6.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.6.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.7.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.7.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.8.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.8.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.9.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu0.9.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.2.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.3.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.4.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.4.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.5.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.6.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.6.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.7.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.7.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.8.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.8.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.9.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu1.9.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.2.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.2.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.3.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.4.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.4.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu2.5.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.2.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.3.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.4.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.4.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.5.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.6.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.6.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.7.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.7.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.8.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.8.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.9.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu0.9.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.2.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.2.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.3.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.4.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.4.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.5.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.6.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.6.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.7.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.7.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.8.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.8.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.9.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu1.9.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.1.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.2.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.3.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.4.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.4.mat -------------------------------------------------------------------------------- /supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.5.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/supplementary_material/Model/training_parameter/mu/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu2.5.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=3_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=3_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=4_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=4_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=6_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=6_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=8_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=8_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=9_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=3_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=3_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=9_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /supplementary_material/ASE/Readme.txt: -------------------------------------------------------------------------------- 1 | ASE (average spectral efficiency [1]) can also be referred to as the effective spectral efficiency. 2 | [1] M. Hussain, M. Scalabrin, M. Rossi, and N. Michelusi, “Mobility and blockage-aware communications in millimeter-wave vehicular networks,” IEEE Trans. Veh. Technol., vol. 69, no. 11, pp. 13072–13086, Nov. 2020.
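For context, the sketch below is not part of the repository; it assumes the common definition in which ASE is obtained by averaging the achievable rate log2(1 + SNR) of the selected beam over all test samples (the exact definition used in [1] may differ in detail):

import numpy as np

def average_spectral_efficiency(selected_beam_snr_linear):
    # Mean achievable spectral efficiency (bps/Hz) over test samples,
    # given the linear-scale SNR of the beam selected in each sample.
    snr = np.asarray(selected_beam_snr_linear, dtype=float)
    return float(np.mean(np.log2(1.0 + snr)))

# Example: selected-beam SNRs of 10 dB, 15 dB and 20 dB.
snr_db = np.array([10.0, 15.0, 20.0])
print(average_spectral_efficiency(10.0 ** (snr_db / 10.0)))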
-------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=3_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=3_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=4_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=4_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=6_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=6_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=8_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=8_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=9_evaluation.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=3_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=3_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=4_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=4_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=5_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=5_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=6_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=6_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=7_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=7_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=8_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=8_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=9_evaluation.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Basic/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_v1_k=9_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_10dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_20dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_25dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_10dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_10dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_20dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_20dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_25dBm_evaluation.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KeMa1998/DL-assisted-calibrated-beam-training/HEAD/deep_learning_model/adaptive_model/Enhanced/TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_25dBm_evaluation.mat -------------------------------------------------------------------------------- /deep_learning_model/basic_model/Read me.txt: 
--------------------------------------------------------------------------------
1 | The folder contains the Python files and saved models for baseline 2 and the proposed CNN- and LSTM-based schemes.
2 | Note:
3 | 1. Environment: PyTorch, Python 2.7.
4 | 2. Run train_beam_basic.py to start the simulations.
5 | 3. The evaluation of baseline 2 differs from that of the other schemes, since baseline 2 has already measured a subset of the narrow beams.
6 | 4. Reference for baseline 2: C. Qi et al., "Deep Learning for Beam Training in Millimeter Wave Massive MIMO Systems."
--------------------------------------------------------------------------------
/deep_learning_model/evaluation_model/Read me.txt:
--------------------------------------------------------------------------------
1 | The folder contains the Python files used for performance evaluation.
2 | Note:
3 | 1. Environment: PyTorch, Python 2.7.
4 | 2. The evaluation of baseline 2 differs from that of the other schemes, since baseline 2 has already measured a subset of the narrow beams.
5 | 3. When evaluating the adaptive CBT schemes, the trained wide beam number K in model_3Dcov_basic.py must be set consistently with the loaded model.
6 | 4. Detailed code comments can be found in the folder basic_model.
--------------------------------------------------------------------------------
/supplementary_material/Readme.txt:
--------------------------------------------------------------------------------
1 | Simulation files added during the major revision for IEEE Transactions on Communications.
2 | (1) ASE: average spectral efficiency.
3 | (2) Baseline3: the added baseline 3 [1].
4 | (3) Beam_number: simulation results for different numbers of output beams.
5 | (4) Model: investigation of model complexity and training parameters.
6 | [1] S. Chiu, N. Ronquillo, and T. Javidi, “Active learning and CSI acquisition for mmWave initial alignment,” IEEE J. Sel. Areas Commun., vol. 37, no. 11, pp. 2474–2489, Nov. 2019.
--------------------------------------------------------------------------------
/deep_learning_model/adaptive_model/Read me.txt:
--------------------------------------------------------------------------------
1 | The folder contains the Python files and saved models for the proposed adaptive calibrated beam training (CBT) schemes.
2 | Note:
3 | 1. Environment: PyTorch, Python 2.7.
4 | 2. Run train_beam_basic.py to start the simulations.
5 | 3. The folders Basic and Enhanced correspond to the basic scheme and the enhanced scheme, respectively.
6 | 4. When evaluating the adaptive CBT schemes, the trained wide beam number K in model_3Dcov_basic.py must be set consistently with the loaded model.
7 | 5. Detailed code comments can be found in the folder basic_model.
--------------------------------------------------------------------------------
/Read me.txt:
--------------------------------------------------------------------------------
1 | This folder includes the deep learning code and the corresponding results.
2 | Due to the storage restrictions of GitHub, the datasets and well-trained models can be downloaded from:
3 | 1. Tsinghua Cloud: https://cloud.tsinghua.edu.cn/d/4b29cf2c534f4481905a/
4 | 2. Google Drive: https://drive.google.com/drive/folders/1az6C7vJZXogSu66EAEhTWqYrcCh6uMRp
5 | The folder is free to use for dataset utilization, simulation result reproduction, model improvement, etc.
6 | For academic use, please cite the reference below:
7 | K. Ma et al., "Deep Learning Assisted Calibrated Beam Training for Millimeter-Wave Communication Systems," arXiv:2101.05206.
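For quick reference, a minimal usage sketch of how a model from this repository is typically instantiated and driven is given below, mirroring the evaluation scripts in basic_model. It is only an illustrative sketch, not an official entry point: the dummy batch shape (batch, 2, 10, 16), the checkpoint name shown in the comment, and the expected output shape (10, batch, 64) are assumptions read off model_3Dcov_basic.py and dataloader.py.

import torch
# The wide beam number K inside model_3Dcov_basic.py must match the trained model being used.
from model_3Dcov_basic import Model_3D

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Build a fresh model for training ...
model = Model_3D().to(device)
# ... or, as the evaluation scripts do, load a saved checkpoint instead (name is illustrative):
# model = torch.load('TCOM_..._MODEL.pkl'); model.to(device); model.eval()

# Dummy input shaped like one dataloader batch: (batch, 2, 10, 16), matching the dataloader buffers.
x = torch.randn(16, 2, 10, 16, device=device)
with torch.no_grad():
    scores = model(x)  # basic CNN model: expected output (10, batch, 64) narrow-beam scores
print(scores.shape)

Note that the adaptive models in adaptive_model/Basic and adaptive_model/Enhanced use a different forward signature (model(channel2, device), with the enhanced version returning two tensors), so the call above applies to the basic model only; the corresponding evaluation scripts show the exact calls.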
-------------------------------------------------------------------------------- /deep_learning_model/basic_model/model_3Dcov_basic.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | class Model_3D(nn.Module): 7 | 8 | def __init__(self, N=60, K=32, Tx=8, Channel=2): 9 | super(Model_3D, self).__init__() 10 | 11 | # first dimension in CNN denotes beam training number 12 | self.bn0 = nn.BatchNorm2d(2) 13 | self.conv1 = nn.Conv2d(in_channels=2, out_channels=64, 14 | kernel_size=(1,3), stride=(1,3), padding=(0,1)) 15 | self.bn1 = nn.BatchNorm2d(64) 16 | self.conv2 = nn.Conv2d(in_channels=64, out_channels=256, 17 | kernel_size=(1,3), stride=(1,1), padding=(0,1)) 18 | self.bn2 = nn.BatchNorm2d(256) 19 | 20 | # if proposed LSTM assisted scheme 21 | #self.lstm = nn.LSTM(input_size=256, hidden_size=256, num_layers=2, dropout=0.2) 22 | self.fc = nn.Linear(256, 64) 23 | self.drop = nn.Dropout(0.3) 24 | 25 | def forward(self, x): 26 | 27 | x = self.bn0(x) 28 | 29 | x = self.conv1(x) 30 | x = self.bn1(x) 31 | x = F.relu(x) 32 | 33 | x = self.conv2(x) 34 | x = self.bn2(x) 35 | x = F.relu(x) 36 | 37 | # if 0: 38 | P_dim_size = x.shape[3] 39 | x = nn.MaxPool2d(kernel_size=(1, P_dim_size))(x) 40 | x = torch.squeeze(x) 41 | 42 | x = x.permute(2, 0, 1) 43 | 44 | # if proposed LSTM assisted scheme 45 | #y = self.lstm(x) 46 | #y = y[0] 47 | #y1 = y 48 | #y1 = self.drop(y1) 49 | #y1 = self.fc(y1) 50 | 51 | # if baseline 2 or proposed CNN assisted scheme 52 | y1 = self.drop(x) 53 | y1 = self.fc(y1) 54 | 55 | result = y1 56 | 57 | return result 58 | -------------------------------------------------------------------------------- /supplementary_material/ASE/ASE_plot.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | close all; 3 | clc; 4 | 5 | %% Plot average spectral efficiency 6 | % Specific data can be found in cloud disks 7 | load('averaged_results.mat'); 8 | figure; 9 | grid on; 10 | hold on; 11 | 12 | % Baseline 1 13 | plot(10 : 10 : 100, ase1, 'r-o', 'linewidth', 1.5); 14 | 15 | % Baseline 2 16 | plot(10 : 10 : 100, ase2, 'b-*', 'linewidth', 1.5); 17 | 18 | % Baseline 3 19 | plot(10 : 10 : 100, ase3, '+-', 'color', [0 0.5 0.5], 'linewidth', 1.5); 20 | 21 | % CNN assisted 22 | plot(10 : 10 : 100, ase_CNN, 'g-v', 'linewidth', 1.5); 23 | 24 | % LSTM assisted 25 | plot(10 : 10 : 100, ase_LSTM, 'm-^', 'linewidth', 1.5); 26 | 27 | % Enhanced adaptive scheme, ONC, K = 5 28 | plot(10 : 10 : 100, ase_e_adaptive5, 'p-', 'color', [0.5 0.5 0], 'linewidth', 1.5); 29 | 30 | % Enhanced adaptive scheme, ONC, K = 7 31 | plot(10 : 10 : 100, ase_e_adaptive7, 'x-', 'color', [0.5 0 0.5], 'linewidth', 1.5); 32 | 33 | % LSTM assisted, Kn = 5 34 | plot(10 : 10 : 100, ase_LSTM_a, 'm-.^', 'linewidth', 1.5); 35 | 36 | % Enhanced adaptive scheme, ONC, K = 5, Kn = 5 37 | plot(10 : 10 : 100, ase_e_adaptive5_a, 'p-.', 'color', [0.5 0.5 0], 'linewidth', 1.5); 38 | 39 | % Enhanced adaptive scheme, ONC, K = 7, Kn = 5 40 | plot(10 : 10 : 100, ase_e_adaptive7_a, 'x-.', 'color', [0.5 0 0.5], 'linewidth', 1.5); 41 | 42 | legend('Baseline 1 [14]', 'Baseline 2 [25]', 'Baseline 3 [16]', 'CNN assisted CBT of Sec. III ($K_{\rm{n}}=0$)',... 43 | 'LSTM assisted CBT of Sec. IV ($K_{\rm{n}}=0$)', 'Enhanced adaptive CBT of Sec. V (ONC, $K=5,K_{\rm{n}}=0$)', 'Enhanced adaptive CBT of Sec. V (ONC, $K=7,K_{\rm{n}}=0$)',... 44 | 'LSTM assisted CBT of Sec. 
IV ($K_{\rm{n}}=5$)', 'Enhanced adaptive CBT of Sec. V (ONC, $K=5, K_{\rm{n}}=5$)', 'Enhanced adaptive CBT of Sec. V (ONC, $K=7, K_{\rm{n}}=5$)',... 45 | 'interpreter', 'latex', 'fontsize', 6); 46 | ylim([2 6]); 47 | xlabel('Beam training period $\tau$ (ms)', 'interpreter', 'latex'); 48 | ylabel('Average spectral efficiency $\overline{E}$ (bps/Hz)', 'interpreter', 'latex'); 49 | -------------------------------------------------------------------------------- /supplementary_material/Beam_number/plot_scalability.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | close all; 3 | clc 4 | 5 | %plot different output narrow beam number 6 | figure; 7 | hold on; 8 | xlabel('Candidate narrow beam number $N_{\rm{Tx}}$', 'interpreter', 'latex'); 9 | ylabel('Normalized beamforming gain $G_{\rm{N}}$', 'interpreter', 'latex'); 10 | grid on; 11 | set(gca,'xscale','log'); 12 | xlim([32 256]); 13 | xticks([32 64 128 256]); 14 | set(gca,'xticklabel',[32 64 128 256]); 15 | 16 | 17 | power_ratios = []; 18 | beamnum = [32; 64; 128; 256]; 19 | for i = 1 : length(beamnum) 20 | beam = beamnum(i); 21 | file = ['baseline1_evaluation_' num2str(beam) 'beam.mat']; 22 | load(file); 23 | power_ratios = [power_ratios mean(power_ratio(6 : 10))]; 24 | end 25 | plot([32; 64; 128; 256], power_ratios, 'r-o', 'LineWidth', 1.5); 26 | 27 | power_ratio = []; 28 | beamnum = [32; 64; 128; 256]; 29 | for i = 1 : length(beamnum) 30 | beam = beamnum(i); 31 | file = ['TCOM_LOS_' num2str(beam) 'beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_evaluation.mat']; 32 | load(file); 33 | power_ratio = [power_ratio mean(mean(BL_eval(6 : 10, 1, :)))]; 34 | end 35 | plot([32; 64; 128; 256], power_ratio, 'b-*', 'LineWidth', 1.5); 36 | 37 | power_ratios = []; 38 | beamnum = [32; 64; 128; 256]; 39 | for i = 1 : length(beamnum) 40 | beam = beamnum(i); 41 | file = ['baseline3_evaluation_' num2str(beam) 'beam.mat']; 42 | load(file); 43 | power_ratios = [power_ratios mean(power_ratio(6 : 10))]; 44 | end 45 | plot([32; 64; 128; 256], power_ratios, '+-', 'color', [0 0.5 0.5], 'LineWidth', 1.5); 46 | 47 | power_ratio = []; 48 | beamnum = [32; 64; 128; 256]; 49 | for i = 1 : length(beamnum) 50 | beam = beamnum(i); 51 | file = ['TCOM_LOS_' num2str(beam) 'beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_15dBm_evaluation.mat']; 52 | load(file); 53 | power_ratio = [power_ratio mean(mean(BL_eval(6 : 10, 1, :)))]; 54 | end 55 | plot([32; 64; 128; 256], power_ratio, 'g-v', 'LineWidth', 1.5); 56 | 57 | power_ratio = []; 58 | beamnum = [32; 64; 128; 256]; 59 | for i = 1 : length(beamnum) 60 | beam = beamnum(i); 61 | file = ['TCOM_LOS_' num2str(beam) 'beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_v1_15dBm_evaluation.mat']; 62 | load(file); 63 | power_ratio = [power_ratio mean(mean(BL_eval(6 : 10, 1, :)))]; 64 | end 65 | plot([32; 64; 128; 256], power_ratio, 'm-^', 'LineWidth', 1.5); 66 | 67 | power_ratio = []; 68 | beamnum = [32; 64; 128; 256]; 69 | for i = 1 : length(beamnum) 70 | beam = beamnum(i); 71 | file = ['TCOM_LOS_' num2str(beam) 'beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=5_evaluation']; 72 | load(file); 73 | power_ratio = [power_ratio mean(mean(BL_eval(6 : 10, 1, :)))]; 74 | end 75 | plot([32; 64; 128; 256], power_ratio, 'p-', 'color', [0.5 0.5 0], 'LineWidth', 1.5); 76 | 77 | power_ratio = []; 78 | beamnum = [32; 64; 128; 256]; 79 | for i = 1 : length(beamnum) 80 | beam = beamnum(i); 81 | file = ['TCOM_LOS_' num2str(beam) 
'beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_v1_k=7_evaluation']; 82 | load(file); 83 | power_ratio = [power_ratio mean(mean(BL_eval(6 : 10, 1, :)))]; 84 | end 85 | plot([32; 64; 128; 256], power_ratio, 'x-', 'color', [0.5 0 0.5], 'LineWidth', 1.5); 86 | 87 | legend('Baseline 1 [14]', 'Baseline 2 [25]', 'Baseline 3 [16]', 'CNN assisted CBT of Sec. III',... 88 | 'LSTM assisted CBT of Sec. IV', 'Enhanced adaptive CBT of Sec. V (ONC, $K=5$)', 'Enhanced adaptive CBT of Sec. V (ONC, $K=7$)',... 89 | 'interpreter', 'latex'); 90 | ylim([0.1 0.9]); -------------------------------------------------------------------------------- /deep_learning_model/basic_model/beam_evaluation_velocity.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | batch_size = 16 20 | BL = np.zeros((10,1)) 21 | running_loss = 0 22 | batch_num = 0 23 | rank = np.zeros((10,64)) 24 | while not done: 25 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 26 | if count==True: 27 | batch_num += 1 28 | out_tensor = model(channel1) 29 | loss = 0 30 | for loss_count in range(10): 31 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 32 | out_tensor_np = out_tensor.cpu().detach().numpy() 33 | gt_labels = label_nonoise_m.cpu().detach().numpy() 34 | gt_labels = np.float32(gt_labels) 35 | gt_labels = gt_labels.transpose(1, 0) 36 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 37 | beam_power = beam_power.transpose(1, 0, 2) 38 | out_shape = gt_labels.shape 39 | for i in range(out_shape[0]): 40 | for j in range(out_shape[1]): 41 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 42 | train_index = np.argmax(train_ans) 43 | train_sorted = np.argsort(train_ans) 44 | rank_index = np.where(train_sorted == gt_labels[i, j]) 45 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 46 | if train_index == gt_labels[i, j]: 47 | P = P + 1 48 | else: 49 | N = N + 1 50 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 51 | running_loss += loss.data.cpu() 52 | acur = float(P) / (P + N) 53 | losses = running_loss / batch_num 54 | BL = BL / batch_num / batch_size 55 | print("Accuracy: %.3f" % (acur)) 56 | print("Loss: %.3f" % (losses)) 57 | print("Beam power loss:") 58 | print(BL.T) 59 | return acur, losses, rank, BL 60 | 61 | def main(): 62 | version_name = 'v1_velocity' 63 | info = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_' + version_name 64 | print(info) 65 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 66 | print(device) 67 | 68 | t = 5 69 | batch_size = 16 70 | 71 | print('batch_size:%d'%(batch_size)) 72 | 73 | acur_eval = [] 74 | loss_eval = [] 75 | BL_eval = np.zeros((10, 5, t)) 76 | rank_eval = np.zeros((10, 64, 5, t)) 77 | 78 | for tt in range(t): 79 | print('Train %d times' % (tt)) 80 | model_name = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_' + str(tt) + '_MODEL.pkl' 81 | model = torch.load(model_name) 82 | model.to(device) 83 | model.eval() 84 
| count = 0 85 | 86 | # evaluate the performance under different UE velocities 87 | for v in range(10, 60, 10): 88 | eval_loader_name = '/usr/mk/TCOM/velocity_' + str(v) 89 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 90 | eval_loader.reset() 91 | acur, losses, rank, BL = eval(model, eval_loader, device) 92 | acur_eval.append(acur) 93 | loss_eval.append(losses) 94 | rank_eval[:, :, count, tt] = np.squeeze(rank) 95 | BL_eval[:, count, tt] = np.squeeze(BL) 96 | count = count + 1 97 | 98 | mat_name = info + '.mat' 99 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval}) 100 | 101 | if __name__ == '__main__': 102 | main() -------------------------------------------------------------------------------- /deep_learning_model/evaluation_model/beam_evaluation_velocity.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | batch_size = 16 20 | BL = np.zeros((10,1)) 21 | running_loss = 0 22 | batch_num = 0 23 | rank = np.zeros((10,64)) 24 | while not done: 25 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 26 | if count==True: 27 | batch_num += 1 28 | out_tensor = model(channel1) 29 | loss = 0 30 | for loss_count in range(10): 31 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 32 | out_tensor_np = out_tensor.cpu().detach().numpy() 33 | gt_labels = label_nonoise_m.cpu().detach().numpy() 34 | gt_labels = np.float32(gt_labels) 35 | gt_labels = gt_labels.transpose(1, 0) 36 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 37 | beam_power = beam_power.transpose(1, 0, 2) 38 | out_shape = gt_labels.shape 39 | for i in range(out_shape[0]): 40 | for j in range(out_shape[1]): 41 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 42 | train_index = np.argmax(train_ans) 43 | train_sorted = np.argsort(train_ans) 44 | rank_index = np.where(train_sorted == gt_labels[i, j]) 45 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 46 | if train_index == gt_labels[i, j]: 47 | P = P + 1 48 | else: 49 | N = N + 1 50 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 51 | running_loss += loss.data.cpu() 52 | acur = float(P) / (P + N) 53 | losses = running_loss / batch_num 54 | BL = BL / batch_num / batch_size 55 | print("Accuracy: %.3f" % (acur)) 56 | print("Loss: %.3f" % (losses)) 57 | print("Beam power loss:") 58 | print(BL.T) 59 | return acur, losses, rank, BL 60 | 61 | def main(): 62 | version_name = 'v1_velocity' 63 | info = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_' + version_name 64 | print(info) 65 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 66 | print(device) 67 | 68 | t = 5 69 | batch_size = 16 70 | 71 | print('batch_size:%d'%(batch_size)) 72 | 73 | acur_eval = [] 74 | loss_eval = [] 75 | BL_eval = np.zeros((10, 5, t)) 76 | rank_eval = np.zeros((10, 64, 5, t)) 77 | 78 | for tt in range(t): 79 | print('Train %d times' % (tt)) 80 | 
model_name = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_' + str(tt) + '_MODEL.pkl' 81 | model = torch.load(model_name) 82 | model.to(device) 83 | model.eval() 84 | count = 0 85 | 86 | # evaluate the performance under different UE velocities 87 | for v in range(10, 60, 10): 88 | eval_loader_name = '/usr/mk/TCOM/velocity_' + str(v) 89 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 90 | eval_loader.reset() 91 | acur, losses, rank, BL = eval(model, eval_loader, device) 92 | acur_eval.append(acur) 93 | loss_eval.append(losses) 94 | rank_eval[:, :, count, tt] = np.squeeze(rank) 95 | BL_eval[:, count, tt] = np.squeeze(BL) 96 | count = count + 1 97 | 98 | mat_name = info + '.mat' 99 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval}) 100 | 101 | if __name__ == '__main__': 102 | main() -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/beam_evaluation_velocity.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | batch_size = 16 20 | BL = np.zeros((10,1)) 21 | running_loss = 0 22 | batch_num = 0 23 | rank = np.zeros((10,64)) 24 | while not done: 25 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, label_widebeam_m, done, count = loader.next_batch() 26 | if count==True: 27 | batch_num += 1 28 | out_tensor, out_tensor2 = model(channel2, device) 29 | loss = 0 30 | for loss_count in range(10): 31 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 32 | out_tensor_np = out_tensor.cpu().detach().numpy() 33 | gt_labels = label_nonoise_m.cpu().detach().numpy() 34 | gt_labels = np.float32(gt_labels) 35 | gt_labels = gt_labels.transpose(1, 0) 36 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 37 | beam_power = beam_power.transpose(1, 0, 2) 38 | out_shape = gt_labels.shape 39 | for i in range(out_shape[0]): 40 | for j in range(out_shape[1]): 41 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 42 | train_index = np.argmax(train_ans) 43 | train_sorted = np.argsort(train_ans) 44 | rank_index = np.where(train_sorted == gt_labels[i, j]) 45 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 46 | if train_index == gt_labels[i, j]: 47 | P = P + 1 48 | else: 49 | N = N + 1 50 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 51 | running_loss += loss.data.cpu() 52 | acur = float(P) / (P + N) 53 | losses = running_loss / batch_num 54 | BL = BL / batch_num / batch_size 55 | print("Accuracy: %.3f" % (acur)) 56 | print("Loss: %.3f" % (losses)) 57 | print("Beam power loss:") 58 | print(BL.T) 59 | return acur, losses, rank, BL 60 | 61 | def main(): 62 | version_name = 'v1_k=7_velocity' 63 | info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_' + version_name 64 | print(info) 65 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 66 | print(device) 67 | 68 | t = 5 69 | batch_size = 16 70 
| 71 | print('batch_size:%d'%(batch_size)) 72 | 73 | acur_eval = [] 74 | loss_eval = [] 75 | BL_eval = np.zeros((10, 5, t)) 76 | rank_eval = np.zeros((10, 64, 5, t)) 77 | 78 | for tt in range(t): 79 | print('Train %d times' % (tt)) 80 | model_name = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_k=7_' + str(tt) + '_MODEL.pkl' 81 | model = torch.load(model_name) 82 | model.to(device) 83 | model.eval() 84 | count = 0 85 | 86 | # evaluate the performance under different UE velocities 87 | for v in range(10, 60, 10): 88 | eval_loader_name = '/usr/mk/TCOM/dataset/velocity_' + str(v) 89 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 90 | eval_loader.reset() 91 | acur, losses, rank, BL = eval(model, eval_loader, device) 92 | acur_eval.append(acur) 93 | loss_eval.append(losses) 94 | rank_eval[:, :, count, tt] = np.squeeze(rank) 95 | BL_eval[:, count, tt] = np.squeeze(BL) 96 | count = count + 1 97 | 98 | mat_name = info + '.mat' 99 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval}) 100 | 101 | if __name__ == '__main__': 102 | main() -------------------------------------------------------------------------------- /deep_learning_model/basic_model/beam_evaluation.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | batch_size = 16 20 | # save the performance of 5 additional narrow beam trainings 21 | BL = np.zeros((10,5)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | # cumulative probability function 26 | pdf = np.zeros((10, 101)) 27 | while not done: 28 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 29 | if count==True: 30 | batch_num += 1 31 | out_tensor = model(channel2) 32 | loss = 0 33 | for loss_count in range(10): 34 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 35 | out_tensor_np = out_tensor.cpu().detach().numpy() 36 | gt_labels = label_nonoise_m.cpu().detach().numpy() 37 | gt_labels = np.float32(gt_labels) 38 | gt_labels = gt_labels.transpose(1, 0) 39 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 40 | beam_power = beam_power.transpose(1, 0, 2) 41 | out_shape = gt_labels.shape 42 | for i in range(out_shape[0]): 43 | for j in range(out_shape[1]): 44 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 45 | train_index = np.argmax(train_ans) 46 | train_sorted = np.argsort(train_ans) 47 | rank_index = np.where(train_sorted == gt_labels[i, j]) 48 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 49 | if train_index == gt_labels[i, j]: 50 | P = P + 1 51 | else: 52 | N = N + 1 53 | # perform 5 additional narrow beam trainings 54 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 55 | BL[i, 1] = BL[i, 1] + (max(beam_power[i, j, train_sorted[62 : 64]]) / max(beam_power[i, j, :])) ** 2 56 | BL[i, 2] = BL[i, 2] + (max(beam_power[i, j, train_sorted[61 : 64]]) / max(beam_power[i, j, :])) ** 2 57 | BL[i, 3] = BL[i, 3] + 
(max(beam_power[i, j, train_sorted[60 : 64]]) / max(beam_power[i, j, :])) ** 2 58 | BL[i, 4] = BL[i, 4] + (max(beam_power[i, j, train_sorted[59 : 64]]) / max(beam_power[i, j, :])) ** 2 59 | # calculate CDF 60 | pdf_index = np.floor(((beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2) * 100) 61 | pdf_index = pdf_index.astype(int) 62 | pdf[i, pdf_index : 101] = pdf[i, pdf_index : 101] + 1 63 | running_loss += loss.data.cpu() 64 | acur = float(P) / (P + N) 65 | losses = running_loss / batch_num 66 | BL = BL / batch_num / batch_size 67 | print("Accuracy: %.3f" % (acur)) 68 | print("Loss: %.3f" % (losses)) 69 | print("Beam power loss:") 70 | print(BL.T) 71 | #print(pdf) 72 | return acur, losses, rank, BL, pdf 73 | 74 | def main(): 75 | version_name = 'v1_25dBm_evaluation' 76 | info = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_' + version_name 77 | print(info) 78 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 79 | #device = 'cpu' 80 | print(device) 81 | 82 | t = 5 83 | batch_size = 16 84 | 85 | print('batch_size:%d'%(batch_size)) 86 | 87 | acur_eval = [] 88 | loss_eval = [] 89 | BL_eval = np.zeros((10, 5, t)) 90 | rank_eval = np.zeros((10, 64, t)) 91 | pdf_eval = np.zeros((10, 101, t)) 92 | 93 | for tt in range(t): 94 | print('Train %d times' % (tt)) 95 | model_name = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_' + str(tt) + '_MODEL.pkl' 96 | model = torch.load(model_name) 97 | model.to(device) 98 | model.eval() 99 | 100 | eval_loader_name = '/usr/mk/TCOM/dataset/testing_25dBm' 101 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 102 | eval_loader.reset() 103 | acur, losses, rank, BL, pdf = eval(model, eval_loader, device) 104 | acur_eval.append(acur) 105 | loss_eval.append(losses) 106 | rank_eval[:, :, tt] = np.squeeze(rank) 107 | BL_eval[:, :, tt] = np.squeeze(BL) 108 | pdf_eval[:, :, tt] = np.squeeze(pdf) 109 | 110 | mat_name = info + '.mat' 111 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval, 'pdf_eval': pdf_eval}) 112 | 113 | if __name__ == '__main__': 114 | main() -------------------------------------------------------------------------------- /deep_learning_model/evaluation_model/beam_evaluation.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | batch_size = 16 20 | # save the performance of 5 additional narrow beam trainings 21 | BL = np.zeros((10,5)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | # cumulative probability function 26 | pdf = np.zeros((10, 101)) 27 | while not done: 28 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 29 | if count==True: 30 | batch_num += 1 31 | out_tensor = model(channel2) 32 | loss = 0 33 | for loss_count in range(10): 34 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 35 | out_tensor_np = out_tensor.cpu().detach().numpy() 36 | gt_labels = 
label_nonoise_m.cpu().detach().numpy() 37 | gt_labels = np.float32(gt_labels) 38 | gt_labels = gt_labels.transpose(1, 0) 39 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 40 | beam_power = beam_power.transpose(1, 0, 2) 41 | out_shape = gt_labels.shape 42 | for i in range(out_shape[0]): 43 | for j in range(out_shape[1]): 44 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 45 | train_index = np.argmax(train_ans) 46 | train_sorted = np.argsort(train_ans) 47 | rank_index = np.where(train_sorted == gt_labels[i, j]) 48 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 49 | if train_index == gt_labels[i, j]: 50 | P = P + 1 51 | else: 52 | N = N + 1 53 | # perform 5 additional narrow beam trainings 54 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 55 | BL[i, 1] = BL[i, 1] + (max(beam_power[i, j, train_sorted[62 : 64]]) / max(beam_power[i, j, :])) ** 2 56 | BL[i, 2] = BL[i, 2] + (max(beam_power[i, j, train_sorted[61 : 64]]) / max(beam_power[i, j, :])) ** 2 57 | BL[i, 3] = BL[i, 3] + (max(beam_power[i, j, train_sorted[60 : 64]]) / max(beam_power[i, j, :])) ** 2 58 | BL[i, 4] = BL[i, 4] + (max(beam_power[i, j, train_sorted[59 : 64]]) / max(beam_power[i, j, :])) ** 2 59 | # calculate CDF 60 | pdf_index = np.floor(((beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2) * 100) 61 | pdf_index = pdf_index.astype(int) 62 | pdf[i, pdf_index : 101] = pdf[i, pdf_index : 101] + 1 63 | running_loss += loss.data.cpu() 64 | acur = float(P) / (P + N) 65 | losses = running_loss / batch_num 66 | BL = BL / batch_num / batch_size 67 | print("Accuracy: %.3f" % (acur)) 68 | print("Loss: %.3f" % (losses)) 69 | print("Beam power loss:") 70 | print(BL.T) 71 | #print(pdf) 72 | return acur, losses, rank, BL, pdf 73 | 74 | def main(): 75 | version_name = 'v1_25dBm_evaluation' 76 | info = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_' + version_name 77 | print(info) 78 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 79 | #device = 'cpu' 80 | print(device) 81 | 82 | t = 5 83 | batch_size = 16 84 | 85 | print('batch_size:%d'%(batch_size)) 86 | 87 | acur_eval = [] 88 | loss_eval = [] 89 | BL_eval = np.zeros((10, 5, t)) 90 | rank_eval = np.zeros((10, 64, t)) 91 | pdf_eval = np.zeros((10, 101, t)) 92 | 93 | for tt in range(t): 94 | print('Train %d times' % (tt)) 95 | model_name = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_v1_25dBm_' + str(tt) + '_MODEL.pkl' 96 | model = torch.load(model_name) 97 | model.to(device) 98 | model.eval() 99 | 100 | eval_loader_name = '/usr/mk/TCOM/dataset/testing_25dBm' 101 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 102 | eval_loader.reset() 103 | acur, losses, rank, BL, pdf = eval(model, eval_loader, device) 104 | acur_eval.append(acur) 105 | loss_eval.append(losses) 106 | rank_eval[:, :, tt] = np.squeeze(rank) 107 | BL_eval[:, :, tt] = np.squeeze(BL) 108 | pdf_eval[:, :, tt] = np.squeeze(pdf) 109 | 110 | mat_name = info + '.mat' 111 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval, 'pdf_eval': pdf_eval}) 112 | 113 | if __name__ == '__main__': 114 | main() -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/beam_evaluation.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 
5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | M = 0 19 | batch_size = 16 20 | # save the performance of 5 additional narrow beam trainings 21 | BL = np.zeros((10,5)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | # cumulative probability function 26 | pdf = np.zeros((10, 101)) 27 | while not done: 28 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 29 | if count==True: 30 | batch_num += 1 31 | out_tensor = model(channel2, device) 32 | loss = 0 33 | for loss_count in range(10): 34 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 35 | out_tensor_np = out_tensor.cpu().detach().numpy() 36 | gt_labels = label_nonoise_m.cpu().detach().numpy() 37 | gt_labels = np.float32(gt_labels) 38 | gt_labels = gt_labels.transpose(1, 0) 39 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 40 | beam_power = beam_power.transpose(1, 0, 2) 41 | out_shape = gt_labels.shape 42 | for i in range(out_shape[0]): 43 | for j in range(out_shape[1]): 44 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 45 | train_index = np.argmax(train_ans) 46 | train_sorted = np.argsort(train_ans) 47 | rank_index = np.where(train_sorted == gt_labels[i, j]) 48 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 49 | if train_index == gt_labels[i, j]: 50 | P = P + 1 51 | else: 52 | N = N + 1 53 | # perform 5 additional narrow beam trainings 54 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 55 | BL[i, 1] = BL[i, 1] + (max(beam_power[i, j, train_sorted[62 : 64]]) / max(beam_power[i, j, :])) ** 2 56 | BL[i, 2] = BL[i, 2] + (max(beam_power[i, j, train_sorted[61 : 64]]) / max(beam_power[i, j, :])) ** 2 57 | BL[i, 3] = BL[i, 3] + (max(beam_power[i, j, train_sorted[60 : 64]]) / max(beam_power[i, j, :])) ** 2 58 | BL[i, 4] = BL[i, 4] + (max(beam_power[i, j, train_sorted[59 : 64]]) / max(beam_power[i, j, :])) ** 2 59 | # calculate CDF 60 | pdf_index = np.floor(((beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2) * 100) 61 | pdf_index = pdf_index.astype(int) 62 | pdf[i, pdf_index : 101] = pdf[i, pdf_index : 101] + 1 63 | running_loss += loss.data.cpu() 64 | acur = float(P) / (P + N) 65 | losses = running_loss / batch_num 66 | BL = BL / batch_num / batch_size 67 | print("Accuracy: %.3f" % (acur)) 68 | print("Loss: %.3f" % (losses)) 69 | print("Beam power loss:") 70 | print(BL.T) 71 | #print(pdf) 72 | return acur, losses, rank, BL, pdf 73 | 74 | def main(): 75 | version_name = 'v1_k=7_evaluation' 76 | info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_' + version_name 77 | print(info) 78 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 79 | #device = 'cpu' 80 | print(device) 81 | 82 | t = 5 83 | batch_size = 16 84 | 85 | print('batch_size:%d'%(batch_size)) 86 | 87 | acur_eval = [] 88 | loss_eval = [] 89 | BL_eval = np.zeros((10, 5, t)) 90 | rank_eval = np.zeros((10, 64, t)) 91 | pdf_eval = np.zeros((10, 101, t)) 92 | 93 | for tt in range(t): 94 | print('Train %d times' % (tt)) 95 | model_name = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_MPC_k=7_' + str(tt) + '_MODEL.pkl' 96 | model = 
torch.load(model_name) 97 | model.to(device) 98 | model.eval() 99 | 100 | eval_loader_name = '/usr/mk/TCOM/dataset/testing_15dBm' 101 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 102 | eval_loader.reset() 103 | acur, losses, rank, BL, pdf = eval(model, eval_loader, device) 104 | acur_eval.append(acur) 105 | loss_eval.append(losses) 106 | rank_eval[:, :, tt] = np.squeeze(rank) 107 | BL_eval[:, :, tt] = np.squeeze(BL) 108 | pdf_eval[:, :, tt] = np.squeeze(pdf) 109 | 110 | mat_name = info + '.mat' 111 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval, 'pdf_eval': pdf_eval}) 112 | 113 | if __name__ == '__main__': 114 | main() -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/beam_evaluation.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | M = 0 19 | batch_size = 16 20 | # save the performance of 5 additional narrow beam trainings 21 | BL = np.zeros((10,5)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | # cumulative probability function 26 | pdf = np.zeros((10, 101)) 27 | while not done: 28 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, label_widebeam_m, done, count = loader.next_batch() 29 | if count==True: 30 | batch_num += 1 31 | out_tensor, out_tensor2 = model(channel2, device) 32 | loss = 0 33 | for loss_count in range(10): 34 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 35 | out_tensor_np = out_tensor.cpu().detach().numpy() 36 | gt_labels = label_nonoise_m.cpu().detach().numpy() 37 | gt_labels = np.float32(gt_labels) 38 | gt_labels = gt_labels.transpose(1, 0) 39 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 40 | beam_power = beam_power.transpose(1, 0, 2) 41 | out_shape = gt_labels.shape 42 | for i in range(out_shape[0]): 43 | for j in range(out_shape[1]): 44 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 45 | train_index = np.argmax(train_ans) 46 | train_sorted = np.argsort(train_ans) 47 | rank_index = np.where(train_sorted == gt_labels[i, j]) 48 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 49 | if train_index == gt_labels[i, j]: 50 | P = P + 1 51 | else: 52 | N = N + 1 53 | # perform 5 additional narrow beam trainings 54 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 55 | BL[i, 1] = BL[i, 1] + (max(beam_power[i, j, train_sorted[62 : 64]]) / max(beam_power[i, j, :])) ** 2 56 | BL[i, 2] = BL[i, 2] + (max(beam_power[i, j, train_sorted[61 : 64]]) / max(beam_power[i, j, :])) ** 2 57 | BL[i, 3] = BL[i, 3] + (max(beam_power[i, j, train_sorted[60 : 64]]) / max(beam_power[i, j, :])) ** 2 58 | BL[i, 4] = BL[i, 4] + (max(beam_power[i, j, train_sorted[59 : 64]]) / max(beam_power[i, j, :])) ** 2 59 | # calculate CDF 60 | pdf_index = np.floor(((beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2) * 100) 61 | pdf_index = pdf_index.astype(int) 62 | pdf[i, pdf_index : 101] = 
pdf[i, pdf_index : 101] + 1 63 | running_loss += loss.data.cpu() 64 | acur = float(P) / (P + N) 65 | losses = running_loss / batch_num 66 | BL = BL / batch_num / batch_size 67 | print("Accuracy: %.3f" % (acur)) 68 | print("Loss: %.3f" % (losses)) 69 | print("Beam power loss:") 70 | print(BL.T) 71 | #print(pdf) 72 | return acur, losses, rank, BL, pdf 73 | 74 | def main(): 75 | version_name = 'v1_k=7_evaluation' 76 | info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_' + version_name 77 | print(info) 78 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 79 | #device = 'cpu' 80 | print(device) 81 | 82 | t = 5 83 | batch_size = 16 84 | 85 | print('batch_size:%d'%(batch_size)) 86 | 87 | acur_eval = [] 88 | loss_eval = [] 89 | BL_eval = np.zeros((10, 5, t)) 90 | rank_eval = np.zeros((10, 64, t)) 91 | pdf_eval = np.zeros((10, 101, t)) 92 | 93 | for tt in range(t): 94 | print('Train %d times' % (tt)) 95 | model_name = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_k=7_' + str(tt) + '_MODEL.pkl' 96 | model = torch.load(model_name) 97 | model.to(device) 98 | model.eval() 99 | 100 | eval_loader_name = '/usr/mk/TCOM/dataset/testing_15dBm' 101 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 102 | eval_loader.reset() 103 | acur, losses, rank, BL, pdf = eval(model, eval_loader, device) 104 | acur_eval.append(acur) 105 | loss_eval.append(losses) 106 | rank_eval[:, :, tt] = np.squeeze(rank) 107 | BL_eval[:, :, tt] = np.squeeze(BL) 108 | pdf_eval[:, :, tt] = np.squeeze(pdf) 109 | 110 | mat_name = info + '.mat' 111 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval, 'pdf_eval': pdf_eval}) 112 | 113 | if __name__ == '__main__': 114 | main() -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/beam_evaluation_MPC_K=3.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | BL = np.zeros((10,5)) 20 | running_loss = 0 21 | batch_num = 0 22 | rank = np.zeros((10,64)) 23 | pdf = np.zeros((10, 101)) 24 | batch_size = 16 25 | # save original predicted probabilities 26 | p = np.zeros((4096, 16)) 27 | while not done: 28 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, label_widebeam_m, done, count = loader.next_batch() 29 | if count==True: 30 | batch_num += 1 31 | out_tensor, out_tensor2 = model(channel2, device) 32 | loss = 0 33 | for loss_count in range(10): 34 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 35 | out_tensor_np = out_tensor.cpu().detach().numpy() 36 | out_tensor2_np = out_tensor2.cpu().detach().numpy() 37 | gt_labels = label_nonoise_m.cpu().detach().numpy() 38 | gt_labels = np.float32(gt_labels) 39 | gt_labels = gt_labels.transpose(1, 0) 40 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 41 | beam_power = beam_power.transpose(1, 0, 2) 42 | out_shape = gt_labels.shape 43 | 44 | # save predicted probabilities 45 | 
p[(batch_num - 1) * batch_size: batch_num * batch_size, :] = np.squeeze(out_tensor2_np[0, :, :]) 46 | 47 | for i in range(out_shape[0]): 48 | for j in range(out_shape[1]): 49 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 50 | train_index = np.argmax(train_ans) 51 | train_sorted = np.argsort(train_ans) 52 | rank_index = np.where(train_sorted == gt_labels[i, j]) 53 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 54 | if train_index == gt_labels[i, j]: 55 | P = P + 1 56 | else: 57 | N = N + 1 58 | 59 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 60 | BL[i, 1] = BL[i, 1] + (max(beam_power[i, j, train_sorted[62 : 64]]) / max(beam_power[i, j, :])) ** 2 61 | BL[i, 2] = BL[i, 2] + (max(beam_power[i, j, train_sorted[61 : 64]]) / max(beam_power[i, j, :])) ** 2 62 | BL[i, 3] = BL[i, 3] + (max(beam_power[i, j, train_sorted[60 : 64]]) / max(beam_power[i, j, :])) ** 2 63 | BL[i, 4] = BL[i, 4] + (max(beam_power[i, j, train_sorted[59 : 64]]) / max(beam_power[i, j, :])) ** 2 64 | pdf_index = np.floor(((beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2) * 100) 65 | pdf_index = pdf_index.astype(int) 66 | pdf[i, pdf_index : 101] = pdf[i, pdf_index : 101] + 1 67 | 68 | running_loss += loss.data.cpu() 69 | acur = float(P) / (P + N) 70 | losses = running_loss / batch_num 71 | BL = BL / batch_num / 16 72 | print("Accuracy: %.3f" % (acur)) 73 | print("Loss: %.3f" % (losses)) 74 | print("Beam power loss:") 75 | print(BL.T) 76 | print(pdf) 77 | return acur, losses, rank, BL, pdf, p 78 | 79 | def main(): 80 | version_name = 'v1_K=3_probability' 81 | info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_' + version_name 82 | print(info) 83 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 84 | print(device) 85 | 86 | t = 1 87 | batch_size = 16 88 | 89 | print('batch_size:%d'%(batch_size)) 90 | 91 | acur_eval = [] 92 | loss_eval = [] 93 | BL_eval = np.zeros((10, 5, t)) 94 | rank_eval = np.zeros((10, 64, t)) 95 | pdf_eval = np.zeros((10, 101, t)) 96 | pp = np.zeros((4096, 16, t)) 97 | 98 | 99 | for tt in range(t): 100 | print('Train %d times' % (tt)) 101 | model_name = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_v1_k3_evaluation_MODEL.pkl' 102 | model = torch.load(model_name) 103 | model.to(device) 104 | model.eval() 105 | 106 | eval_loader = Dataloader(path='/usr/mk/TCOM/dataset/testing_15dBm', batch_size=batch_size, device=device) 107 | eval_loader.reset() 108 | acur, losses, rank, BL, pdf, p = eval(model, eval_loader, device) 109 | acur_eval.append(acur) 110 | loss_eval.append(losses) 111 | rank_eval[:, :, tt] = np.squeeze(rank) 112 | BL_eval[:, :, tt] = np.squeeze(BL) 113 | pdf_eval[:, :, tt] = np.squeeze(pdf) 114 | pp[:, :, tt] = np.squeeze(p) 115 | 116 | mat_name = info + '.mat' 117 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 118 | 'rank_eval': rank_eval, 'BL_eval': BL_eval, 'pdf_eval': pdf_eval, 'p_eval': pp}) 119 | 120 | if __name__ == '__main__': 121 | main() -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/dataloader.py: -------------------------------------------------------------------------------- 1 | import os 2 | from os import listdir 3 | from os.path import isfile, join 4 | import scipy.io as sio 5 | import numpy as np 6 | import torch 7 | import math 8 | from collections import Counter 9 | 10 | # detailed code comments can be seen in the folder basic_model 11 | class 
Dataloader(): 12 | def __init__(self, path='', batch_size=32, device='cpu'): 13 | self.batch_size = batch_size 14 | self.device = device 15 | self.files = [join(path, f) for f in listdir(path) if isfile(join(path, f))] 16 | for i, f in enumerate(self.files): 17 | if not f.split('.')[-1] == 'mat': 18 | del (self.files[i]) 19 | self.reset() 20 | 21 | def reset(self): 22 | self.done = False 23 | self.unvisited_files = [f for f in self.files] 24 | self.buffer1 = np.zeros((0, 2, 10, 16)) 25 | self.buffer2 = np.zeros((0, 2, 10, 16)) 26 | self.buffer_label_m = np.zeros((0, 10)) 27 | self.buffer_label_nonoise_m = np.zeros((0, 10)) 28 | self.buffer_beam_power_m = np.zeros((0, 10, 64)) 29 | self.buffer_beam_power_nonoise_m = np.zeros((0, 10, 64)) 30 | 31 | def load(self, file): 32 | data = sio.loadmat(file) 33 | channel1 = data['channel_data1'] 34 | channel2 = data['channel_data2'] 35 | channel1 = np.transpose(channel1, (1, 0, 2, 3)) 36 | channel1 = channel1[:, :, :, 0 : 61 : 4] 37 | channel2 = np.transpose(channel2, (1, 0, 2, 3)) 38 | labels = data['max_id_sery_m'] - 1 39 | labels_nonoise = data['max_id_sery_no_noise_m'] - 1 40 | beam_power_m = data['rsrp_sery_m'] 41 | beam_power_nonoise_m = data['rsrp_sery_no_noise_m'] 42 | return channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, labels_nonoise 43 | 44 | def pre_process(self, channels): 45 | return channels 46 | 47 | def next_batch(self): 48 | done = False 49 | count = True 50 | while self.buffer1.shape[0] < self.batch_size: 51 | if len(self.unvisited_files) == 0: 52 | done = True 53 | count = False 54 | break 55 | channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, labels_nonoise = self.load( 56 | self.unvisited_files.pop(0)) 57 | 58 | del self.buffer1 59 | del self.buffer2 60 | del self.buffer_label_m 61 | del self.buffer_beam_power_m 62 | del self.buffer_beam_power_nonoise_m 63 | del self.buffer_label_nonoise_m 64 | 65 | self.buffer1 = np.zeros((0, 2, 10, 16)) 66 | self.buffer2 = np.zeros((0, 2, 10, 16)) 67 | self.buffer_label_m = np.zeros((0, 10)) 68 | self.buffer_beam_power_m = np.zeros((0, 10, 64)) 69 | self.buffer_beam_power_nonoise_m = np.zeros((0, 10, 64)) 70 | self.buffer_label_nonoise_m = np.zeros((0, 10)) 71 | 72 | self.buffer1 = np.concatenate((self.buffer1, channel1), axis=0) 73 | self.buffer2 = np.concatenate((self.buffer2, channel2), axis=0) 74 | self.buffer_label_m = np.concatenate((self.buffer_label_m, labels), axis=0) 75 | self.buffer_beam_power_m = np.concatenate((self.buffer_beam_power_m, beam_power_m), axis=0) 76 | self.buffer_beam_power_nonoise_m = np.concatenate((self.buffer_beam_power_nonoise_m, beam_power_nonoise_m), axis=0) 77 | self.buffer_label_nonoise_m = np.concatenate((self.buffer_label_nonoise_m, labels_nonoise), axis=0) 78 | 79 | out_size = min(self.batch_size, self.buffer1.shape[0]) 80 | batch_channel1 = self.buffer1[0:out_size, :, :, :] 81 | batch_channel2 = self.buffer2[0:out_size, :, :, :] 82 | batch_labels_m = np.squeeze(self.buffer_label_m[0:out_size, :]) 83 | batch_beam_power_m = np.squeeze(self.buffer_beam_power_m[0:out_size, :, :]) 84 | batch_beam_power_nonoise_m = np.squeeze(self.buffer_beam_power_nonoise_m[0:out_size, :, :]) 85 | batch_labels_nonoise_m = np.squeeze(self.buffer_label_nonoise_m[0:out_size, :]) 86 | 87 | self.buffer1 = np.delete(self.buffer1, np.s_[0:out_size], 0) 88 | self.buffer2 = np.delete(self.buffer2, np.s_[0:out_size], 0) 89 | self.buffer_label_m = np.delete(self.buffer_label_m, np.s_[0:out_size], 0) 90 | self.buffer_beam_power_m = 
np.delete(self.buffer_beam_power_m, np.s_[0:out_size], 0) 91 | self.buffer_beam_power_nonoise_m = np.delete(self.buffer_beam_power_nonoise_m, np.s_[0:out_size], 0) 92 | self.buffer_label_nonoise_m = np.delete(self.buffer_label_nonoise_m, np.s_[0:out_size], 0) 93 | 94 | batch_channel1 = np.float32(batch_channel1) 95 | batch_channel2 = np.float32(batch_channel2) 96 | batch_labels_m = batch_labels_m.astype(long) 97 | batch_beam_power_m = np.float32(batch_beam_power_m) 98 | batch_beam_power_nonoise_m = np.float32(batch_beam_power_nonoise_m) 99 | batch_labels_nonoise_m = batch_labels_nonoise_m.astype(long) 100 | 101 | return torch.from_numpy(batch_channel1).to(self.device), torch.from_numpy(batch_channel2).to( 102 | self.device), torch.from_numpy(batch_labels_m).to( 103 | self.device), torch.from_numpy(batch_beam_power_m).to( 104 | self.device), torch.from_numpy(batch_beam_power_nonoise_m).to( 105 | self.device), torch.from_numpy(batch_labels_nonoise_m).to( 106 | self.device), done, count -------------------------------------------------------------------------------- /deep_learning_model/basic_model/beam_evaluation_baseline2.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | batch_size = 16 20 | # save the performance of 5 additional narrow beam trainings 21 | BL = np.zeros((10,6)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | # accumulative probability function 26 | pdf = np.zeros((10, 101)) 27 | while not done: 28 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 29 | if count==True: 30 | batch_num += 1 31 | out_tensor = model(channel1)# baseline 2 32 | loss = 0 33 | for loss_count in range(10): 34 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 35 | out_tensor_np = out_tensor.cpu().detach().numpy() 36 | gt_labels = label_nonoise_m.cpu().detach().numpy() 37 | gt_labels = np.float32(gt_labels) 38 | gt_labels = gt_labels.transpose(1, 0) 39 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 40 | beam_power = beam_power.transpose(1, 0, 2) 41 | out_shape = gt_labels.shape 42 | for i in range(out_shape[0]): 43 | for j in range(out_shape[1]): 44 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 45 | train_index = np.argmax(train_ans) 46 | train_sorted = np.argsort(train_ans) 47 | rank_index = np.where(train_sorted == gt_labels[i, j]) 48 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 49 | if train_index == gt_labels[i, j]: 50 | P = P + 1 51 | else: 52 | N = N + 1 53 | pdf_index = np.floor(((beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2) * 100) 54 | pdf_index = pdf_index.astype(int) 55 | pdf[i, pdf_index : 101] = pdf[i, pdf_index : 101] + 1 56 | # Note that the evaluation of baseline 2 is different 57 | # since it has already trained 16 narrow beams 58 | # additional narrow beam training selects the optimal beam from (16 + K) trained beams 59 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_sorted[63: 64]] / max(beam_power[i, j, :])) ** 2 60 | train_ans[0 : 61 : 
4] = 0 61 | train_sorted = np.argsort(train_ans) 62 | BL[i, 1] = BL[i, 1] + (max(max(beam_power[i, j, train_sorted[63 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 63 | BL[i, 2] = BL[i, 2] + (max(max(beam_power[i, j, train_sorted[62 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 64 | BL[i, 3] = BL[i, 3] + (max(max(beam_power[i, j, train_sorted[61 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 65 | BL[i, 4] = BL[i, 4] + (max(max(beam_power[i, j, train_sorted[60 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 66 | BL[i, 5] = BL[i, 5] + (max(max(beam_power[i, j, train_sorted[59 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 67 | running_loss += loss.data.cpu() 68 | acur = float(P) / (P + N) 69 | losses = running_loss / batch_num 70 | BL = BL / batch_num / batch_size 71 | print("Accuracy: %.3f" % (acur)) 72 | print("Loss: %.3f" % (losses)) 73 | print("Beam power loss:") 74 | print(BL.T) 75 | #print(pdf) 76 | return acur, losses, rank, BL, pdf 77 | 78 | def main(): 79 | version_name = 'v1_10dBm_evaluation' 80 | info = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_' + version_name 81 | print(info) 82 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 83 | print(device) 84 | 85 | t = 5 86 | batch_size = 16 87 | 88 | print('batch_size:%d'%(batch_size)) 89 | 90 | acur_eval = [] 91 | loss_eval = [] 92 | BL_eval = np.zeros((10, 6, t)) 93 | rank_eval = np.zeros((10, 64, t)) 94 | pdf_eval = np.zeros((10, 101, t)) 95 | 96 | for tt in range(t): 97 | print('Train %d times' % (tt)) 98 | model_name = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_' + str(tt) + '_MODEL.pkl' 99 | model = torch.load(model_name) 100 | model.to(device) 101 | model.eval() 102 | 103 | eval_loader_name = '/usr/mk/TCOM/dataset/testing_10dBm' 104 | eval_loader = Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 105 | eval_loader.reset() 106 | acur, losses, rank, BL, pdf = eval(model, eval_loader, device) 107 | acur_eval.append(acur) 108 | loss_eval.append(losses) 109 | rank_eval[:, :, tt] = np.squeeze(rank) 110 | BL_eval[:, :, tt] = np.squeeze(BL) 111 | pdf_eval[:, :, tt] = np.squeeze(pdf) 112 | 113 | mat_name = info + '.mat' 114 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval, 'pdf_eval': pdf_eval}) 115 | 116 | if __name__ == '__main__': 117 | main() -------------------------------------------------------------------------------- /deep_learning_model/evaluation_model/beam_evaluation_baseline2.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | loader.reset() 14 | criterion = nn.CrossEntropyLoss() 15 | done = False 16 | P = 0 17 | N = 0 18 | #M = 0 19 | batch_size = 16 20 | # save the performance of 5 additional narrow beam trainings 21 | BL = np.zeros((10,6)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | # accumulative probability function 26 | pdf = np.zeros((10, 101)) 27 | while not done: 28 | channel1, channel2, label_m, beam_power_m, 
beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 29 | if count==True: 30 | batch_num += 1 31 | out_tensor = model(channel1)# baseline 2 32 | loss = 0 33 | for loss_count in range(10): 34 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 35 | out_tensor_np = out_tensor.cpu().detach().numpy() 36 | gt_labels = label_nonoise_m.cpu().detach().numpy() 37 | gt_labels = np.float32(gt_labels) 38 | gt_labels = gt_labels.transpose(1, 0) 39 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 40 | beam_power = beam_power.transpose(1, 0, 2) 41 | out_shape = gt_labels.shape 42 | for i in range(out_shape[0]): 43 | for j in range(out_shape[1]): 44 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 45 | train_index = np.argmax(train_ans) 46 | train_sorted = np.argsort(train_ans) 47 | rank_index = np.where(train_sorted == gt_labels[i, j]) 48 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 49 | if train_index == gt_labels[i, j]: 50 | P = P + 1 51 | else: 52 | N = N + 1 53 | pdf_index = np.floor(((beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2) * 100) 54 | pdf_index = pdf_index.astype(int) 55 | pdf[i, pdf_index : 101] = pdf[i, pdf_index : 101] + 1 56 | # Note that the evaluation of baseline 2 is different 57 | # since it has already trained 16 narrow beams 58 | # additional narrow beam training selects the optimal beam from (16 + K) trained beams 59 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_sorted[63: 64]] / max(beam_power[i, j, :])) ** 2 60 | train_ans[0 : 61 : 4] = 0 61 | train_sorted = np.argsort(train_ans) 62 | BL[i, 1] = BL[i, 1] + (max(max(beam_power[i, j, train_sorted[63 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 63 | BL[i, 2] = BL[i, 2] + (max(max(beam_power[i, j, train_sorted[62 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 64 | BL[i, 3] = BL[i, 3] + (max(max(beam_power[i, j, train_sorted[61 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 65 | BL[i, 4] = BL[i, 4] + (max(max(beam_power[i, j, train_sorted[60 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 66 | BL[i, 5] = BL[i, 5] + (max(max(beam_power[i, j, train_sorted[59 : 64]]), max(beam_power[i, j, 0 : 61 : 4])) / max(beam_power[i, j, :])) ** 2 67 | running_loss += loss.data.cpu() 68 | acur = float(P) / (P + N) 69 | losses = running_loss / batch_num 70 | BL = BL / batch_num / batch_size 71 | print("Accuracy: %.3f" % (acur)) 72 | print("Loss: %.3f" % (losses)) 73 | print("Beam power loss:") 74 | print(BL.T) 75 | #print(pdf) 76 | return acur, losses, rank, BL, pdf 77 | 78 | def main(): 79 | version_name = 'v1_15dBm_evaluation' 80 | info = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_' + version_name 81 | print(info) 82 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 83 | print(device) 84 | 85 | t = 5 86 | batch_size = 16 87 | 88 | print('batch_size:%d'%(batch_size)) 89 | 90 | acur_eval = [] 91 | loss_eval = [] 92 | BL_eval = np.zeros((10, 6, t)) 93 | rank_eval = np.zeros((10, 64, t)) 94 | pdf_eval = np.zeros((10, 101, t)) 95 | 96 | for tt in range(t): 97 | print('Train %d times' % (tt)) 98 | model_name = 'TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_15dBm_' + str(tt) + '_MODEL.pkl' 99 | model = torch.load(model_name) 100 | model.to(device) 101 | model.eval() 102 | 103 | eval_loader_name = '/usr/mk/TCOM/dataset/testing_15dBm' 104 | eval_loader = 
Dataloader(path=eval_loader_name, batch_size=batch_size, device=device) 105 | eval_loader.reset() 106 | acur, losses, rank, BL, pdf = eval(model, eval_loader, device) 107 | acur_eval.append(acur) 108 | loss_eval.append(losses) 109 | rank_eval[:, :, tt] = np.squeeze(rank) 110 | BL_eval[:, :, tt] = np.squeeze(BL) 111 | pdf_eval[:, :, tt] = np.squeeze(pdf) 112 | 113 | mat_name = info + '.mat' 114 | sio.savemat(mat_name, {'acur_eval': acur_eval, 'loss_eval': loss_eval, 'rank_eval': rank_eval, 'BL_eval': BL_eval, 'pdf_eval': pdf_eval}) 115 | 116 | if __name__ == '__main__': 117 | main() -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/train_beam_basic.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | # detailed code comments can be seen in the folder basic_model 13 | def eval(model, loader, device): 14 | loader.reset() 15 | criterion = nn.CrossEntropyLoss() 16 | done = False 17 | P = 0 18 | N = 0 19 | #M = 0 20 | batch_size = 16 21 | BL = np.zeros((10,1)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | while not done: 26 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 27 | if count==True: 28 | batch_num += 1 29 | out_tensor = model(channel2, device) 30 | loss = 0 31 | # for performance evaluation, only the loss of narrow beam predictions is focused 32 | for loss_count in range(10): 33 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 34 | out_tensor_np = out_tensor.cpu().detach().numpy() 35 | gt_labels = label_nonoise_m.cpu().detach().numpy() 36 | gt_labels = np.float32(gt_labels) 37 | gt_labels = gt_labels.transpose(1, 0) 38 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 39 | beam_power = beam_power.transpose(1, 0, 2) 40 | out_shape = gt_labels.shape 41 | for i in range(out_shape[0]): 42 | for j in range(out_shape[1]): 43 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 44 | train_index = np.argmax(train_ans) 45 | train_sorted = np.argsort(train_ans) 46 | rank_index = np.where(train_sorted == gt_labels[i, j]) 47 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 48 | if train_index == gt_labels[i, j]: 49 | P = P + 1 50 | else: 51 | N = N + 1 52 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 53 | running_loss += loss.data.cpu() 54 | acur = float(P) / (P + N) 55 | losses = running_loss / batch_num 56 | BL = BL / batch_num / batch_size 57 | print("Accuracy: %.3f" % (acur)) 58 | print("Loss: %.3f" % (losses)) 59 | print("Beam power loss:") 60 | print(BL.T) 61 | # print(rank) 62 | return acur, losses, rank, BL 63 | 64 | def main(): 65 | version_name = 'v1_k=7' 66 | info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_ONC_' + version_name 67 | print(info) 68 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 69 | #device = 'cpu' 70 | print(device) 71 | 72 | t = 5 73 | epoch = 80 74 | batch_size = 16 75 | 76 | print('batch_size:%d'%(batch_size)) 77 | loader = Dataloader(path='/usr/mk/TCOM/dataset/training_15dBm', batch_size=batch_size, device=device) 78 | eval_loader 
= Dataloader(path='/usr/mk/TCOM/dataset/testing_15dBm', batch_size=batch_size, device=device) 79 | 80 | criterion = nn.CrossEntropyLoss() 81 | 82 | for tt in range(t): 83 | print('Train %d times' % (tt)) 84 | lr = 0.0001 # learning rate 85 | model = Model_3D(N=15, K=32, Tx=4, Channel=2) 86 | model.to(device) 87 | min_loss = 1000000 88 | optimizer = torch.optim.Adam(model.parameters(), lr / 10, betas=(0.9, 0.999)) # use the sum of 10 losses 89 | lr_decay = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, 90 | verbose=True, threshold=0.0001, 91 | threshold_mode='rel', cooldown=0, min_lr=0.0000001, 92 | eps=1e-08) 93 | for name, param in model.named_parameters(): 94 | print('Name:', name, 'Size:', param.size()) 95 | 96 | for e in range(epoch): 97 | print('Train %d epoch'%(e)) 98 | loader.reset() 99 | eval_loader.reset() 100 | done = False 101 | running_loss = 0 102 | batch_num = 0 103 | while not done: 104 | channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 105 | if count == True: 106 | batch_num += 1 107 | out_tensor = model(channel2, device) 108 | # loss function for the basic scheme 109 | loss = 0 110 | for loss_count in range(10): 111 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), labels[:, loss_count]) 112 | loss.backward() 113 | optimizer.step() 114 | running_loss += loss.item() 115 | 116 | losses = running_loss / batch_num 117 | print('[%d] loss: %.3f' % 118 | (e + 1, losses)) 119 | model.eval() 120 | print('the evaling set:') 121 | acur, losses, rank, BL = eval(model, eval_loader, device) 122 | if losses < min_loss: 123 | min_loss = losses 124 | model_name = info + '_' + str(tt) + '_MODEL.pkl' 125 | torch.save(model, model_name) 126 | lr_decay.step(losses) 127 | model.train() 128 | 129 | if __name__ == '__main__': 130 | main() -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/train_beam_basic.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | # detailed code comments can be seen in the folder basic_model 13 | def eval(model, loader, device): 14 | loader.reset() 15 | criterion = nn.CrossEntropyLoss() 16 | done = False 17 | P = 0 18 | N = 0 19 | #M = 0 20 | batch_size = 16 21 | BL = np.zeros((10,1)) 22 | running_loss = 0 23 | batch_num = 0 24 | rank = np.zeros((10,64)) 25 | while not done: 26 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, label_widebeam_m, done, count = loader.next_batch() 27 | if count==True: 28 | batch_num += 1 29 | out_tensor1, out_tensor2 = model(channel2, device) 30 | loss = 0 31 | # for performance evaluation, only the loss of narrow beam predictions is focused 32 | for loss_count in range(10): 33 | loss += criterion(torch.squeeze(out_tensor1[loss_count, :, :]), label_nonoise_m[:, loss_count]) 34 | out_tensor_np = out_tensor1.cpu().detach().numpy() 35 | gt_labels = label_nonoise_m.cpu().detach().numpy() 36 | gt_labels = np.float32(gt_labels) 37 | gt_labels = gt_labels.transpose(1, 0) 38 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 39 | beam_power = beam_power.transpose(1, 0, 2) 40 | out_shape = 
gt_labels.shape 41 | for i in range(out_shape[0]): 42 | for j in range(out_shape[1]): 43 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 44 | train_index = np.argmax(train_ans) 45 | train_sorted = np.argsort(train_ans) 46 | rank_index = np.where(train_sorted == gt_labels[i, j]) 47 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 48 | if train_index == gt_labels[i, j]: 49 | P = P + 1 50 | else: 51 | N = N + 1 52 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 53 | running_loss += loss.data.cpu() 54 | acur = float(P) / (P + N) 55 | losses = running_loss / batch_num 56 | BL = BL / batch_num / batch_size 57 | print("Accuracy: %.3f" % (acur)) 58 | print("Loss: %.3f" % (losses)) 59 | print("Beam power loss:") 60 | print(BL.T) 61 | return acur, losses, rank, BL 62 | 63 | def main(): 64 | version_name = 'v1_k=7' 65 | info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_' + version_name 66 | print(info) 67 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 68 | print(device) 69 | 70 | t = 5 71 | epoch = 80 72 | batch_size = 16 73 | 74 | print('batch_size:%d'%(batch_size)) 75 | loader = Dataloader(path='/usr/mk/TCOM/dataset/training_15dBm', batch_size=batch_size, device=device) 76 | eval_loader = Dataloader(path='/usr/mk/TCOM/dataset/testing_15dBm', batch_size=batch_size, device=device) 77 | 78 | criterion = nn.CrossEntropyLoss() 79 | 80 | for tt in range(t): 81 | print('Train %d times' % (tt)) 82 | lr = 0.0001 # learning rate 83 | model = Model_3D(N=15, K=32, Tx=4, Channel=2) 84 | model.to(device) 85 | min_loss = 1000000 86 | optimizer = torch.optim.Adam(model.parameters(), lr / 10, betas=(0.9, 0.999)) # use the sum of 10 losses 87 | lr_decay = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, 88 | verbose=True, threshold=0.0001, 89 | threshold_mode='rel', cooldown=0, min_lr=0.0000001, 90 | eps=1e-08) 91 | for name, param in model.named_parameters(): 92 | print('Name:', name, 'Size:', param.size()) 93 | 94 | for e in range(epoch): 95 | print('Train %d epoch'%(e)) 96 | loader.reset() 97 | eval_loader.reset() 98 | done = False 99 | running_loss = 0 100 | batch_num = 0 101 | while not done: 102 | channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, label_nonoise_m, labels_widebeam_m, done, count = loader.next_batch() 103 | if count == True: 104 | batch_num += 1 105 | out_tensor1, out_tensor2 = model(channel2, device) 106 | loss = 0 107 | # for model training, the total loss is the sum of narrow and wide beam predictions 108 | for loss_count in range(10): 109 | loss += criterion(torch.squeeze(out_tensor1[loss_count, :, :]), labels[:, loss_count]) 110 | for loss_count in range(9): 111 | loss += criterion(torch.squeeze(out_tensor2[loss_count, :, :]), labels_widebeam_m[:, loss_count + 1]) 112 | loss.backward() 113 | optimizer.step() 114 | running_loss += loss.item() 115 | 116 | losses = running_loss / batch_num 117 | print('[%d] loss: %.3f' % 118 | (e + 1, losses)) 119 | model.eval() 120 | print('the evaling set:') 121 | acur, losses, rank, BL = eval(model, eval_loader, device) 122 | if losses < min_loss: 123 | min_loss = losses 124 | model_name = info + '_' + str(tt) + '_MODEL.pkl' 125 | torch.save(model, model_name) 126 | lr_decay.step(losses) 127 | model.train() 128 | 129 | if __name__ == '__main__': 130 | main() -------------------------------------------------------------------------------- /deep_learning_model/basic_model/dataloader.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | from os import listdir 3 | from os.path import isfile, join 4 | import scipy.io as sio 5 | import numpy as np 6 | import torch 7 | import math 8 | from collections import Counter 9 | 10 | 11 | class Dataloader(): 12 | # initialization 13 | def __init__(self, path='', batch_size=32, device='cpu'): 14 | self.batch_size = batch_size 15 | self.device = device 16 | # count file names 17 | self.files = [join(path, f) for f in listdir(path) if isfile(join(path, f))] 18 | for i, f in enumerate(self.files): 19 | if not f.split('.')[-1] == 'mat': 20 | del (self.files[i]) 21 | self.reset() 22 | 23 | # reset buffers 24 | def reset(self): 25 | self.done = False 26 | self.unvisited_files = [f for f in self.files] 27 | self.buffer1 = np.zeros((0, 2, 10, 16)) 28 | self.buffer2 = np.zeros((0, 2, 10, 16)) 29 | self.buffer_label_m = np.zeros((0, 10)) 30 | self.buffer_label_nonoise_m = np.zeros((0, 10)) 31 | self.buffer_beam_power_m = np.zeros((0, 10, 64)) 32 | self.buffer_beam_power_nonoise_m = np.zeros((0, 10, 64)) 33 | 34 | # load files 35 | def load(self, file): 36 | data = sio.loadmat(file) 37 | # channel1: baseline 1 by using uniformly sampled beams 38 | channel1 = data['channel_data1'] 39 | # channel2: proposed by using wide beams 40 | channel2 = data['channel_data2'] 41 | channel1 = np.transpose(channel1, (1, 0, 2, 3)) 42 | channel1 = channel1[:, :, :, 0 : 61 : 4] 43 | channel2 = np.transpose(channel2, (1, 0, 2, 3)) 44 | # classification label obtained by narrow beam search (imperfect due to noise) 45 | labels = data['max_id_sery_m'] - 1 46 | # evaluation label with no noise 47 | labels_nonoise = data['max_id_sery_no_noise_m'] - 1 48 | # beam power obtained by narrow beam search (imperfect due to noise) 49 | beam_power_m = data['rsrp_sery_m'] 50 | # evaluation beam power with no noise 51 | beam_power_nonoise_m = data['rsrp_sery_no_noise_m'] 52 | return channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, labels_nonoise 53 | 54 | def next_batch(self): 55 | 56 | # serial load data 57 | done = False 58 | count = True 59 | while self.buffer1.shape[0] < self.batch_size: 60 | # if finishing load data 61 | if len(self.unvisited_files) == 0: 62 | done = True 63 | count = False 64 | break 65 | channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, labels_nonoise = self.load( 66 | self.unvisited_files.pop(0)) 67 | 68 | del self.buffer1 69 | del self.buffer2 70 | del self.buffer_label_m 71 | del self.buffer_beam_power_m 72 | del self.buffer_beam_power_nonoise_m 73 | del self.buffer_label_nonoise_m 74 | 75 | # define buffers 76 | self.buffer1 = np.zeros((0, 2, 10, 16)) 77 | self.buffer2 = np.zeros((0, 2, 10, 16)) 78 | self.buffer_label_m = np.zeros((0, 10)) 79 | self.buffer_beam_power_m = np.zeros((0, 10, 64)) 80 | self.buffer_beam_power_nonoise_m = np.zeros((0, 10, 64)) 81 | self.buffer_label_nonoise_m = np.zeros((0, 10)) 82 | 83 | # load data into buffers 84 | self.buffer1 = np.concatenate((self.buffer1, channel1), axis=0) 85 | self.buffer2 = np.concatenate((self.buffer2, channel2), axis=0) 86 | self.buffer_label_m = np.concatenate((self.buffer_label_m, labels), axis=0) 87 | self.buffer_beam_power_m = np.concatenate((self.buffer_beam_power_m, beam_power_m), axis=0) 88 | self.buffer_beam_power_nonoise_m = np.concatenate((self.buffer_beam_power_nonoise_m, beam_power_nonoise_m), axis=0) 89 | self.buffer_label_nonoise_m = np.concatenate((self.buffer_label_nonoise_m, labels_nonoise), axis=0) 90 | 91 
| # get data from buffers 92 | out_size = min(self.batch_size, self.buffer1.shape[0]) 93 | batch_channel1 = self.buffer1[0:out_size, :, :, :] 94 | batch_channel2 = self.buffer2[0:out_size, :, :, :] 95 | batch_labels_m = np.squeeze(self.buffer_label_m[0:out_size, :]) 96 | batch_beam_power_m = np.squeeze(self.buffer_beam_power_m[0:out_size, :, :]) 97 | batch_beam_power_nonoise_m = np.squeeze(self.buffer_beam_power_nonoise_m[0:out_size, :, :]) 98 | batch_labels_nonoise_m = np.squeeze(self.buffer_label_nonoise_m[0:out_size, :]) 99 | 100 | self.buffer1 = np.delete(self.buffer1, np.s_[0:out_size], 0) 101 | self.buffer2 = np.delete(self.buffer2, np.s_[0:out_size], 0) 102 | self.buffer_label_m = np.delete(self.buffer_label_m, np.s_[0:out_size], 0) 103 | self.buffer_beam_power_m = np.delete(self.buffer_beam_power_m, np.s_[0:out_size], 0) 104 | self.buffer_beam_power_nonoise_m = np.delete(self.buffer_beam_power_nonoise_m, np.s_[0:out_size], 0) 105 | self.buffer_label_nonoise_m = np.delete(self.buffer_label_nonoise_m, np.s_[0:out_size], 0) 106 | 107 | # type conversion: float32 features and int64 labels to reduce memory overhead and match the torch dtypes expected downstream 108 | batch_channel1 = np.float32(batch_channel1) 109 | batch_channel2 = np.float32(batch_channel2) 110 | batch_labels_m = batch_labels_m.astype(np.int64) # np.int64 replaces the Python 2 builtin `long` 111 | batch_beam_power_m = np.float32(batch_beam_power_m) 112 | batch_beam_power_nonoise_m = np.float32(batch_beam_power_nonoise_m) 113 | batch_labels_nonoise_m = batch_labels_nonoise_m.astype(np.int64) 114 | 115 | # return data 116 | return torch.from_numpy(batch_channel1).to(self.device), torch.from_numpy(batch_channel2).to( 117 | self.device), torch.from_numpy(batch_labels_m).to( 118 | self.device), torch.from_numpy(batch_beam_power_m).to( 119 | self.device), torch.from_numpy(batch_beam_power_nonoise_m).to( 120 | self.device), torch.from_numpy(batch_labels_nonoise_m).to( 121 | self.device), done, count -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/dataloader.py: -------------------------------------------------------------------------------- 1 | import os 2 | from os import listdir 3 | from os.path import isfile, join 4 | import scipy.io as sio 5 | import numpy as np 6 | import torch 7 | import math 8 | from collections import Counter 9 | 10 | # detailed code comments can be found in the folder basic_model 11 | class Dataloader(): 12 | def __init__(self, path='', batch_size=32, device='cpu'): 13 | self.batch_size = batch_size 14 | self.device = device 15 | self.files = [join(path, f) for f in listdir(path) if isfile(join(path, f))] 16 | for i, f in enumerate(self.files): 17 | if not f.split('.')[-1] == 'mat': 18 | del (self.files[i]) 19 | self.reset() 20 | 21 | def reset(self): 22 | self.done = False 23 | self.unvisited_files = [f for f in self.files] 24 | self.buffer1 = np.zeros((0, 2, 10, 16)) 25 | self.buffer2 = np.zeros((0, 2, 10, 16)) 26 | self.buffer_label_m = np.zeros((0, 10)) 27 | self.buffer_label_widebeam_m = np.zeros((0, 10)) 28 | self.buffer_label_nonoise_m = np.zeros((0, 10)) 29 | self.buffer_beam_power_m = np.zeros((0, 10, 64)) 30 | self.buffer_beam_power_nonoise_m = np.zeros((0, 10, 64)) 31 | 32 | def load(self, file): 33 | data = sio.loadmat(file) 34 | channel1 = data['channel_data1'] 35 | channel2 = data['channel_data2'] 36 | channel1 = np.transpose(channel1, (1, 0, 2, 3)) 37 | channel1 = channel1[:, :, :, 0 : 61 : 4] 38 | channel2 = np.transpose(channel2, (1, 0, 2, 3)) 39 | labels = data['max_id_sery_m'] - 1 40 | labels_widebeam = data['max_id_sery_widebeam_m'] - 1
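# labels_widebeam: optimal wide beam index for each of the 10 instants (0-15 after the -1 shift);
# it is only used by the enhanced adaptive scheme, where a second LSTM/FC head (fc2, 16 outputs)
# is trained on these labels to predict the wide beam that guides the ONC/MPC beam selection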
41 | labels_nonoise = data['max_id_sery_no_noise_m'] - 1 42 | beam_power_m = data['rsrp_sery_m'] 43 | beam_power_nonoise_m = data['rsrp_sery_no_noise_m'] 44 | return channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, labels_nonoise, labels_widebeam 45 | 46 | def pre_process(self, channels): 47 | return channels 48 | 49 | def next_batch(self): 50 | done = False 51 | count = True 52 | while self.buffer1.shape[0] < self.batch_size: 53 | if len(self.unvisited_files) == 0: 54 | done = True 55 | count = False 56 | break 57 | channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, labels_nonoise, labels_widebeam = self.load( 58 | self.unvisited_files.pop(0)) 59 | 60 | del self.buffer1 61 | del self.buffer2 62 | del self.buffer_label_m 63 | del self.buffer_label_widebeam_m 64 | del self.buffer_beam_power_m 65 | del self.buffer_beam_power_nonoise_m 66 | del self.buffer_label_nonoise_m 67 | 68 | self.buffer1 = np.zeros((0, 2, 10, 16)) 69 | self.buffer2 = np.zeros((0, 2, 10, 16)) 70 | self.buffer_label_m = np.zeros((0, 10)) 71 | self.buffer_label_widebeam_m = np.zeros((0, 10)) 72 | self.buffer_beam_power_m = np.zeros((0, 10, 64)) 73 | self.buffer_beam_power_nonoise_m = np.zeros((0, 10, 64)) 74 | self.buffer_label_nonoise_m = np.zeros((0, 10)) 75 | 76 | self.buffer1 = np.concatenate((self.buffer1, channel1), axis=0) 77 | self.buffer2 = np.concatenate((self.buffer2, channel2), axis=0) 78 | self.buffer_label_m = np.concatenate((self.buffer_label_m, labels), axis=0) 79 | self.buffer_label_widebeam_m = np.concatenate((self.buffer_label_widebeam_m, labels_widebeam), axis=0) 80 | self.buffer_beam_power_m = np.concatenate((self.buffer_beam_power_m, beam_power_m), axis=0) 81 | self.buffer_beam_power_nonoise_m = np.concatenate((self.buffer_beam_power_nonoise_m, beam_power_nonoise_m), axis=0) 82 | self.buffer_label_nonoise_m = np.concatenate((self.buffer_label_nonoise_m, labels_nonoise), axis=0) 83 | 84 | out_size = min(self.batch_size, self.buffer1.shape[0]) 85 | batch_channel1 = self.buffer1[0:out_size, :, :, :] 86 | batch_channel2 = self.buffer2[0:out_size, :, :, :] 87 | batch_labels_m = np.squeeze(self.buffer_label_m[0:out_size, :]) 88 | batch_labels_widebeam_m = np.squeeze(self.buffer_label_widebeam_m[0:out_size, :]) 89 | batch_beam_power_m = np.squeeze(self.buffer_beam_power_m[0:out_size, :, :]) 90 | batch_beam_power_nonoise_m = np.squeeze(self.buffer_beam_power_nonoise_m[0:out_size, :, :]) 91 | batch_labels_nonoise_m = np.squeeze(self.buffer_label_nonoise_m[0:out_size, :]) 92 | 93 | self.buffer1 = np.delete(self.buffer1, np.s_[0:out_size], 0) 94 | self.buffer2 = np.delete(self.buffer2, np.s_[0:out_size], 0) 95 | self.buffer_label_m = np.delete(self.buffer_label_m, np.s_[0:out_size], 0) 96 | self.buffer_label_widebeam_m = np.delete(self.buffer_label_widebeam_m, np.s_[0:out_size], 0) 97 | self.buffer_beam_power_m = np.delete(self.buffer_beam_power_m, np.s_[0:out_size], 0) 98 | self.buffer_beam_power_nonoise_m = np.delete(self.buffer_beam_power_nonoise_m, np.s_[0:out_size], 0) 99 | self.buffer_label_nonoise_m = np.delete(self.buffer_label_nonoise_m, np.s_[0:out_size], 0) 100 | 101 | batch_channel1 = np.float32(batch_channel1) 102 | batch_channel2 = np.float32(batch_channel2) 103 | batch_labels_m = batch_labels_m.astype(long) 104 | batch_labels_widebeam_m = batch_labels_widebeam_m.astype(long) 105 | batch_beam_power_m = np.float32(batch_beam_power_m) 106 | batch_beam_power_nonoise_m = np.float32(batch_beam_power_nonoise_m) 107 | batch_labels_nonoise_m = 
batch_labels_nonoise_m.astype(long) 108 | 109 | return torch.from_numpy(batch_channel1).to(self.device), torch.from_numpy(batch_channel2).to( 110 | self.device), torch.from_numpy(batch_labels_m).to( 111 | self.device), torch.from_numpy(batch_beam_power_m).to( 112 | self.device), torch.from_numpy(batch_beam_power_nonoise_m).to( 113 | self.device), torch.from_numpy(batch_labels_nonoise_m).to( 114 | self.device), torch.from_numpy(batch_labels_widebeam_m).to( 115 | self.device), done, count -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Basic/model_3Dcov_basic.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | class Model_3D(nn.Module): 7 | 8 | def __init__(self, N=60, K=32, Tx=8, Channel=2): 9 | super(Model_3D, self).__init__() 10 | 11 | self.bn0 = nn.BatchNorm1d(2) 12 | self.conv1 = nn.Conv1d(in_channels=2, out_channels=64, 13 | kernel_size=(3), stride=(3), padding=(1)) 14 | self.bn1 = nn.BatchNorm1d(64) 15 | self.conv2 = nn.Conv1d(in_channels=64, out_channels=256, 16 | kernel_size=(3), stride=(1), padding=(1)) 17 | self.bn2 = nn.BatchNorm1d(256) 18 | 19 | # predict narrow beam 20 | self.lstm1 = nn.LSTM(input_size=256, hidden_size=256, num_layers=2, dropout=0.2) 21 | self.fc1 = nn.Linear(256, 64) 22 | self.drop = nn.Dropout(0.3) 23 | 24 | 25 | def forward(self, x, device): 26 | 27 | if 0: 28 | #ONC 29 | # trained wide beam number 30 | k = 7 31 | # batch size 32 | batch_size = 16 33 | # save narrow beam prediction results 34 | y = torch.zeros((10, 16, 64)).to(device) 35 | 36 | # candidate beam directions 37 | candidate_beam = torch.linspace(0, 15, steps=16) 38 | candidate_beam = candidate_beam.repeat(16, 1).to(device) 39 | 40 | # first loop for wide beam training numbers 41 | for i in range(10): 42 | 43 | # if i = 0, full wide beam training 44 | if i == 0: 45 | x_test = x[:, :, i, :] 46 | 47 | # CNN 48 | x_test = self.bn0(x_test) 49 | x_test = self.conv1(x_test) 50 | x_test = self.bn1(x_test) 51 | x_test = F.relu(x_test) 52 | x_test = self.conv2(x_test) 53 | x_test = self.bn2(x_test) 54 | x_test = F.relu(x_test) 55 | 56 | P_dim_size = x_test.shape[2] 57 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 58 | 59 | # predict narrow beam 60 | x_test = x_test.permute(2, 0, 1) 61 | y_test, (hn, cn) = self.lstm1(x_test) 62 | 63 | # else, partial wide beam training 64 | else: 65 | # select partial beams based on ONC 66 | x_test = torch.zeros((batch_size, 2, 16)).to(device) 67 | for b in range(batch_size): 68 | x_test[b, :, max_id[b, :]] = x[b, :, i, max_id[b, :]] 69 | 70 | #CNN 71 | x_test = self.bn0(x_test) 72 | x_test = self.conv1(x_test) 73 | x_test = self.bn1(x_test) 74 | x_test = F.relu(x_test) 75 | x_test = self.conv2(x_test) 76 | x_test = self.bn2(x_test) 77 | x_test = F.relu(x_test) 78 | 79 | P_dim_size = x_test.shape[2] 80 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 81 | 82 | # predict narrow beam 83 | x_test = x_test.permute(2, 0, 1) 84 | y_test, (hn, cn) = self.lstm1(x_test, (hn, cn)) 85 | 86 | # predict narrow beam 87 | y_test = self.drop(y_test) 88 | y_test = self.fc1(y_test) 89 | 90 | # ONC based beam selection 91 | max_value, max_id = torch.topk(y_test, k) 92 | max_id = torch.squeeze(max_id).to(torch.double) 93 | max_id = max_id[:, 0] / 4 - 0.375 94 | max_id = max_id.repeat(16, 1).T 95 | max_value, max_id = torch.topk(-torch.abs(candidate_beam - max_id), k) 
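# ONC mapping, step by step: each wide beam w covers narrow beams 4w .. 4w+3, so dividing the top-1
# narrow beam index by 4 and subtracting 0.375 places it on the wide-beam axis (the four narrow beams
# of wide beam w map to w-0.375, w-0.125, w+0.125, w+0.375); the second topk then keeps the k wide-beam
# indices closest to that position for the next training instant. Illustrative example: narrow beam 10
# maps to 10/4 - 0.375 = 2.125, so with k = 7 the selected wide beams are, in order of proximity, 2, 3, 1, 4, 0, 5, 6.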
96 | 97 | y[i, :, :] = y_test 98 | 99 | #if 0: 100 | # MPC 101 | # code structure is similar 102 | k = 7 103 | batch_size = 16 104 | y = torch.zeros((10, 16, 64)).to(device) 105 | 106 | candidate_beam = torch.linspace(0, 15, steps=16) 107 | candidate_beam = candidate_beam.repeat(16, 1).to(device) 108 | 109 | for i in range(10): 110 | 111 | if i == 0: 112 | x_test = x[:, :, i, :] 113 | 114 | x_test = self.bn0(x_test) 115 | x_test = self.conv1(x_test) 116 | x_test = self.bn1(x_test) 117 | x_test = F.relu(x_test) 118 | x_test = self.conv2(x_test) 119 | x_test = self.bn2(x_test) 120 | x_test = F.relu(x_test) 121 | 122 | P_dim_size = x_test.shape[2] 123 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 124 | 125 | x_test = x_test.permute(2, 0, 1) 126 | y_test, (hn, cn) = self.lstm1(x_test) 127 | 128 | else: 129 | # MPC based beam selection 130 | x_test = torch.zeros((batch_size, 2, 16)).to(device) 131 | for b in range(batch_size): 132 | x_test[b, :, max_id[b, :]] = x[b, :, i, max_id[b, :]] 133 | 134 | x_test = self.bn0(x_test) 135 | x_test = self.conv1(x_test) 136 | x_test = self.bn1(x_test) 137 | x_test = F.relu(x_test) 138 | x_test = self.conv2(x_test) 139 | x_test = self.bn2(x_test) 140 | x_test = F.relu(x_test) 141 | 142 | P_dim_size = x_test.shape[2] 143 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 144 | 145 | x_test = x_test.permute(2, 0, 1) 146 | y_test, (hn, cn) = self.lstm1(x_test, (hn, cn)) 147 | 148 | # MPC based beam selection 149 | y_test = self.drop(y_test) 150 | y_test = self.fc1(y_test) 151 | 152 | max_value, max_id = torch.topk( 153 | torch.squeeze(torch.sum(torch.reshape(F.softmax(y_test, dim=2), (1, 16, 16, 4)), dim=3)), k) 154 | 155 | y[i, :, :] = y_test 156 | 157 | return y 158 | -------------------------------------------------------------------------------- /supplementary_material/Beam_number/baseline3_beamnum.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | close all; 3 | clc 4 | 5 | % Baseline 3 6 | % Chiu et al., Active Learning and CSI Acquisition for mmWave Initial Alignment 7 | rng(1) 8 | 9 | N = 256; 10 | num = 256; 11 | 12 | % BS narrow beam number 13 | B = 256; 14 | % BS antenna number 15 | rx = 64; 16 | resolution = 256; % targeted resolution 17 | measure_t = 16; % number of beam training measurements per sample 18 | layer = log2(min(resolution, rx)); 19 | 20 | sector_start=-pi/2; % Sector start point 21 | sector_end=pi/2; % Sector end point 22 | % calculate candidate beams 23 | candidate_beam_angle = sector_start + (sector_end - sector_start) / B * [0.5 : 1 : B - 0.5]; 24 | candidate_beam = sin(candidate_beam_angle(end : -1 : 1)); 25 | candidate_beam = exp(-1i * pi * candidate_beam' * [0 : rx - 1]); 26 | 27 | power_ratio = zeros(10, 1, 5); 28 | pdf = zeros(101, 10, 1); 29 | noise_e = 112; % Equivalent noise including AWGN and NLOS components 30 | count = 0; 31 | 32 | for snr_e = 120 % Equivalent SNR calculated as (P - sigma^2) 33 | count = count + 1; 34 | for n = 1 : 16 35 | file_name = ['..\dataset\testing_15dBm_channel\data_TCOM(withLOSparameter)_16Tx_64Tx_RK8dB_' num2str(n) '.mat']; 36 | load(file_name); 37 | for i = 1 : 256 38 | for j = 1 : 10 39 | mm_channel = squeeze(channel_m(i, j, :)); 40 | LOS_parameter = LOS_parameters(i, j); 41 | 42 | for bbb = 1 : B 43 | beam_select(bbb) = candidate_beam(bbb, :) * mm_channel; 44 | end 45 | rsrp = abs(beam_select); 46 | [~, max_rsrp_location] = max(rsrp); 47 | 48 | pis = 1 / resolution * ones(resolution, 1); % initialize the posterior probabilities pi 49 | for t = 1 : measure_t % for t = 1,
2, ..., measure_t 50 | k = 0; 51 | l_star = 1; 52 | for l = 1 : layer % for l = 1, 2, ..., S 53 | pi_test = zeros(2 ^ l, 1); 54 | for test_num = 1 : 2 ^ l 55 | pi_test(test_num) = sum(pis(1 + (resolution / (2 ^ l)) * (test_num - 1) : (resolution / (2 ^ l)) * test_num)); 56 | end 57 | [max_pi, location] = max(pi_test); 58 | % if l == layer, no descendent is considered 59 | if(l == layer) 60 | l_final = l; 61 | k_final = location - 1; 62 | break; 63 | % when max_pi > 0.5, select descendent 64 | elseif(max_pi > 0.5) 65 | l_star = l; 66 | descendent1 = sum(pis(1 + (resolution / (2 ^ (l + 1))) * 2 * (location - 1) : (resolution / (2 ^ (l + 1))) * (2 * location - 1))); 67 | descendent2 = sum(pis(1 + (resolution / (2 ^ (l + 1))) * (2 * location - 1) : (resolution / (2 ^ (l + 1))) * (2 * location))); 68 | if(descendent1 > descendent2) 69 | k = 2 * (location - 1); 70 | else 71 | k = 2 * location - 1; 72 | end 73 | % else, no descendent is considered 74 | else 75 | selection1 = sum(pis(1 + (resolution / (2 ^ (l_star))) * floor(k / 2) : (resolution / (2 ^ (l_star))) * (floor(k / 2) + 1))); 76 | selection2 = sum(pis(1 + (resolution / (2 ^ (l_star + 1))) * k : (resolution / (2 ^ (l_star + 1))) * (k + 1))); 77 | if((abs(selection1 - 0.5) > abs(selection2 - 0.5))) 78 | l_final = l_star + 1; 79 | k_final = k; 80 | else 81 | l_final = l_star; 82 | k_final = floor(k / 2); 83 | end 84 | break; 85 | end 86 | end 87 | 88 | % formulate the adopted beam 89 | w = exp(-1i * pi * sin(sector_start + (sector_end - sector_start) / (2 ^ l_final) * (0.5 + k_final)) * [0 : 2 ^ l - 1]) / sqrt(2 ^ l); 90 | 91 | % measure the received signal 92 | y = w * mm_channel(1 : 2 ^ l); 93 | y = awgn(y, snr_e); 94 | 95 | % formulate candidate beams 96 | candidate_beam_angle0 = sector_start + (sector_end - sector_start) / resolution * [0.5 : 1 : resolution - 0.5]; 97 | candidate_beam0 = sin(candidate_beam_angle0); 98 | candidate_beam0 = exp(-1i * pi * candidate_beam0' * [0 : 2^l - 1]); 99 | 100 | % calculate the posterior probability 101 | f = exp(- (abs(y - LOS_parameter * w * candidate_beam0.')).^2 / 2 / (10^(- noise_e(count) / 10))); 102 | 103 | % update the posterior probability 104 | pis_new = zeros(resolution, 1); 105 | for r = 1 : resolution 106 | pis_new(r) = pis(r) * f(r) / (sum(pis([1 : r - 1, r + 1 : end]) .* f([1 : r - 1, r + 1 : end])') + 1e-16); 107 | end 108 | pis = pis_new; 109 | pis = pis / sum(pis); 110 | end 111 | [~, max_location] = max(pis); 112 | [~, sort_index] = sort(pis,'descend'); 113 | % calculate top-5 beamforming gain 114 | power_ratio(j, count, 1) = power_ratio(j, count, 1) + rsrp(max_location)^2 / max(rsrp)^2; 115 | power_ratio(j, count, 2) = power_ratio(j, count, 2) + max(rsrp(sort_index(1 : 2)))^2 / max(rsrp)^2; 116 | power_ratio(j, count, 3) = power_ratio(j, count, 3) + max(rsrp(sort_index(1 : 3)))^2 / max(rsrp)^2; 117 | power_ratio(j, count, 4) = power_ratio(j, count, 4) + max(rsrp(sort_index(1 : 4)))^2 / max(rsrp)^2; 118 | power_ratio(j, count, 5) = power_ratio(j, count, 5) + max(rsrp(sort_index(1 : 5)))^2 / max(rsrp)^2; 119 | % calculate pdf 120 | pdf(floor(rsrp(max_location)^2 / max(rsrp)^2 * 100) + 1 : end, j, count) = ... 
121 | pdf(floor(rsrp(max_location)^2 / max(rsrp)^2 * 100) + 1 : end, j, count) + 1; 122 | end 123 | end 124 | end 125 | end 126 | 127 | power_ratio = power_ratio / 256 / 16; 128 | pdf = pdf / 256 / 16; 129 | %save('baseline3_evaluation_256beam.mat', 'power_ratio'); -------------------------------------------------------------------------------- /deep_learning_model/adaptive_model/Enhanced/model_3Dcov_basic.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torchvision 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | 6 | class Model_3D(nn.Module): 7 | 8 | def __init__(self, N=60, K=32, Tx=8, Channel=2): 9 | super(Model_3D, self).__init__() 10 | 11 | self.bn0 = nn.BatchNorm1d(2) 12 | self.conv1 = nn.Conv1d(in_channels=2, out_channels=64, 13 | kernel_size=(3), stride=(3), padding=(1)) 14 | self.bn1 = nn.BatchNorm1d(64) 15 | self.conv2 = nn.Conv1d(in_channels=64, out_channels=256, 16 | kernel_size=(3), stride=(1), padding=(1)) 17 | self.bn2 = nn.BatchNorm1d(256) 18 | 19 | # predict narrow beam 20 | self.lstm1 = nn.LSTM(input_size=256, hidden_size=256, num_layers=2, dropout=0.2) 21 | self.fc1 = nn.Linear(256, 64) 22 | # predict wide beam 23 | self.lstm2 = nn.LSTM(input_size=256, hidden_size=256, num_layers=2, dropout=0.2) 24 | self.fc2 = nn.Linear(256, 16) 25 | self.drop1 = nn.Dropout(0.3) 26 | self.drop2 = nn.Dropout(0.3) 27 | 28 | 29 | def forward(self, x, device): 30 | 31 | if 0: 32 | # MPC 33 | # trained wide beam number 34 | k = 7 35 | # batch size 36 | batch_size = 16 37 | # save narrow beam prediction results 38 | y1 = torch.zeros((10, 16, 64)).to(device) 39 | # save wide beam prediction results 40 | y2 = torch.zeros((10, 16, 16)).to(device) 41 | 42 | # first loop for wide beam training numbers 43 | for i in range(10): 44 | 45 | # if i = 0, full wide beam training 46 | if i == 0: 47 | x_test = x[:, :, i, :] 48 | 49 | #CNN 50 | x_test = self.bn0(x_test) 51 | x_test = self.conv1(x_test) 52 | x_test = self.bn1(x_test) 53 | x_test = F.relu(x_test) 54 | x_test = self.conv2(x_test) 55 | x_test = self.bn2(x_test) 56 | x_test = F.relu(x_test) 57 | P_dim_size = x_test.shape[2] 58 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 59 | 60 | x_test = x_test.permute(2, 0, 1) 61 | # predict narrow beam 62 | y_test, (hn, cn) = self.lstm1(x_test) 63 | # predict wide beam 64 | y_test2, (hn2, cn2) = self.lstm2(x_test) 65 | 66 | # else, partial wide beam training 67 | else: 68 | # select partial beams based on MPC 69 | x_test = torch.zeros((batch_size, 2, 16)).to(device) 70 | for b in range(batch_size): 71 | x_test[b, :, max_id[b, :]] = x[b, :, i, max_id[b, :]] 72 | 73 | #CNN 74 | x_test = self.bn0(x_test) 75 | x_test = self.conv1(x_test) 76 | x_test = self.bn1(x_test) 77 | x_test = F.relu(x_test) 78 | x_test = self.conv2(x_test) 79 | x_test = self.bn2(x_test) 80 | x_test = F.relu(x_test) 81 | 82 | P_dim_size = x_test.shape[2] 83 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 84 | 85 | x_test = x_test.permute(2, 0, 1) 86 | # predict narrow beam 87 | y_test, (hn, cn) = self.lstm1(x_test, (hn, cn)) 88 | # predict wide beam 89 | y_test2, (hn2, cn2) = self.lstm2(x_test, (hn2, cn2)) 90 | 91 | # predict wide beam 92 | y_guide = self.drop2(y_test2) 93 | y_guide = self.fc2(y_guide) 94 | # predict narrow beam 95 | y_test = self.drop1(y_test) 96 | y_test = self.fc1(y_test) 97 | # MPC based beam selection 98 | max_value, max_id = torch.topk(y_guide, k) 99 | max_id = torch.squeeze(max_id) 100 | y1[i, :, :] = y_test 101 | 
y2[i, :, :] = y_guide 102 | 103 | # ONC 104 | # code structure is similar 105 | #if 0: 106 | k = 7 107 | batch_size = 16 108 | y1 = torch.zeros((10, 16, 64)).to(device) 109 | y2 = torch.zeros((10, 16, 16)).to(device) 110 | candidate_beam = torch.linspace(0, 15, steps=16) 111 | candidate_beam = candidate_beam.repeat(16, 1).to(device) 112 | 113 | for i in range(10): 114 | 115 | if i == 0: 116 | x_test = x[:, :, i, :] 117 | 118 | x_test = self.bn0(x_test) 119 | x_test = self.conv1(x_test) 120 | x_test = self.bn1(x_test) 121 | x_test = F.relu(x_test) 122 | x_test = self.conv2(x_test) 123 | x_test = self.bn2(x_test) 124 | x_test = F.relu(x_test) 125 | 126 | P_dim_size = x_test.shape[2] 127 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 128 | 129 | x_test = x_test.permute(2, 0, 1) 130 | y_test, (hn, cn) = self.lstm1(x_test) 131 | y_test2, (hn2, cn2) = self.lstm2(x_test) 132 | 133 | else: 134 | x_test = torch.zeros((batch_size, 2, 16)).to(device) 135 | # ONC based beam selection 136 | for b in range(batch_size): 137 | x_test[b, :, max_id[b, :]] = x[b, :, i, max_id[b, :]] 138 | 139 | x_test = self.bn0(x_test) 140 | x_test = self.conv1(x_test) 141 | x_test = self.bn1(x_test) 142 | x_test = F.relu(x_test) 143 | x_test = self.conv2(x_test) 144 | x_test = self.bn2(x_test) 145 | x_test = F.relu(x_test) 146 | 147 | P_dim_size = x_test.shape[2] 148 | x_test = nn.MaxPool1d(kernel_size=P_dim_size)(x_test) 149 | 150 | x_test = x_test.permute(2, 0, 1) 151 | y_test, (hn, cn) = self.lstm1(x_test, (hn, cn)) 152 | y_test2, (hn2, cn2) = self.lstm2(x_test, (hn2, cn2)) 153 | 154 | y_guide = self.drop2(y_test2) 155 | y_guide = self.fc2(y_guide) 156 | y_test = self.drop1(y_test) 157 | y_test = self.fc1(y_test) 158 | # ONC based beam selection 159 | max_value, max_id = torch.topk(y_guide, 1) 160 | max_id = torch.squeeze(max_id) 161 | max_id = max_id.repeat(16, 1).T 162 | max_value, max_id = torch.topk(-torch.abs(candidate_beam - max_id), k) 163 | y1[i, :, :] = y_test 164 | y2[i, :, :] = y_guide 165 | 166 | return y1, y2 167 | -------------------------------------------------------------------------------- /deep_learning_model/basic_model/train_beam_basic.py: -------------------------------------------------------------------------------- 1 | import torch.optim as optim 2 | import numpy as np 3 | import torch 4 | import torchvision 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import scipy.io as sio 8 | from dataloader import Dataloader 9 | from model_3Dcov_basic import Model_3D 10 | import sys 11 | 12 | def eval(model, loader, device): 13 | # reset dataloader 14 | loader.reset() 15 | # loss function 16 | criterion = nn.CrossEntropyLoss() 17 | # judge whether dataset is finished 18 | done = False 19 | # counting accurate prediction 20 | P = 0 21 | # counting inaccurate prediction 22 | N = 0 23 | # M = 0 24 | # beam power loss 25 | BL = np.zeros((10,1)) 26 | # running loss 27 | running_loss = 0 28 | # count batch number 29 | batch_num = 0 30 | batch_size = 16 31 | # count the rank of the predicted probabilities 32 | rank = np.zeros((10,64)) 33 | # evaluate validation set 34 | while not done: 35 | # read files 36 | channel1, channel2, label_m, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 37 | if count==True: 38 | batch_num += 1 39 | # channel1: baseline 1 by using uniformly sampled beams 40 | # channel2: proposed by using wide beams 41 | out_tensor = model(channel2) 42 | loss = 0 43 | # average loss of all predictions 44 | for loss_count in range(10): 
45 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), label_nonoise_m[:, loss_count]) 46 | # predicted probabilities 47 | out_tensor_np = out_tensor.cpu().detach().numpy() 48 | # true label without noise 49 | gt_labels = label_nonoise_m.cpu().detach().numpy() 50 | gt_labels = np.float32(gt_labels) 51 | gt_labels = gt_labels.transpose(1, 0) 52 | # true normalized beam amplitude 53 | beam_power = beam_power_nonoise_m.cpu().detach().numpy() 54 | beam_power = beam_power.transpose(1, 0, 2) 55 | out_shape = gt_labels.shape 56 | for i in range(out_shape[0]): 57 | for j in range(out_shape[1]): 58 | train_ans = np.squeeze(out_tensor_np[i, j, :]) 59 | # find the index with the maximum probability 60 | train_index = np.argmax(train_ans) 61 | # find the rank of the true optimal beam 62 | train_sorted = np.argsort(train_ans) 63 | rank_index = np.where(train_sorted == gt_labels[i, j]) 64 | rank[i, rank_index[0]] = rank[i, rank_index[0]] + 1 65 | # count accurate and inaccurate predictions 66 | if train_index == gt_labels[i, j]: 67 | P = P + 1 68 | else: 69 | N = N + 1 70 | # calculate beam power loss 71 | BL[i, 0] = BL[i, 0] + (beam_power[i, j, train_index] / max(beam_power[i, j, :])) ** 2 72 | running_loss += loss.data.cpu() 73 | # average accuracy 74 | acur = float(P) / (P + N) 75 | # average loss 76 | losses = running_loss / batch_num 77 | # average beam power loss 78 | BL = BL / batch_num / batch_size 79 | # print results 80 | print("Accuracy: %.3f" % (acur)) 81 | print("Loss: %.3f" % (losses)) 82 | print("Beam power loss:") 83 | print(BL.T) 84 | return acur, losses, rank, BL 85 | 86 | def main(): 87 | # 0 LSTM: baseline 2 or proposed CNN based 88 | # 1 LSTM: proposed LSTM based 89 | version_name = 'v1' 90 | info = 'TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_' + version_name 91 | print(info) 92 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 93 | print(device) 94 | 95 | # number of independent training runs 96 | t = 5 97 | # training epoch 98 | epoch = 80 99 | # batch size 100 | batch_size = 16 101 | print('batch_size:%d'%(batch_size)) 102 | 103 | # training set and validation set 104 | loader = Dataloader(path='/usr/mk/TCOM/dataset/training_15dBm', batch_size=batch_size, device=device) 105 | eval_loader = Dataloader(path='/usr/mk/TCOM/dataset/testing_15dBm', batch_size=batch_size, device=device) 106 | 107 | # loss function 108 | criterion = nn.CrossEntropyLoss() 109 | 110 | # first loop over training runs 111 | for tt in range(t): 112 | print('Train %d times' % (tt)) 113 | # learning rate 114 | lr = 0.0003 115 | # model initialization 116 | model = Model_3D(N=15, K=32, Tx=4, Channel=2) 117 | model.to(device) 118 | # save minimum loss 119 | min_loss = 1000000 120 | # Adam optimizer 121 | optimizer = torch.optim.Adam(model.parameters(), lr / 10, betas=(0.9, 0.999)) # use the sum of 10 losses 122 | # learning rate adaptive decay 123 | lr_decay = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, 124 | verbose=True, threshold=0.0001, 125 | threshold_mode='rel', cooldown=0, min_lr=0.0000001, 126 | eps=1e-08) 127 | # print parameters 128 | for name, param in model.named_parameters(): 129 | print('Name:', name, 'Size:', param.size()) 130 | 131 | # second loop over training epochs 132 | for e in range(epoch): 133 | print('Train %d epoch'%(e)) 134 | # reset the dataloader 135 | loader.reset() 136 | eval_loader.reset() 137 | # judge whether data loading is done 138 | done = False 139 | # running loss 140 | running_loss = 0 141 | # count batch
number 142 | batch_num = 0 143 | while not done: 144 | # read files 145 | channel1, channel2, labels, beam_power_m, beam_power_nonoise_m, label_nonoise_m, done, count = loader.next_batch() 146 | if count == True: 147 | batch_num += 1 148 | # predicted probabilities 149 | out_tensor = model(channel2) 150 | loss = 0 151 | # average loss of all predictions 152 | for loss_count in range(10): 153 | loss += criterion(torch.squeeze(out_tensor[loss_count, :, :]), labels[:, loss_count]) 154 | # gradient back propagation 155 | loss.backward() 156 | # parameter optimization 157 | optimizer.step() 158 | running_loss += loss.item() 159 | losses = running_loss / batch_num 160 | #print results 161 | print('[%d] loss: %.3f' % 162 | (e + 1, losses)) 163 | # eval mode, where dropout is off 164 | model.eval() 165 | print('the evaling set:') 166 | acur, losses, rank, BL = eval(model, eval_loader, device) 167 | # save the optimal model 168 | if losses < min_loss: 169 | min_loss = losses 170 | model_name = info + '_' + str(tt) + '_MODEL.pkl' 171 | torch.save(model, model_name) 172 | # learning rate decay 173 | lr_decay.step(losses) 174 | # train mode, where dropout is on 175 | model.train() 176 | 177 | if __name__ == '__main__': 178 | main() -------------------------------------------------------------------------------- /supplementary_material/Model/result/plot_model.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | close all; 3 | clc 4 | 5 | %% Investigation of learning rate 6 | initial_loss = - log(1 / 64); 7 | 8 | %CNN assisted 9 | figure; 10 | grid on; 11 | hold on; 12 | load('..\training_parameter\learning_rate\proposed1_lr0.003.mat'); 13 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'r-', 'LineWidth', 1.5, 'markersize', 2); 14 | 15 | load('..\training_parameter\learning_rate\proposed1_lr0.001.mat'); 16 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'b-.', 'LineWidth', 1.5, 'markersize', 2.5); 17 | 18 | load('..\training_parameter\learning_rate\proposed1_lr0.0003.mat'); 19 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'g:', 'LineWidth', 1.5, 'markersize', 2); 20 | 21 | load('..\training_parameter\learning_rate\proposed1_lr0.0001.mat'); 22 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'm--', 'LineWidth', 1.5, 'markersize', 2); 23 | 24 | legend('$r_{\rm{L}}$ = 0.003', '$r_{\rm{L}}$ = 0.001', '$r_{\rm{L}}$ = 0.0003', '$r_{\rm{L}}$ = 0.0001', 'interpreter', 'latex'); 25 | xlabel('Epoch', 'interpreter', 'latex'); 26 | ylabel('${\rm{loss}}_{\rm{n}}$', 'interpreter', 'latex'); 27 | ylim([1 4.5]); 28 | 29 | %LSTM assisted 30 | figure; 31 | grid on; 32 | hold on; 33 | load('..\training_parameter\learning_rate\proposed2_lr0.003.mat'); 34 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'r-', 'LineWidth', 1.5); 35 | 36 | load('..\training_parameter\learning_rate\proposed2_lr0.001.mat'); 37 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'b-.', 'LineWidth', 1.5); 38 | 39 | load('..\training_parameter\learning_rate\proposed2_lr0.0003.mat'); 40 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'g:', 'LineWidth', 1.5); 41 | 42 | load('..\training_parameter\learning_rate\proposed2_lr0.0001.mat'); 43 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'm--', 'LineWidth', 1.5); 44 | 45 | legend('$r_{\rm{L}}$ = 0.003', '$r_{\rm{L}}$ = 0.001', '$r_{\rm{L}}$ = 0.0003', '$r_{\rm{L}}$ = 0.0001', 'interpreter', 'latex'); 46 | xlabel('Epoch', 
'interpreter', 'latex'); 47 | ylabel('${\rm{loss}}_{\rm{n}}$', 'interpreter', 'latex'); 48 | ylim([1 4.5]); 49 | 50 | %Adaptive, K=7 51 | figure; 52 | grid on; 53 | hold on; 54 | load('..\training_parameter\learning_rate\proposed3(basic)_ONC_k=7_lr0.001.mat'); 55 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'r-', 'LineWidth', 1.5); 56 | 57 | load('..\training_parameter\learning_rate\proposed3(basic)_ONC_k=7_lr0.0003.mat'); 58 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'b-', 'LineWidth', 1.5); 59 | 60 | load('..\training_parameter\learning_rate\proposed3(basic)_ONC_k=7_lr0.0001.mat'); 61 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'g-', 'LineWidth', 1.5); 62 | 63 | load('..\training_parameter\learning_rate\proposed3(basic)_ONC_k=7_lr0.00003.mat'); 64 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'm-', 'LineWidth', 1.5); 65 | 66 | % 67 | load('..\training_parameter\learning_rate\proposed3(basic)_MPC_k=7_lr0.001.mat'); 68 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'r--', 'LineWidth', 1.5, 'markersize', 2); 69 | 70 | load('..\training_parameter\learning_rate\proposed3(basic)_MPC_k=7_lr0.0003.mat'); 71 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'b--', 'LineWidth', 1.5, 'markersize', 2); 72 | 73 | load('..\training_parameter\learning_rate\proposed3(basic)_MPC_k=7_lr0.0001.mat'); 74 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'g--', 'LineWidth', 1.5, 'markersize', 2); 75 | 76 | load('..\training_parameter\learning_rate\proposed3(basic)_MPC_k=7_lr0.00003.mat'); 77 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'm--', 'LineWidth', 1.5, 'markersize', 2); 78 | 79 | legend('ONC, $r_{\rm{L}}$ = 0.001', 'ONC, $r_{\rm{L}}$ = 0.0003', 'ONC, $r_{\rm{L}}$ = 0.0001', 'ONC, $r_{\rm{L}}$ = 0.00003',... 80 | 'MPC, $r_{\rm{L}}$ = 0.001', 'MPC, $r_{\rm{L}}$ = 0.0003', 'MPC, $r_{\rm{L}}$ = 0.0001', 'MPC, $r_{\rm{L}}$ = 0.00003',... 
81 | 'interpreter', 'latex', 'fontsize', 7); 82 | xlabel('Epoch', 'interpreter', 'latex'); 83 | ylabel('${\rm{loss}}_{\rm{n}}$', 'interpreter', 'latex'); 84 | ylim([1 4.5]); 85 | 86 | %Enhanced adaptive, K=7 87 | figure; 88 | grid on; 89 | hold on; 90 | load('..\training_parameter\learning_rate\proposed3(enhanced)_ONC_k=7_lr0.001.mat'); 91 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'r-', 'LineWidth', 1.5); 92 | 93 | load('..\training_parameter\learning_rate\proposed3(enhanced)_ONC_k=7_lr0.0003.mat'); 94 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'b-', 'LineWidth', 1.5); 95 | 96 | load('..\training_parameter\learning_rate\proposed3(enhanced)_ONC_k=7_lr0.0001.mat'); 97 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'g-', 'LineWidth', 1.5); 98 | 99 | load('..\training_parameter\learning_rate\proposed3(enhanced)_ONC_k=7_lr0.00003.mat'); 100 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'm-', 'LineWidth', 1.5); 101 | 102 | % 103 | load('..\training_parameter\learning_rate\proposed3(enhanced)_MPC_k=7_lr0.001.mat'); 104 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'r--', 'LineWidth', 1.5); 105 | 106 | load('..\training_parameter\learning_rate\proposed3(enhanced)_MPC_k=7_lr0.0003.mat'); 107 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'b--', 'LineWidth', 1.5); 108 | 109 | load('..\training_parameter\learning_rate\proposed3(enhanced)_MPC_k=7_lr0.0001.mat'); 110 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'g--', 'LineWidth', 1.5); 111 | 112 | load('..\training_parameter\learning_rate\proposed3(enhanced)_MPC_k=7_lr0.00003.mat'); 113 | plot(0 : 80, [initial_loss; squeeze(mean(loss_eval, 2)) / 10], 'm--', 'LineWidth', 1.5); 114 | 115 | legend('ONC, $r_{\rm{L}}$ = 0.001', 'ONC, $r_{\rm{L}}$ = 0.0003', 'ONC, $r_{\rm{L}}$ = 0.0001', 'ONC, $r_{\rm{L}}$ = 0.00003',... 116 | 'MPC, $r_{\rm{L}}$ = 0.001', 'MPC, $r_{\rm{L}}$ = 0.0003', 'MPC, $r_{\rm{L}}$ = 0.0001', 'MPC, $r_{\rm{L}}$ = 0.00003',... 
117 | 'interpreter', 'latex', 'fontsize', 7); 118 | xlabel('Epoch', 'interpreter', 'latex'); 119 | ylabel('${\rm{loss}}_{\rm{n}}$', 'interpreter', 'latex'); 120 | ylim([1 4.5]); 121 | 122 | 123 | %% 124 | % FLOPs 125 | flop_CNN = 2 * (2 * 64 * 3 * 6) + 2 * (64 * 256 * 3 * 3); 126 | flop_LSTM1 = 2 * (256 * 256 * 8 + 256 * (256 + 20)); 127 | flop_LSTM2 = flop_LSTM1; 128 | flop_fc1 = 2 * 256 * 64; 129 | flop_fc2 = 2 * 256 * 16; 130 | 131 | flop_p1 = flop_CNN + flop_fc1; 132 | flop_p2 = flop_p1 + flop_LSTM1; 133 | flop_p3 = flop_p2 + flop_LSTM2 + flop_fc2; % enhanced 134 | 135 | %% 136 | % Investigation of the impact of mu 137 | figure; 138 | hold on; 139 | grid on; 140 | loss_sery = zeros(25, 1); 141 | count = 0; 142 | for mu = 0.1 : 0.1 : 2.5 143 | count = count + 1; 144 | load(['..\training_parameter\mu\TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_ONC_K=7_mu' num2str(mu) '.mat']); 145 | loss_sery(count) = mean(loss_eval(80, 1 : 5)) / 10; 146 | end 147 | plot(0.1 : 0.1 : 2.5, loss_sery, 'r-o', 'LineWidth', 1.5); 148 | 149 | loss_sery = zeros(10, 1); 150 | count = 0; 151 | for mu = 0.1 : 0.1 : 2.5 152 | count = count + 1; 153 | load(['..\training_parameter\mu\TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_MPC_K=7_mu' num2str(mu) '.mat']); 154 | loss_sery(count) = mean(loss_eval(80, 1 : 5)) / 10; 155 | end 156 | plot(0.1 : 0.1 : 2.5, loss_sery, 'b-*', 'LineWidth', 1.5); 157 | 158 | xlim([0 2.5]); 159 | ylim([1.16 1.21]); 160 | xlabel('Weight coefficient $\mu$', 'interpreter', 'latex'); 161 | ylabel('${\rm{loss}}_{\rm{n}}$', 'interpreter', 'latex'); 162 | legend('ONC', 'MPC', 'interpreter', 'latex'); 163 | 164 | %% 165 | % Execution time 166 | load('..\complexity\TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3_K=7_execution_time.mat'); 167 | batch_size = 16; 168 | time_FC1 = mean(times_FC1) / batch_size; 169 | time_FC2 = mean(times_FC2) / batch_size; 170 | time_CNN = mean(times_CNN) / batch_size; 171 | time_LSTM1 = mean(times_LSTM1) / batch_size; 172 | time_LSTM2 = mean(times_LSTM2) / batch_size; 173 | 174 | time_p1 = time_CNN + time_FC1; 175 | time_p2 = time_CNN + time_LSTM1 + time_FC1; 176 | time_p3 = time_CNN + time_LSTM1 + time_LSTM2 + time_FC1 + time_FC2; % enhanced 177 | 178 | %% 179 | %Training time per epoch 180 | load('..\complexity\TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_proposed1_testtime.mat'); 181 | average_time_CNN = mean(mean(times)); 182 | 183 | load('..\complexity\TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed2_testtime.mat'); 184 | average_time_LSTM = mean(mean(times)); 185 | 186 | load('..\complexity\TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(basic)_k=7_testtime.mat'); 187 | average_time_adaptive = mean(mean(times)); 188 | 189 | load('..\complexity\TCOM_LOS_64beam_2CNN_1LSTM_256feature_16Tx_RK=8dB_proposed3(enhanced)_k=7_testtime.mat'); 190 | average_time_enhanced_adaptive = mean(mean(times)); --------------------------------------------------------------------------------
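A minimal sketch for inspecting the saved evaluation results in Python rather than MATLAB, assuming the .mat file written by beam_evaluation_baseline2.py (info + '.mat') is available in the working directory; only the keys passed to sio.savemat in that script are used, and the file name below is the one constructed for the v1_10dBm_evaluation run.

import numpy as np
import scipy.io as sio

# load the evaluation results saved by beam_evaluation_baseline2.py via sio.savemat
data = sio.loadmat('TCOM_LOS_64beam_2CNN_0LSTM_256feature_16Tx_RK=8dB_baseline2_v1_10dBm_evaluation.mat')

acur_eval = np.squeeze(data['acur_eval'])   # accuracy of each of the t = 5 training runs
loss_eval = np.squeeze(data['loss_eval'])   # summed cross-entropy loss of the 10 predictions, per run
BL_eval = data['BL_eval']                   # shape (10, 6, 5): instant x (0-5 additional trainings) x run

print('mean accuracy over runs: %.3f' % acur_eval.mean())
print('mean per-prediction loss: %.3f' % (loss_eval.mean() / 10))  # divided by 10, as in the MATLAB plot scripts
print(BL_eval.mean(axis=2))                 # average normalized beam power ratio per instant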