├── Bayesian
│   ├── lr5
│   │   └── pkl_folder
│   │       └── 1
│   │           ├── 1_.pkl
│   │           └── 1_Bayesian_lr5.csv
│   ├── test.py
│   ├── train.py
│   └── utils.py
├── COV_data
│   ├── test_data.npy
│   ├── test_label.npy
│   ├── train_data.npy
│   ├── train_label.npy
│   ├── val_data.npy
│   └── val_label.npy
├── DLinear
│   ├── __pycache__
│   │   └── utils.cpython-38.pyc
│   ├── test.py
│   ├── train.py
│   └── utils.py
├── Ensemble
│   ├── lr5
│   │   └── pkl_folder
│   │       ├── 1
│   │       │   ├── 1_GRU_lr5.csv
│   │       │   └── baseline_1.pkl
│   │       ├── 2
│   │       │   ├── 2_GRU_lr5.csv
│   │       │   └── baseline_2.pkl
│   │       ├── 3
│   │       │   ├── 3_GRU_lr5.csv
│   │       │   └── baseline_3.pkl
│   │       ├── 4
│   │       │   ├── 4_GRU_lr5.csv
│   │       │   └── baseline_4.pkl
│   │       ├── 5
│   │       │   ├── 5_GRU_lr5.csv
│   │       │   └── baseline_5.pkl
│   │       ├── 6
│   │       │   ├── 6_GRU_lr5.csv
│   │       │   └── baseline_6.pkl
│   │       ├── 7
│   │       │   ├── 7_GRU_lr5.csv
│   │       │   └── baseline_7.pkl
│   │       ├── 8
│   │       │   ├── 8_GRU_lr5.csv
│   │       │   └── baseline_8.pkl
│   │       ├── 9
│   │       │   ├── 9_GRU_lr5.csv
│   │       │   └── baseline_9.pkl
│   │       ├── 10
│   │       │   ├── 10_GRU_lr5.csv
│   │       │   └── baseline_10.pkl
│   │       ├── 11
│   │       │   ├── 11_GRU_lr5.csv
│   │       │   └── baseline_11.pkl
│   │       ├── 12
│   │       │   ├── 12_GRU_lr5.csv
│   │       │   └── baseline_12.pkl
│   │       ├── 13
│   │       │   ├── 13_GRU_lr5.csv
│   │       │   └── baseline_13.pkl
│   │       ├── 14
│   │       │   ├── 14_GRU_lr5.csv
│   │       │   └── baseline_14.pkl
│   │       ├── 15
│   │       │   ├── 15_GRU_lr5.csv
│   │       │   └── baseline_15.pkl
│   │       ├── 16
│   │       │   ├── 16_GRU_lr5.csv
│   │       │   └── baseline_16.pkl
│   │       ├── 17
│   │       │   ├── 17_GRU_lr5.csv
│   │       │   └── baseline_17.pkl
│   │       ├── 18
│   │       │   ├── 18_GRU_lr5.csv
│   │       │   └── baseline_18.pkl
│   │       ├── 19
│   │       │   ├── 19_GRU_lr5.csv
│   │       │   └── baseline_19.pkl
│   │       ├── 20
│   │       │   ├── 20_GRU_lr5.csv
│   │       │   └── baseline_20.pkl
│   │       ├── 21
│   │       │   ├── 21_GRU_lr5.csv
│   │       │   └── baseline_21.pkl
│   │       ├── 22
│   │       │   ├── 22_GRU_lr5.csv
│   │       │   └── baseline_22.pkl
│   │       ├── 23
│   │       │   ├── 23_GRU_lr5.csv
│   │       │   └── baseline_23.pkl
│   │       ├── 24
│   │       │   ├── 24_GRU_lr5.csv
│   │       │   └── baseline_24.pkl
│   │       ├── 25
│   │       │   ├── 25_GRU_lr5.csv
│   │       │   └── baseline_25.pkl
│   │       ├── 26
│   │       │   ├── 26_GRU_lr5.csv
│   │       │   └── baseline_26.pkl
│   │       ├── 27
│   │       │   ├── 27_GRU_lr5.csv
│   │       │   └── baseline_27.pkl
│   │       ├── 28
│   │       │   ├── 28_GRU_lr5.csv
│   │       │   └── baseline_28.pkl
│   │       ├── 29
│   │       │   ├── 29_GRU_lr5.csv
│   │       │   └── baseline_29.pkl
│   │       ├── 30
│   │       │   ├── 30_GRU_lr5.csv
│   │       │   └── baseline_30.pkl
│   │       ├── 31
│   │       │   ├── 31_GRU_lr5.csv
│   │       │   └── baseline_31.pkl
│   │       ├── 32
│   │       │   ├── 32_GRU_lr5.csv
│   │       │   └── baseline_32.pkl
│   │       ├── 33
│   │       │   ├── 33_GRU_lr5.csv
│   │       │   └── baseline_33.pkl
│   │       ├── 34
│   │       │   ├── 34_GRU_lr5.csv
│   │       │   └── baseline_34.pkl
│   │       ├── 35
│   │       │   ├── 35_GRU_lr5.csv
│   │       │   └── baseline_35.pkl
│   │       ├── 36
│   │       │   ├── 36_GRU_lr5.csv
│   │       │   └── baseline_36.pkl
│   │       ├── 37
│   │       │   ├── 37_GRU_lr5.csv
│   │       │   └── baseline_37.pkl
│   │       ├── 38
│   │       │   ├── 38_GRU_lr5.csv
│   │       │   └── baseline_38.pkl
│   │       ├── 39
│   │       │   ├── 39_GRU_lr5.csv
│   │       │   └── baseline_39.pkl
│   │       ├── 40
│   │       │   ├── 40_GRU_lr5.csv
│   │       │   └── baseline_40.pkl
│   │       ├── 41
│   │       │   ├── 41_GRU_lr5.csv
│   │       │   └── baseline_41.pkl
│   │       ├── 42
│   │       │   ├── 42_GRU_lr5.csv
│   │       │   └── baseline_42.pkl
│   │       ├── 43
│   │       │   ├── 43_GRU_lr5.csv
│   │       │   └── baseline_43.pkl
│   │       ├── 44
│   │       │   ├── 44_GRU_lr5.csv
│   │       │   └── baseline_44.pkl
│   │       ├── 45
│   │       │   ├── 45_GRU_lr5.csv
│   │       │   └── baseline_45.pkl
│   │       ├── 46
│   │       │   ├── 46_GRU_lr5.csv
│   │       │   └── baseline_46.pkl
│   │       ├── 47
│   │       │   ├── 47_GRU_lr5.csv
│   │       │   └── baseline_47.pkl
│   │       ├── 48
│   │       │   ├── 48_GRU_lr5.csv
│   │       │   └── baseline_48.pkl
│   │       ├── 49
│   │       │   ├── 49_GRU_lr5.csv
│   │       │   └── baseline_49.pkl
│   │       ├── 50
│   │       │   ├── 50_GRU_lr5.csv
│   │       │   └── baseline_50.pkl
│   │       ├── 51
│   │       │   ├── 51_GRU_lr5.csv
│   │       │   └── baseline_51.pkl
│   │       ├── 52
│   │       │   ├── 52_GRU_lr5.csv
│   │       │   └── baseline_52.pkl
│   │       ├── 53
│   │       │   ├── 53_GRU_lr5.csv
│   │       │   └── baseline_53.pkl
│   │       ├── 54
│   │       │   ├── 54_GRU_lr5.csv
│   │       │   └── baseline_54.pkl
│   │       ├── 55
│   │       │   ├── 55_GRU_lr5.csv
│   │       │   └── baseline_55.pkl
│   │       ├── 56
│   │       │   ├── 56_GRU_lr5.csv
│   │       │   └── baseline_56.pkl
│   │       ├── 57
│   │       │   ├── 57_GRU_lr5.csv
│   │       │   └── baseline_57.pkl
│   │       ├── 58
│   │       │   ├── 58_GRU_lr5.csv
│   │       │   └── baseline_58.pkl
│   │       ├── 59
│   │       │   ├── 59_GRU_lr5.csv
│   │       │   └── baseline_59.pkl
│   │       ├── 60
│   │       │   ├── 60_GRU_lr5.csv
│   │       │   └── baseline_60.pkl
│   │       ├── 61
│   │       │   ├── 61_GRU_lr5.csv
│   │       │   └── baseline_61.pkl
│   │       ├── 62
│   │       │   ├── 62_GRU_lr5.csv
│   │       │   └── baseline_62.pkl
│   │       ├── 63
│   │       │   ├── 63_GRU_lr5.csv
│   │       │   └── baseline_63.pkl
│   │       ├── 64
│   │       │   ├── 64_GRU_lr5.csv
│   │       │   └── baseline_64.pkl
│   │       ├── 65
│   │       │   ├── 65_GRU_lr5.csv
│   │       │   └── baseline_65.pkl
│   │       ├── 66
│   │       │   ├── 66_GRU_lr5.csv
│   │       │   └── baseline_66.pkl
│   │       ├── 67
│   │       │   ├── 67_GRU_lr5.csv
│   │       │   └── baseline_67.pkl
│   │       ├── 68
│   │       │   ├── 68_GRU_lr5.csv
│   │       │   └── baseline_68.pkl
│   │       ├── 69
│   │       │   ├── 69_GRU_lr5.csv
│   │       │   └── baseline_69.pkl
│   │       ├── 70
│   │       │   ├── 70_GRU_lr5.csv
│   │       │   └── baseline_70.pkl
│   │       ├── 71
│   │       │   ├── 71_GRU_lr5.csv
│   │       │   └── baseline_71.pkl
│   │       ├── 72
│   │       │   ├── 72_GRU_lr5.csv
│   │       │   └── baseline_72.pkl
│   │       ├── 73
│   │       │   ├── 73_GRU_lr5.csv
│   │       │   └── baseline_73.pkl
│   │       ├── 74
│   │       │   ├── 74_GRU_lr5.csv
│   │       │   └── baseline_74.pkl
│   │       ├── 75
│   │       │   ├── 75_GRU_lr5.csv
│   │       │   └── baseline_75.pkl
│   │       ├── 76
│   │       │   ├── 76_GRU_lr5.csv
│   │       │   └── baseline_76.pkl
│   │       ├── 77
│   │       │   ├── 77_GRU_lr5.csv
│   │       │   └── baseline_77.pkl
│   │       ├── 78
│   │       │   ├── 78_GRU_lr5.csv
│   │       │   └── baseline_78.pkl
│   │       ├── 79
│   │       │   ├── 79_GRU_lr5.csv
│   │       │   └── baseline_79.pkl
│   │       ├── 80
│   │       │   ├── 80_GRU_lr5.csv
│   │       │   └── baseline_80.pkl
│   │       ├── 81
│   │       │   ├── 81_GRU_lr5.csv
│   │       │   └── baseline_81.pkl
│   │       ├── 82
│   │       │   ├── 82_GRU_lr5.csv
│   │       │   └── baseline_82.pkl
│   │       ├── 83
│   │       │   ├── 83_GRU_lr5.csv
│   │       │   └── baseline_83.pkl
│   │       ├── 84
│   │       │   ├── 84_GRU_lr5.csv
│   │       │   └── baseline_84.pkl
│   │       ├── 85
│   │       │   ├── 85_GRU_lr5.csv
│   │       │   └── baseline_85.pkl
│   │       ├── 86
│   │       │   ├── 86_GRU_lr5.csv
│   │       │   └── baseline_86.pkl
│   │       ├── 87
│   │       │   ├── 87_GRU_lr5.csv
│   │       │   └── baseline_87.pkl
│   │       ├── 88
│   │       │   ├── 88_GRU_lr5.csv
│   │       │   └── baseline_88.pkl
│   │       ├── 89
│   │       │   ├── 89_GRU_lr5.csv
│   │       │   └── baseline_89.pkl
│   │       ├── 90
│   │       │   ├── 90_GRU_lr5.csv
│   │       │   └── baseline_90.pkl
│   │       ├── 91
│   │       │   ├── 91_GRU_lr5.csv
│   │       │   └── baseline_91.pkl
│   │       ├── 92
│   │       │   ├── 92_GRU_lr5.csv
│   │       │   └── baseline_92.pkl
│   │       ├── 93
│   │       │   ├── 93_GRU_lr5.csv
│   │       │   └── baseline_93.pkl
│   │       ├── 94
│   │       │   ├── 94_GRU_lr5.csv
│   │       │   └── baseline_94.pkl
│   │       ├── 95
│   │       │   ├── 95_GRU_lr5.csv
│   │       │   └── baseline_95.pkl
│   │       ├── 96
│   │       │   ├── 96_GRU_lr5.csv
│   │       │   └── baseline_96.pkl
│   │       ├── 97
│   │       │   ├── 97_GRU_lr5.csv
│   │       │   └── baseline_97.pkl
│   │       ├── 98
│   │       │   ├── 98_GRU_lr5.csv
│   │       │   └── baseline_98.pkl
│   │       ├── 99
│   │       │   ├── 99_GRU_lr5.csv
│   │       │   └── baseline_99.pkl
│   │       └── 100
│   │           ├── 100_GRU_lr5.csv
│   │           └── baseline_100.pkl
│   ├── test.py
│   ├── train.py
│   └── utils.py
├── GEF_data
│   ├── test_data.npy
│   ├── test_label.npy
│   ├── train_data.npy
│   ├── train_label.npy
│   ├── val_data.npy
│   └── val_label.npy
├── GRU
│   ├── lr5
│   │   └── pkl_folder
│   │       └── 1
│   │           └── 1_.pkl
│   ├── test.py
│   ├── train.py
│   └── utils.py
├── LICENSE
├── NBEATSX
│   ├── __pycache__
│   │   └── utils.cpython-38.pyc
│   ├── test.py
│   ├── train.py
│   └── utils.py
├── README.md
├── building_data
│   ├── Almeda_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Bessie_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Bill_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Dona_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Jeremy_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Jewel_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Jordan_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Lizzie_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   ├── Mariah_data
│   │   ├── test_data.npy
│   │   ├── test_label.npy
│   │   ├── train_data.npy
│   │   ├── train_label.npy
│   │   ├── val_data.npy
│   │   └── val_label.npy
│   └── Nikki_data
│       ├── test_data.npy
│       ├── test_label.npy
│       ├── train_data.npy
│       ├── train_label.npy
│       ├── val_data.npy
│       └── val_label.npy
├── deepAR
│   ├── lr5
│   │   └── pkl_folder
│   │       └── 1
│   │           └── 1_.pkl
│   ├── test.py
│   ├── train.py
│   └── utils.py
├── deepARdropout
│   ├── lr5
│   │   └── pkl_folder
│   │       ├── 1
│   │       │   └── 1_.pkl
│   │       └── .DS_Store
│   ├── test.py
│   ├── train.py
│   └── utils.py
├── seq2seq_diffusion
│   ├── lr5_5
│   │   └── pkl_folder
│   │       └── 1
│   │           └── 1_.pkl
│   ├── test.py
│   ├── train.py
│   └── utils.py
└── seq2seq_diffusion_normal
    ├── __pycache__
    │   └── utils.cpython-38.pyc
    ├── lr5_5
    │   └── pkl_folder
    │       └── 1
    │           └── 1_.pkl
    ├── test.py
    ├── train.py
    └── utils.py
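
Every `*_data` directory above ships the same six-file split, and the scripts below all consume it the same way: `*_data.npy` holds input windows of shape `(N, day, cov)` and `*_label.npy` the matching targets, with `StandardScaler` statistics fit on the training split only. A minimal sketch of that shared loading step (paths from this repo; the shapes are inferred from how the scripts index `shape[1]`/`shape[2]`; note the DLinear scripts instead flatten everything to a single column before scaling):

# Sketch of the shared data pipeline, mirroring the scripts below.
import numpy as np
from sklearn.preprocessing import StandardScaler

train_data = np.load('GEF_data/train_data.npy')                   # windows: (N, day, cov)
train_label = np.load('GEF_data/train_label.npy').reshape(-1, 1)  # one target per window

N, day, cov = train_data.shape
sd_data = StandardScaler().fit(train_data.reshape(-1, cov))       # per-covariate mean/std, train split only
sd_label = StandardScaler().fit(train_label)

train_data = sd_data.transform(train_data.reshape(-1, cov)).reshape(N, day, cov)
train_label = sd_label.transform(train_label).reshape(-1)
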
/Bayesian/lr5/pkl_folder/1/1_.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/Bayesian/lr5/pkl_folder/1/1_.pkl
--------------------------------------------------------------------------------

/Bayesian/lr5/pkl_folder/1/1_Bayesian_lr5.csv:
--------------------------------------------------------------------------------
0
7629.156781673431
--------------------------------------------------------------------------------

/Bayesian/test.py:
--------------------------------------------------------------------------------
import os
import random
from copy import deepcopy
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchinfo import summary

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.set_device(1)
import properscoring as ps
from sklearn.preprocessing import StandardScaler
from scipy.stats import norm
from scipy.stats import cauchy
import time
from blitz.modules import BayesianLSTM, BayesianGRU, BayesianLinear
from blitz.utils import variational_estimator

import sys
sys.path.append('.')
from utils import *

if __name__ == '__main__':
    root_path = './lr5'
    metric_list = [[], [], [], [], [], []]
    for t in range(1):
        t = t + 1
        setup_seed(t)
        print("begin loading data")
        train_data_path = '../GEF_data/train_data.npy'
        train_label_path = '../GEF_data/train_label.npy'
        test_data_path = '../GEF_data/test_data.npy'
        test_label_path = '../GEF_data/test_label.npy'
        mytrain_data = np.load(train_data_path)
        cov = mytrain_data.shape[2]
        day = mytrain_data.shape[1]
        mytrain_data = mytrain_data.reshape(-1, cov)
        mytest_data = np.load(test_data_path)
        mytest_data = mytest_data.reshape(-1, cov)
        mytrain_label = np.load(train_label_path).reshape(-1, 1)
        mytest_label = np.load(test_label_path).reshape(-1, 1)
        sd_data = StandardScaler().fit(mytrain_data)      # scalers are fit on the training split only
        sd_label = StandardScaler().fit(mytrain_label)
        mytrain_data = sd_data.transform(mytrain_data)
        mytest_data = sd_data.transform(mytest_data)
        mytrain_label = sd_label.transform(mytrain_label)
        mytest_label = sd_label.transform(mytest_label)
        mytrain_data = mytrain_data.reshape(-1, day, cov)
        mytest_data = mytest_data.reshape(-1, day, cov)
        mytrain_label = mytrain_label.reshape(-1)
        mytest_label = mytest_label.reshape(-1)
        # new_test = mydataset(mytest_data, mytest_label)
        # test_loader = DataLoader(new_test, shuffle=False, batch_size=1)
        print("finish loading data")
        path, model_path = create_result(root_path, t)
        model = torch.load(model_path).to(device)
        my_test_data = torch.Tensor(mytest_data).to(device)
        my_test_label = torch.Tensor(mytest_label).reshape(-1, 1).to(device)
        mu_list = torch.zeros_like(my_test_label.reshape(-1))
        sigma_list = torch.zeros_like(my_test_label.reshape(-1))

        # Monte-Carlo prediction: 100 stochastic forward passes through the
        # Bayesian GRU; the spread of the sampled means captures the epistemic
        # (weight) uncertainty, sigma the aleatoric part
        temp_list = []
        for i in range(100):
            with torch.no_grad():
                mu, sigma = model(my_test_data)
            mu_list = mu_list + mu
            sigma_list = sigma_list + sigma
            temp_list.append(mu.cpu().detach().numpy())
        mu = mu_list / 100
        sigma = sigma_list / 100

        mu_std = np.std(np.array(temp_list), 0)
        mu_std = sd_label.scale_ * mu_std
        mu = mu.reshape(-1, 1)
        sigma = sigma.reshape(-1, 1)

        test_label = sd_label.inverse_transform(my_test_label.cpu().detach().numpy()).reshape(-1)
        mu = sd_label.inverse_transform(mu.cpu().detach().numpy()).reshape(-1)
        sigma = (sd_label.scale_ * sigma.cpu().detach().numpy()).reshape(-1)

        test_label = np.array(test_label, dtype=np.float64)
        mu = np.array(mu, dtype=np.float64)
        sigma = np.array(sigma, dtype=np.float64) + mu_std   # total spread = aleatoric + epistemic

        test_pinball = eval_pinball(test_label, mu, sigma, [0.125, 0.875]) / 0.25
        test_pinball2 = eval_pinball(test_label, mu, sigma, [0.25, 0.75]) / 0.5
        test_pinball3 = eval_pinball(test_label, mu, sigma, [0.375, 0.625]) / 0.75
        test_CRPS = CRPS(test_label, mu, sigma)
        test_MAE = MAE(test_label, mu)
        test_MAPE = MAPE(test_label, mu)
        print(t)
        print('CRPS', test_CRPS)
        print('MAPE', test_MAPE)
        print('MAE', test_MAE)
        print('pinball_loss75', test_pinball)
        print('pinball_loss50', test_pinball2)
        print('pinball_loss25', test_pinball3)
        np.save(path + '/test_result_mean.npy', mu)
        np.save(path + '/test_label.npy', test_label)
        np.save(path + '/test_result_sigma.npy', sigma)
        metric_list[0].append(test_CRPS)
        metric_list[1].append(test_MAE)
        metric_list[2].append(test_MAPE)
        metric_list[3].append(test_pinball)
        metric_list[4].append(test_pinball2)
        metric_list[5].append(test_pinball3)
    print('DeepAR')   # copy-paste residue from the deepAR script; this run is the Bayesian model
    result_dict = {}
    result_dict['GEF'] = [np.mean(metric_list[i]) for i in range(len(metric_list))]
    result_dict = pd.DataFrame(result_dict)
    result_dict.to_csv(root_path + '/result_deepAR_lr5.csv', index=False, sep=',')
--------------------------------------------------------------------------------
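
`eval_pinball` and `CRPS` above come from `Bayesian/utils.py`, whose first half is lost in this dump (see the garbled span below). For reference, a closed-form Gaussian CRPS and an averaged pinball loss of the kind being computed — an illustrative sketch, not the repo's code; `properscoring`, already imported above, provides the same CRPS as `ps.crps_gaussian(y, mu, sig)`:

# Illustrative sketch (assumed forms; the repo's own definitions are not in this dump).
import numpy as np
from scipy.stats import norm

def gaussian_crps(y, mu, sigma):
    # Closed form for CRPS(N(mu, sigma^2), y): sigma*[z(2*Phi(z)-1) + 2*phi(z) - 1/sqrt(pi)]
    z = (y - mu) / sigma
    return np.mean(sigma * (z * (2 * norm.cdf(z) - 1) + 2 * norm.pdf(z) - 1 / np.sqrt(np.pi)))

def gaussian_pinball(y, mu, sigma, quantiles):
    # Pinball (quantile) loss averaged over the requested quantile levels,
    # with each quantile read off the Gaussian predictive distribution.
    losses = []
    for q in quantiles:
        pred_q = norm.ppf(q, mu, sigma)
        u = y - pred_q
        losses.append(np.mean(np.maximum(q * u, (q - 1) * u)))
    return np.mean(losses)
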

/Bayesian/train.py:
--------------------------------------------------------------------------------
import os
import random
from copy import deepcopy
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchinfo import summary

torch.cuda.set_device(3)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sklearn.preprocessing import StandardScaler
from blitz.modules import BayesianLSTM, BayesianGRU, BayesianLinear
from blitz.utils import variational_estimator
import time
import sys
sys.path.append('.')
from utils import *


if __name__ == '__main__':
    root_path = './lr5'
    for t in range(1):
        t = t + 1
        setup_seed(t)
        batch_size = 256
        print("begin loading data")
        train_data_path = '../GEF_data/train_data.npy'
        train_label_path = '../GEF_data/train_label.npy'
        val_data_path = '../GEF_data/val_data.npy'
        val_label_path = '../GEF_data/val_label.npy'
        mytrain_data = np.load(train_data_path)
        cov = mytrain_data.shape[2]
        day = mytrain_data.shape[1]
        mytrain_data = mytrain_data.reshape(-1, cov)
        myval_data = np.load(val_data_path)
        myval_data = myval_data.reshape(-1, cov)
        mytrain_label = np.load(train_label_path).reshape(-1, 1)
        myval_label = np.load(val_label_path).reshape(-1, 1)
        sd_data = StandardScaler().fit(mytrain_data)
        sd_label = StandardScaler().fit(mytrain_label)
        mytrain_data = sd_data.transform(mytrain_data)
        myval_data = sd_data.transform(myval_data)
        mytrain_label = sd_label.transform(mytrain_label)
        myval_label = sd_label.transform(myval_label)
        mytrain_data = mytrain_data.reshape(-1, day, cov)
        myval_data = myval_data.reshape(-1, day, cov)
        mytrain_label = mytrain_label.reshape(-1)
        myval_label = myval_label.reshape(-1)
        new_train = my_dataset(mytrain_data, mytrain_label)
        new_val = my_dataset(myval_data, myval_label)
        train_loader = DataLoader(new_train, shuffle=True, batch_size=batch_size)
        val_loader = DataLoader(new_val, shuffle=True, batch_size=batch_size)
        print("finish loading data")
        path = create_folder(root_path, t)
        input_size, hidden_size, num_layers, output_size = 6, [64, 64], 2, 1
        model = GRU(input_size, hidden_size, num_layers, output_size).to(device)
        total = sum([param.nelement() for param in model.parameters()])
        print("Number of parameter: %.2fM" % (total / 1e6))
        loss_function = likelihood().to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)
        epochs = 300

        count = 15
        val_min = np.inf
        start_time = time.time()
        for i in range(epochs):
            if count > 15:
                break
            losses = []
            val_RMSE = []
            print('current_epoch', i)

            for (data, label) in train_loader:
                train_data = data.to(device)
                train_label = label.reshape(-1).to(device)

                # mu, sigma = model(train_data)
                # mu = mu.reshape(-1)
                # sigma = sigma.reshape(-1)
                # ELBO: the criterion is averaged over 100 sampled weight draws,
                # plus a KL complexity cost scaled by 1/len(training set)
                loss = model.sample_elbo(inputs=train_data,
                                         labels=train_label,
                                         criterion=loss_function,
                                         sample_nbr=100,
                                         complexity_cost_weight=1 / mytrain_data.shape[0])

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                losses.append(loss.item())

            for (data, label) in val_loader:
                val_data = data.to(device)
                val_label = label.reshape(-1).to(device)
                with torch.no_grad():
                    val_mu, val_sigma = model(val_data)
                    # val_mu = model(val_data)
                    val_mu = val_mu.reshape(-1)
                    val_sigma = val_sigma.reshape(-1)
                    val_loss = RMSE(val_label, val_mu)
                    val_RMSE.append(val_loss.item())

            loss_av = np.mean(losses)
            val_av = np.mean(val_RMSE)
            print('train_loss:', loss_av)
            print('val_RMSE:', val_av)
            if (val_av
--------------------------------------------------------------------------------
/Bayesian/utils.py:
--------------------------------------------------------------------------------
# (fragment: an unescaped '<' in the truncated line above swallowed the rest of
# train.py -- presumably its early-stopping/checkpoint tail -- together with the
# first ~135 lines of this file, which define the setup_seed, my_dataset,
# create_folder, create_result, likelihood, RMSE, MAE, MAPE, CRPS and
# eval_pinball helpers used by train.py and test.py. The dump resumes inside a
# quantile-coverage helper:)
        metric = (true[i] <= norm.ppf(quantiles[1], mu[i], sigma[i])) and (true[i] >= norm.ppf(quantiles[0], mu[i], sigma[i]))
        seq.append(metric)
    return(np.mean(seq))

def QCI(true, mu, sigma):
    # interquartile calibration: |absolute error - predicted IQR width|
    seq = []
    for i in range(len(true)):
        term1 = np.abs(true[i] - mu[i])
        term2 = norm.ppf(0.75, mu[i], sigma[i]) - norm.ppf(0.25, mu[i], sigma[i])
        metric = np.abs(term1 - term2)
        seq.append(metric)
    return(np.mean(seq))

def accuracy(pred, true, mu_std, interval=24):
    # checks whether the predictive spread (mu_std) moves in the same direction
    # as the realized relative error between consecutive 24-step windows
    length = len(pred)
    test_accuracy = []
    for i in np.arange(interval, length, interval):
        error1 = np.mean(np.abs(pred[i - interval:i] - true[i - interval:i]) / np.abs(true[i - interval:i]))
        error2 = np.mean(np.abs(pred[i:i + interval] - true[i:i + interval]) / np.abs(true[i:i + interval]))
        mu_std1 = np.mean(mu_std[i - interval:i] / pred[i - interval:i])
        mu_std2 = np.mean(mu_std[i:i + interval] / pred[i:i + interval])
        if (error1 - error2) * (mu_std1 - mu_std2) > 0:
            test_accuracy.append(1)
        else:
            test_accuracy.append(0)
    return(np.mean(test_accuracy))


@variational_estimator
class GRU(nn.Module):
    def __init__(self, input_size, n_hiddens, num_layers, output_size):
        super().__init__()
        self.n_input = input_size
        self.num_layers = num_layers
        self.hiddens = n_hiddens
        self.n_output = output_size

        # stack of BayesianGRU layers, e.g. n_hiddens = [64, 64]
        features = nn.ModuleList()
        for hidden in n_hiddens:
            rnn = BayesianGRU(
                in_features=input_size,
                out_features=hidden
            )
            features.append(rnn)
            input_size = hidden
        self.features = nn.Sequential(*features)
        self.distribution_presigma = nn.Linear(n_hiddens[-1], output_size)
        self.distribution_mu = nn.Linear(n_hiddens[-1], output_size)
        self.distribution_sigma = nn.Softplus()   # keeps sigma positive

    def forward(self, input_seq):
        batch_size = input_seq.shape[0]
        seq_len = input_seq.shape[1]
        input_seq = input_seq.view(batch_size, seq_len, self.n_input)
        out = self.gru_features(input_seq)
        fea = out[0]
        hidden_permute = out[1]
        # hidden_permute = hidden.permute(1, 2, 0).contiguous().view(hidden.shape[1], -1)
        pre_sigma = self.distribution_presigma(hidden_permute)
        mu = self.distribution_mu(hidden_permute)
        sigma = self.distribution_sigma(pre_sigma)
        mu = torch.squeeze(mu)
        sigma = torch.squeeze(sigma)
        return mu, sigma

    def gru_features(self, x, predict=False):
        # run the input through the stacked BayesianGRU layers
        x_input = x
        out = None
        out_lis = []
        for i in range(self.num_layers):
            out, hidden = self.features[i](x_input.float())
            x_input = out
            out_lis.append(out)
        return out, hidden, out_lis
--------------------------------------------------------------------------------
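
`train.py` above builds its criterion as `likelihood()`, another casualty of the garbled span in `utils.py`. blitz's `sample_elbo` calls `criterion(model_output, labels)`, and this model outputs a `(mu, sigma)` pair, so the lost class is presumably a Gaussian negative log-likelihood; a minimal sketch under that assumption, not the repo's code:

# Hedged sketch of the missing `likelihood` criterion (assumed Gaussian NLL).
import torch
import torch.nn as nn

class likelihood(nn.Module):
    def forward(self, output, label):
        mu, sigma = output   # the GRU above returns a (mu, sigma) tuple
        dist = torch.distributions.Normal(mu.reshape(-1), sigma.reshape(-1))
        return -dist.log_prob(label.reshape(-1)).mean()

With such a criterion, `sample_elbo` averages the NLL over `sample_nbr=100` stochastic weight draws and adds the KL term scaled by `complexity_cost_weight`, i.e. the (negative) ELBO that the optimizer minimizes.
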

/COV_data/test_data.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/COV_data/test_data.npy
--------------------------------------------------------------------------------

/COV_data/test_label.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/COV_data/test_label.npy
--------------------------------------------------------------------------------

/COV_data/train_data.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/COV_data/train_data.npy
--------------------------------------------------------------------------------

/COV_data/train_label.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/COV_data/train_label.npy
--------------------------------------------------------------------------------

/COV_data/val_data.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/COV_data/val_data.npy
--------------------------------------------------------------------------------

/COV_data/val_label.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/COV_data/val_label.npy
--------------------------------------------------------------------------------

/DLinear/__pycache__/utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hkuedl/DiffLoad-Uncertainty-Quantification-Load-Forecasting/4032d38c93cddfd395259cec40a2c049046cfc34/DLinear/__pycache__/utils.cpython-38.pyc
--------------------------------------------------------------------------------

/DLinear/test.py:
--------------------------------------------------------------------------------
import os
import random
from copy import deepcopy
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchinfo import summary

torch.cuda.set_device(2)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sklearn.preprocessing import StandardScaler
from typing import Tuple
from torch.nn import functional as F
import pickle
from time import time
from typing import Union
from torch import optim
from torch.nn.functional import mse_loss, l1_loss, binary_cross_entropy, cross_entropy
from torch.optim import Optimizer
from utils import *


if __name__ == '__main__':
    root_path = './lr5'
    metric_list = [[], [], []]
    for t in range(1):
        t = t + 1
        setup_seed(t)
        print("begin loading data")
        train_data_path = '../GEF_data/train_data.npy'
        train_label_path = '../GEF_data/train_label.npy'
        test_data_path = '../GEF_data/val_data.npy'     # note: this script scores the validation split
        test_label_path = '../GEF_data/val_label.npy'
        mytrain_data = np.load(train_data_path)
        cov = mytrain_data.shape[2]
        day = mytrain_data.shape[1]
        mytrain_data = mytrain_data.reshape(-1, 1)      # single-column scaling, unlike the cov-wise scripts
        mytest_data = np.load(test_data_path)
        mytest_data = mytest_data.reshape(-1, 1)
        mytrain_label = np.load(train_label_path).reshape(-1, 1)
        mytest_label = np.load(test_label_path).reshape(-1, 1)
        sd_data = StandardScaler().fit(mytrain_data)
        sd_label = StandardScaler().fit(mytrain_label)
        mytrain_data = sd_data.transform(mytrain_data)
        mytest_data = sd_data.transform(mytest_data)
        mytrain_label = sd_label.transform(mytrain_label)
        mytest_label = sd_label.transform(mytest_label)
        mytrain_data = mytrain_data.reshape(-1, day, cov)
        mytest_data = mytest_data.reshape(-1, day, cov)
        mytrain_label = mytrain_label.reshape(-1)
        mytest_label = mytest_label.reshape(-1)
        print("finish loading data")
        path, model_path = create_result(root_path, t)
        model = torch.load(model_path).to(device)
        test_data = torch.Tensor(mytest_data).to(device)
        test_label = torch.Tensor(mytest_label).reshape(-1, 1).to(device)
        with torch.no_grad():
            y_pred = model(test_data)
            y_pred = y_pred.reshape(-1, 1)
        test_label = sd_label.inverse_transform(test_label.cpu().detach().numpy()).reshape(-1)
        y_pred = sd_label.inverse_transform(y_pred.cpu().detach().numpy()).reshape(-1)
        test_label = np.array(test_label, dtype=np.float64)
        y_pred = np.array(y_pred, dtype=np.float64)
        test_MAE = MAE(test_label, y_pred)
        test_MAPE = MAPE(test_label, y_pred)
        print(t)
        print('MAPE', test_MAPE)
        print('MAE', test_MAE)
        true = pd.DataFrame(test_label)
        value = pd.DataFrame(y_pred)

        metric_list[0].append(test_MAE)
        metric_list[1].append(test_MAPE)

        true.to_csv(path + '/' + str(t) + '_true.csv', index=False)
        value.to_csv(path + '/' + str(t) + '_value.csv', index=False)
    print('nbeats')   # copy-paste residue from the NBEATSX script; this run is DLinear
    result_dict = {}
    print('test_MAE:', np.mean(metric_list[0]))
    print('test_MAPE:', np.mean(metric_list[1]))
    result_dict['GEF'] = [np.mean(metric_list[0]), np.mean(metric_list[1])]
    result_dict = pd.DataFrame(result_dict)
    result_dict.to_csv(root_path + '/result_DLinear.csv', index=False, sep=',')
--------------------------------------------------------------------------------
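
Both DLinear scripts instantiate `DLinear(seq_len, pred_len, enc_in=cov, enc_out=1, individual=False)` from `DLinear/utils.py`, a file this dump does not include. For orientation, a minimal sketch of the standard DLinear idea (Zeng et al., AAAI 2023): decompose the input by a moving average and fit one linear map per component. The class name and kernel size here are illustrative, and the repo's version additionally projects `enc_in` channels down to `enc_out`:

# Illustrative DLinear-style block, not the repo's class.
import torch
import torch.nn as nn

class DLinearSketch(nn.Module):
    def __init__(self, seq_len, pred_len, kernel_size=25):
        super().__init__()
        # moving-average trend extractor (length-preserving)
        self.avg = nn.AvgPool1d(kernel_size, stride=1, padding=kernel_size // 2,
                                count_include_pad=False)
        self.linear_trend = nn.Linear(seq_len, pred_len)
        self.linear_seasonal = nn.Linear(seq_len, pred_len)

    def forward(self, x):                  # x: (batch, seq_len, channels)
        x = x.permute(0, 2, 1)             # (batch, channels, seq_len)
        trend = self.avg(x)                # smooth trend component
        seasonal = x - trend               # residual ("seasonal") component
        y = self.linear_trend(trend) + self.linear_seasonal(seasonal)
        return y.permute(0, 2, 1)          # (batch, pred_len, channels)
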

/DLinear/train.py:
--------------------------------------------------------------------------------
import os
import random
from copy import deepcopy
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchinfo import summary

torch.cuda.set_device(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sklearn.preprocessing import StandardScaler
from typing import Tuple
from torch.nn import functional as F
import pickle
from time import time
from typing import Union
from torch import optim
from torch.nn.functional import mse_loss, l1_loss, binary_cross_entropy, cross_entropy
from torch.optim import Optimizer

from utils import *

if __name__ == '__main__':
    root_path = './lr5'
    for v in range(1):
        v = v + 1
        setup_seed(v)
        batch_size = 256
        print("begin loading data")
        train_data_path = '../GEF_data/train_data.npy'
        train_label_path = '../GEF_data/train_label.npy'
        val_data_path = '../GEF_data/val_data.npy'
        val_label_path = '../GEF_data/val_label.npy'
        mytrain_data = np.load(train_data_path)
        cov = mytrain_data.shape[2]
        day = mytrain_data.shape[1]
        mytrain_data = mytrain_data.reshape(-1, 1)
        myval_data = np.load(val_data_path)
        myval_data = myval_data.reshape(-1, 1)
        mytrain_label = np.load(train_label_path).reshape(-1, 1)
        myval_label = np.load(val_label_path).reshape(-1, 1)
        sd_data = StandardScaler().fit(mytrain_data)
        sd_label = StandardScaler().fit(mytrain_label)
        mytrain_data = sd_data.transform(mytrain_data)
        myval_data = sd_data.transform(myval_data)
        mytrain_label = sd_label.transform(mytrain_label)
        myval_label = sd_label.transform(myval_label)
        mytrain_data = mytrain_data.reshape(-1, day, cov)
        myval_data = myval_data.reshape(-1, day, cov)
        mytrain_label = mytrain_label.reshape(-1)
        myval_label = myval_label.reshape(-1)
        new_train = my_dataset(mytrain_data, mytrain_label)
        new_val = my_dataset(myval_data, myval_label)
        train_loader = DataLoader(new_train, shuffle=True, batch_size=batch_size)
        val_loader = DataLoader(new_val, shuffle=True, batch_size=batch_size)
        print("finish loading data")
        path = create_folder(root_path, v)
        pred_len = 1
        seq_len = 7
        model = DLinear(seq_len, pred_len, enc_in=cov, enc_out=1, individual=False).to(device)
        loss_function = nn.MSELoss().to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)
        epochs = 300
        count = 15
        val_min = np.inf

        for i in range(epochs):
            if count > 15:
                break
            losses = []
            val_RMSE = []
            print('current_epoch', i)

            for (data, label) in train_loader:
                train_data = data.to(device)
                train_label = label.reshape(-1).to(device)

                y_pred = model(train_data)
                y_pred = y_pred.reshape(-1)

                loss = loss_function(y_pred, train_label)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                losses.append(loss.item())

            for (data, label) in val_loader:
                val_data = data.to(device)
                val_label = label.reshape(-1).to(device)

                with torch.no_grad():
                    val_pred = model(val_data)
                    val_pred = val_pred.reshape(-1)

                val_loss = RMSE(val_label, val_pred)
                val_RMSE.append(val_loss.item())

            loss_av = np.mean(losses)
            val_av = np.mean(val_RMSE)
            print('train_MSE:', loss_av)
            print('val_RMSE:', val_av)
            if (val_av
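
The dump cuts `train.py` off mid-condition above (the same unescaped-`<` damage seen in the Bayesian files). Given the `count`/`val_min` bookkeeping already set up and the `pkl_folder/1/1_.pkl` checkpoints in the tree, the lost tail is presumably the usual validation-driven early stop; a hedged sketch of that pattern, not the repo's code:

            # Hedged reconstruction of the truncated loop tail; the variable
            # names (val_av, val_min, count, path, v) come from the code above.
            if (val_av < val_min):
                val_min = val_av                                   # best validation RMSE so far
                count = 0                                          # reset early-stopping patience
                torch.save(model, path + '/' + str(v) + '_.pkl')   # cf. lr5/pkl_folder/1/1_.pkl
            else:
                count = count + 1                                  # the break at the loop top fires once count > 15
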