├── .idea └── FISMF.iml ├── README.md ├── generate_data.py ├── model ├── LTFE.py ├── STFE.py ├── ST_Norm.py ├── cell.py ├── loss.py ├── model.py └── supervisor.py ├── requirements.txt ├── train.py └── utils.py /.idea/FISMF.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FCDNet: Frequency-Guided Complementary Dependency Modeling for Multivariate Time-Series Forecasting 2 | ## Requirements 3 | - Python 3.8.3 4 | - see `requirements.txt` 5 | 6 | 7 | ## Data Preparation 8 | 9 | #### TXT File 10 | Download Solar-Energy datasets from [https://github.com/laiguokun/multivariate-time-series-data](https://github.com/laiguokun/multivariate-time-series-data). Put into the `data/{solar_AL}` folder. 11 | 12 | #### NPZ File 13 | 14 | Download PEMS03, PEMS04, PEMS07, PEMS08 datasets from [https://github.com/Davidham3/ASTGCN/tree/master/data). Put into the `data/{PEMS03,PEMS04,PEMS07,PEMS08}` folder. 15 | 16 | ## Split dataset 17 | 18 | Run the following commands to generate train/validation/test dataset at `data/{solar_AL,PEMS03,PEMS04,PEMS07,PEMS08}/{train,val,test}.npz`. 19 | 20 | ``` 21 | python generate_data.py --dataset PEMS03 --train_rate 0.6 --val_rate 0.2 22 | 23 | python generate_data.py --dataset PEMS04 --train_rate 0.6 --val_rate 0.2 24 | 25 | python generate_data.py --dataset PEMS07 --train_rate 0.6 --val_rate 0.2 26 | 27 | python generate_data.py --dataset PEMS08 --train_rate 0.6 --val_rate 0.2 28 | 29 | python generate_data.py --dataset Solar_AL 30 | ``` 31 | 32 | ## Train Commands 33 | 34 | * Solar-Energy 35 | ``` 36 | # Use Solar-Energy dataset 37 | python train.py --dataset_dir=data/solar_AL 38 | ``` 39 | * PEMS03 40 | ``` 41 | # Use PEMS03 dataset 42 | python train.py --dataset_dir=data/PEMS03 43 | ``` 44 | * PEMS04 45 | ``` 46 | # Use PEMS04 dataset 47 | python train.py --dataset_dir=data/PEMS04 48 | ``` 49 | * PEMS07 50 | ``` 51 | # Use PEMS07 dataset 52 | python train.py --dataset_dir=data/PEMS07 53 | ``` 54 | * PEMS08 55 | ``` 56 | # Use PEMS08 dataset 57 | python train.py --dataset_dir=data/PEMS08 58 | ``` 59 | 60 | # Citation Format 61 | If you find this codebase helpful for your research, please consider citing the following paper: 62 | 63 | ```plaintext 64 | @article{chen2023fcdnet, 65 | title={FCDNet: Frequency-Guided Complementary Dependency Modeling for Multivariate Time-Series Forecasting}, 66 | author={Chen, Weijun and Wang, Heyuan and Tian, Ye and Guan, Shijie and Liu, Ning}, 67 | journal={arXiv preprint arXiv:2312.16450}, 68 | year={2023} 69 | } 70 | 71 | -------------------------------------------------------------------------------- /generate_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import numpy as np 4 | import pandas as pd 5 | import math 6 | def generate_data(args, path_input, path_output): 7 | if 'txt' in path_input or 'csv' in path_input: 8 | file = open(path_input) 9 | rawdata = np.loadtxt(file, delimiter=',') 10 | times, num_nodes = rawdata.shape # (times, num_nodes) 11 | print("times: ", times, ", num_nodes: ", num_nodes) 12 | elif 'npz' in path_input: 13 | file = np.load(path_input) 14 | if len(file['data'].shape) == 3: 15 | if args.dataset == 'stock': 16 | rawdata = np.squeeze(file['data'][:, :, 1]) 17 | else: 18 | rawdata = np.squeeze(file['data'][:, :, 0]) 19 | else: 20 | rawdata = np.squeeze(file['data']) 21 | if len(rawdata.shape)==2: 22 | times, num_nodes = rawdata.shape 23 | print("times: ", times, ", num_nodes: ", num_nodes) 24 | else: 25 | times, num_nodes, features_dim = rawdata.shape 26 | print("times: ", times, ", num_nodes: ", num_nodes, ", features_dim: ", features_dim) 27 | elif 'npy' in path_input: 28 | file = np.load(path_input) 29 | if len(file.shape) == 3: 30 | if args.dataset == 'stock': 31 | rawdata = np.squeeze(file[:, :, 1]) 32 | else: 33 | rawdata = np.squeeze(file[:, :, 0]) 34 | else: 35 | rawdata = np.squeeze(file) 36 | if len(rawdata.shape)==2: 37 | times, num_nodes = rawdata.shape 38 | print("times: ", times, ", num_nodes: ", num_nodes) 39 | else: 40 | times, num_nodes, features_dim = rawdata.shape 41 | print("times: ", times, ", num_nodes: ", num_nodes, ", features_dim: ", features_dim) 42 | 43 | P = args.window 44 | h = args.horizon 45 | 46 | train_end = int(times * args.train_rate) 47 | val_end = int(times * (args.train_rate + args.val_rate)) 48 | train_set = range(0, train_end) 49 | val_set = range(train_end, val_end) 50 | test_set = range(val_end, times) 51 | 52 | # train 53 | x_train, y_train = split_x_y(rawdata, train_set, P, h) 54 | # val 55 | x_val, y_val = split_x_y(rawdata, val_set, P, h) 56 | # test 57 | x_test, y_test = split_x_y(rawdata, test_set, P, h) 58 | 59 | # x: (num_samples, input_length, num_nodes, input_dim) 60 | # y: (num_samples, output_length, num_nodes, output_dim) 61 | # Write the data into npz file. 62 | for cat in ["train", "val", "test"]: 63 | _x, _y = locals()["x_" + cat], locals()["y_" + cat] 64 | print(cat, "x: ", _x.shape, "y:", _y.shape) 65 | np.savez_compressed(os.path.join(path_output, "%s.npz" % cat), x=_x, y=_y) 66 | 67 | def split_x_y(rawdata, idx_set, P, h): 68 | x, y = [], [] 69 | samples = len(idx_set) - P - h + 1 70 | for i in range(samples): 71 | start = idx_set[i] 72 | endx = start + P 73 | endy = endx + h 74 | x.append(rawdata[start:endx,...]) 75 | y.append(rawdata[endx:endy,...]) 76 | x = np.stack(x, axis=0) # (samples, P, num_nodes) 77 | y = np.stack(y, axis=0) # (samples, h, num_nodes) 78 | 79 | if len(x.shape)==3: 80 | return np.expand_dims(x, axis = -1), np.expand_dims(y, axis = -1) 81 | else: 82 | return x, y 83 | 84 | def generate_data_npz(args, path_input, path_output): 85 | if 'txt' in path_input or 'csv' in path_input: 86 | file = open(path_input) 87 | rawdata = np.loadtxt(file, delimiter=',') 88 | df = pd.DataFrame(rawdata) 89 | times, num_nodes = rawdata.shape # (times, num_nodes) 90 | print("times: ", times, ", num_nodes: ", num_nodes) 91 | elif 'npz' in path_input: 92 | file = np.load(path_input) 93 | if len(file['data'].shape) == 3: 94 | rawdata = np.squeeze(file['data'][:,:,0]) 95 | else: 96 | rawdata = np.squeeze(file['data']) 97 | print(rawdata.shape) 98 | df = pd.DataFrame(rawdata) 99 | print("df:{}".format(type(df))) 100 | x_offsets = np.arange(-11, 1, 1) # array([-11,-10,...,0]) 101 | y_offsets = np.arange(1, 13, 1) # array([1,2,...,12]) 102 | 103 | # x: (num_samples, input_length, num_nodes, input_dim) 104 | # y: (num_samples, output_length, num_nodes, output_dim) 105 | x, y = generate_npz_seq2seq_io_data( 106 | df, 107 | x_offsets=x_offsets, 108 | y_offsets=y_offsets, 109 | add_time_in_day=args.add_time_in_day, 110 | add_day_in_week=args.add_day_in_week, 111 | ) 112 | 113 | print("x shape: ", x.shape, ", y shape: ", y.shape) 114 | 115 | # Write the data into npz file. 116 | # train/val/test: 7/1/2 117 | num_samples = x.shape[0] 118 | num_train = round(num_samples * args.train_rate) 119 | num_val = round(num_samples * args.val_rate) 120 | num_test = num_samples - num_train - num_val 121 | 122 | # train 123 | x_train, y_train = x[:num_train], y[:num_train] 124 | # val 125 | x_val, y_val = x[num_train: num_train + num_val], y[num_train: num_train + num_val] 126 | # test 127 | x_test, y_test = x[-num_test:], y[-num_test:] 128 | 129 | for cat in ["train", "val", "test"]: 130 | _x, _y = locals()["x_" + cat], locals()["y_" + cat] 131 | print(cat, "x: ", _x.shape, "y:", _y.shape) 132 | np.savez_compressed( 133 | os.path.join(path_output, "%s.npz" % cat), 134 | x=_x, 135 | y=_y, 136 | x_offsets=x_offsets.reshape(list(x_offsets.shape) + [1]), 137 | y_offsets=y_offsets.reshape(list(y_offsets.shape) + [1]) 138 | ) 139 | 140 | 141 | def generate_npz_seq2seq_io_data(df, x_offsets, y_offsets, add_time_in_day=True, add_day_in_week=True, scaler=None): 142 | num_samples, num_nodes = df.shape # (times, num_nodes) 143 | data = np.expand_dims(df.values, axis=-1) 144 | data_list = [data] 145 | if add_time_in_day: # True 146 | # df.index.values = np.array(df.index.values) 147 | time_ind = np.array(df.index.values % 288 ) / np.array([288]) 148 | print("time_ind:{}".format(time_ind)) 149 | time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0)) 150 | # (1,1,times)->copy->(1,num_nodes,times)->transpose->(times,num_nodes,1) 151 | data_list.append(time_in_day) 152 | if add_day_in_week: # False 153 | dayofweek = np.zeros(df.shape[0], dtype=int) 154 | week_time = 5*12*24*7 155 | day_time = 5*12*24 156 | summ = 0 157 | for i in range(df.shape[0]): 158 | summ += 5 159 | dayofweek[i] = int((summ%week_time)//day_time) 160 | day_in_week = np.zeros(shape=(num_samples, num_nodes, 7)) # (times,num_nodes,7) 161 | day_in_week[np.arange(num_samples), :, dayofweek] = 1 162 | data_list.append(day_in_week) 163 | 164 | data = np.concatenate(data_list, axis=-1) # (times,num_nodes,2) 165 | 166 | x, y = [], [] 167 | min_t = abs(min(x_offsets)) 168 | max_t = abs(num_samples - max(y_offsets)) 169 | for t in range(min_t, max_t): # times-11-12 = samples 170 | x_t = data[t + x_offsets, ...] # (12,num_nodes,2) 171 | y_t = data[t + y_offsets, ...] # (12,num_nodes,2) 172 | x.append(x_t) 173 | y.append(y_t) 174 | x = np.stack(x, axis=0) 175 | y = np.stack(y, axis=0) # x,y: (samples,12,num_nodes,2) 176 | return x, y 177 | 178 | def generate_data_h5(args, path_input, path_output): 179 | df = pd.read_hdf(path_input) 180 | x_offsets = np.arange(-11, 1, 1) # array([-11,-10,...,0]) 181 | y_offsets = np.arange(1, 13, 1) # array([1,2,...,12]) 182 | 183 | # x: (num_samples, input_length, num_nodes, input_dim) 184 | # y: (num_samples, output_length, num_nodes, output_dim) 185 | x, y = generate_graph_seq2seq_io_data( 186 | df, 187 | x_offsets=x_offsets, 188 | y_offsets=y_offsets, 189 | add_time_in_day=args.add_time_in_day, 190 | add_day_in_week=args.add_day_in_week, 191 | ) 192 | 193 | print("x shape: ", x.shape, ", y shape: ", y.shape) 194 | 195 | # Write the data into npz file. 196 | # train/val/test: 7/1/2 197 | num_samples = x.shape[0] 198 | num_train = round(num_samples * args.train_rate) 199 | num_val = round(num_samples * args.val_rate) 200 | num_test = num_samples - num_train - num_val 201 | 202 | # train 203 | x_train, y_train = x[:num_train], y[:num_train] 204 | # val 205 | x_val, y_val = x[num_train: num_train + num_val], y[num_train: num_train + num_val] 206 | # test 207 | x_test, y_test = x[-num_test:], y[-num_test:] 208 | 209 | for cat in ["train", "val", "test"]: 210 | _x, _y = locals()["x_" + cat], locals()["y_" + cat] 211 | print(cat, "x: ", _x.shape, "y:", _y.shape) 212 | np.savez_compressed( 213 | os.path.join(path_output, "%s.npz" % cat), 214 | x=_x, 215 | y=_y, 216 | x_offsets=x_offsets.reshape(list(x_offsets.shape) + [1]), 217 | y_offsets=y_offsets.reshape(list(y_offsets.shape) + [1]) 218 | ) 219 | 220 | def generate_graph_seq2seq_io_data(df, x_offsets, y_offsets, add_time_in_day=True, add_day_in_week=True, scaler=None): 221 | num_samples, num_nodes = df.shape # (times, num_nodes) 222 | data = np.expand_dims(df.values, axis=-1) 223 | data_list = [data] 224 | if add_time_in_day: # True 225 | time_ind = (df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D") 226 | time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0)) 227 | # (1,1,times)->copy->(1,num_nodes,times)->transpose->(times,num_nodes,1) 228 | data_list.append(time_in_day) 229 | if add_day_in_week: # False 230 | day_in_week = np.zeros(shape=(num_samples, num_nodes, 7)) # (times,num_nodes,7) 231 | day_in_week[np.arange(num_samples), :, df.index.dayofweek] = 1 232 | data_list.append(day_in_week) 233 | 234 | data = np.concatenate(data_list, axis=-1) # (times,num_nodes,2) 235 | 236 | x, y = [], [] 237 | min_t = abs(min(x_offsets)) 238 | max_t = abs(num_samples - max(y_offsets)) 239 | for t in range(min_t, max_t): # times-11-12 = samples 240 | x_t = data[t + x_offsets, ...] # (12,num_nodes,2) 241 | y_t = data[t + y_offsets, ...] # (12,num_nodes,2) 242 | x.append(x_t) 243 | y.append(y_t) 244 | x = np.stack(x, axis=0) 245 | y = np.stack(y, axis=0) # x,y: (samples,12,num_nodes,2) 246 | return x, y 247 | 248 | def main(args): 249 | print("Generating training data:") 250 | if args.dataset == "Solar_AL": 251 | print("Solar_AL:") 252 | generate_data(args, "./data/solar_AL/solar_AL.txt", "./data/solar_AL/") 253 | if args.dataset == "PEMS04": 254 | print("PEMS04:") 255 | generate_data_npz(args, "./data/PEMS04/pems04.npz", "./data/PEMS04/") 256 | if args.dataset == "PEMS08": 257 | print("PEMS08:") 258 | generate_data_npz(args, "./data/PEMS08/pems08.npz", "./data/PEMS08/") 259 | if args.dataset == "PEMS03": 260 | print("PEMS03") 261 | generate_data(args, "./data/PEMS03/PEMS03.npz", "./data/PEMS03/") 262 | if args.dataset == "PEMS07": 263 | print("PEMS07") 264 | generate_data(args, "./data/PEMS07/PEMS07.npz", "./data/PEMS07/") 265 | if args.dataset == "stock": 266 | print("stock") 267 | generate_data(args, "./data/stock/stock.npz","./data/stock") 268 | print("Finish!") 269 | 270 | if __name__ == "__main__": 271 | parser = argparse.ArgumentParser() 272 | parser.add_argument("--window", type=int, default=12) 273 | parser.add_argument("--horizon", type=int, default=12) 274 | parser.add_argument("--train_rate", type=float, default=0.7) 275 | parser.add_argument("--val_rate", type=float, default=0.1) 276 | parser.add_argument("--add_time_in_day", type=bool, default=True) 277 | parser.add_argument("--add_day_in_week", type=bool, default=True) 278 | parser.add_argument("--dataset",type=str, default="Solar_AL") 279 | args = parser.parse_args() 280 | main(args) -------------------------------------------------------------------------------- /model/LTFE.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import functional as F 4 | from model.cell import SmoothSparseUnit 5 | import numpy as np 6 | import math 7 | import pywt 8 | from .ST_Norm import * 9 | import math 10 | minm = -99999999 11 | 12 | def WT_his(device, level, Stats): 13 | array = np.array([ 14 | [1.0, 0.0, 0.0, 0.0, 0.0], 15 | [0.0, 1.0, 0.0, 0.0, 0.0], 16 | [0.0, 0.0, 0.5, 0.0, 0.0], 17 | [0.0, 0.0, 0.0, 0.05, 0.0], 18 | [0.0, 0.0, 0.0, 0.0, 0.05] 19 | ]) 20 | Stats = Stats.detach().cpu().numpy() 21 | if len(Stats.shape) == 2: 22 | Stats = np.expand_dims(Stats, axis=2) 23 | time_steps, num_nodes, feas_dim = Stats.shape 24 | result = [] 25 | for feas in range(feas_dim): 26 | Data = Stats[:, :, feas] 27 | info = [] 28 | for i in range(num_nodes): 29 | Wavelet_res = [] 30 | SingleSampleDataWavelet = Data[:, i] 31 | coeffs = pywt.wavedec(SingleSampleDataWavelet, 'db{}'.format(level), level=level) 32 | for index in range(level + 1): 33 | factors = [] 34 | for j in range(len(coeffs)): 35 | factors.append(coeffs[j] * array[index][j]) 36 | y = pywt.waverec(factors, 'db{}'.format(level)) 37 | Wavelet_res.append(torch.Tensor(y)) 38 | res = torch.stack(Wavelet_res, dim=1).to(device) 39 | info.append(res) 40 | result.append(torch.stack(info, dim=1)) 41 | results = torch.stack(result, dim=3) 42 | return results 43 | 44 | class ShortAdj_generator(nn.Module): 45 | def __init__(self, args, time_series, reduction_ratio=16, alpha=0.2, dropout_rate=0.5): 46 | super(ShortAdj_generator, self).__init__() 47 | self.freq = args.freq 48 | self.kernel_size = args.kernel_size 49 | self.num_nodes = args.num_nodes 50 | self.embedding = args.embedding_size 51 | self.time_series = time_series 52 | self.seq_len = args.seq_len 53 | self.feas_dim = args.graph_feas_dim 54 | self.input_dim = args.graph_input_dim 55 | self.segm = int(self.time_series.shape[0] // self.freq) 56 | self.graphs = args.requires_graph 57 | self.device = args.device 58 | self.level = args.level 59 | self.expand_dim = self.level + 1 60 | self.delta_series = torch.zeros_like(self.time_series).to(self.device) 61 | self.conv1d = nn.Conv1d(in_channels=self.expand_dim * self.segm * self.feas_dim, out_channels=self.graphs, kernel_size=self.kernel_size, padding=0) 62 | self.fc_1 = nn.Linear(self.freq - self.kernel_size + 1, self.embedding) 63 | self.fc_2 = nn.Linear(self.embedding, self.embedding // reduction_ratio) 64 | self.fc_3 = nn.Linear(self.embedding // reduction_ratio, self.num_nodes) 65 | self.snorm = SNorm(self.freq) 66 | self.tnorm = TNorm(self.num_nodes, self.freq) 67 | self.pre_process() 68 | 69 | def pre_process(self): 70 | for i in range(self.time_series.shape[0]): 71 | if i == 0: 72 | self.delta_series[i] = self.time_series[i] 73 | else: 74 | self.delta_series[i] = self.time_series[i]-self.time_series[i-1] 75 | self.wave_list = [] 76 | for i in range(self.segm): 77 | time_seg = self.delta_series[i * self.freq + 1: (i + 1) * self.freq + 1] # [self.freq, self.num_nodes, self.input_dim] 78 | feas = WT_his(self.device, self.level, time_seg) 79 | self.wave_list.append(feas) 80 | self.His_data = torch.stack(self.wave_list, dim=0) # [segm, freq, num_nodes, expand_dim, feas_dim] 81 | 82 | 83 | def forward(self, node_feas): # input: (seq_len, batch_size, num_sensor * input_dim) 84 | t = self.His_data.reshape(self.segm, self.freq, self.num_nodes, -1) 85 | t = self.snorm(t) 86 | t = self.tnorm(t) 87 | self.times = t.permute(2, 0, 3, 1).reshape(self.num_nodes, -1, self.freq) 88 | mid_input = self.conv1d(self.times).permute(1, 0, 2) # (graphs, num_nodes, freq-kernel_size+1) 89 | mid_output = torch.stack([F.relu(self.fc_1(mid_input[i,...])) for i in range(self.graphs)], dim=0) 90 | mid_output = torch.sigmoid(self.fc_2(mid_output)) 91 | output = SmoothSparseUnit(self.fc_3(mid_output), 1, 0.02) 92 | return output 93 | 94 | class LongAdj_generator(nn.Module): 95 | def __init__(self, args, time_series, reduction_ratio=16, alpha=0.2, dropout_rate=0.5): 96 | super(LongAdj_generator, self).__init__() 97 | self.freq = args.freq 98 | self.kernel_size = args.kernel_size 99 | self.num_nodes = args.num_nodes 100 | self.embedding = args.embedding_size 101 | self.time_series = time_series 102 | self.seq_len = args.seq_len 103 | self.feas_dim = args.graph_feas_dim 104 | self.input_dim = args.graph_input_dim 105 | self.segm = int(self.time_series.shape[0] // self.freq) 106 | self.graphs = args.requires_graph 107 | self.device = args.device 108 | self.level = args.level 109 | self.expand_dim = self.level + 1 110 | self.delta_series = torch.zeros_like(self.time_series).to(self.device) 111 | self.conv1d = nn.Conv1d(in_channels=self.expand_dim * self.freq * self.feas_dim, out_channels=self.graphs, kernel_size=self.kernel_size, padding=0) 112 | self.fc_1 = nn.Linear(self.segm - self.kernel_size + 1, self.embedding) 113 | self.fc_2 = nn.Linear(self.embedding, self.embedding // reduction_ratio) 114 | self.fc_3 = nn.Linear(self.embedding // reduction_ratio, self.num_nodes) 115 | self.snorm = SNorm(self.segm) 116 | self.tnorm = TNorm(self.num_nodes, self.segm) 117 | self.pre_process() 118 | 119 | def pre_process(self): 120 | for i in range(self.time_series.shape[0]): 121 | if i == 0: 122 | self.delta_series[i] = self.time_series[i] 123 | else: 124 | self.delta_series[i] = self.time_series[i] - self.time_series[i - 1] 125 | self.wave_list = [] 126 | for i in range(self.segm): 127 | time_seg = self.delta_series[i * self.freq + 1: (i + 1) * self.freq + 1] # [self.freq, self.num_nodes, self.input_dim] 128 | feas = WT_his(self.device, self.level, time_seg) 129 | self.wave_list.append(feas) 130 | self.His_data = torch.stack(self.wave_list, dim=0) # [segm, freq, num_nodes, expand_dim, feas_dim] 131 | 132 | def forward(self, node_feas): # input: (seq_len, batch_size, num_sensor * input_dim) 133 | t = self.His_data.reshape(self.segm, self.freq, self.num_nodes, -1).permute(1, 0, 2, 3) 134 | t = self.snorm(t) 135 | t = self.tnorm(t) 136 | self.times = t.permute(2, 0, 3, 1).reshape(self.num_nodes, -1, self.segm) 137 | mid_input = self.conv1d(self.times).permute(1, 0, 2) # (graphs, num_nodes, freq-kernel_size+1) 138 | mid_output = torch.stack([F.relu(self.fc_1(mid_input[i,...])) for i in range(self.graphs)], dim=0) 139 | mid_output = torch.sigmoid(self.fc_2(mid_output)) 140 | output = SmoothSparseUnit(self.fc_3(mid_output), 1, 0.02) 141 | return output 142 | 143 | 144 | 145 | -------------------------------------------------------------------------------- /model/STFE.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import functional as F 4 | from model.cell import SmoothSparseUnit 5 | import numpy as np 6 | import torch.fft 7 | import math 8 | import pywt 9 | from .ST_Norm import * 10 | 11 | class nconv(nn.Module): 12 | def __init__(self): 13 | super(nconv,self).__init__() 14 | 15 | def forward(self,x, A): 16 | x = torch.einsum('ncvl,vw->ncwl',(x,A)) 17 | return x.contiguous() 18 | 19 | class linear(nn.Module): 20 | def __init__(self,c_in,c_out): 21 | super(linear,self).__init__() 22 | self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0,0), stride=(1,1), bias=True) 23 | 24 | def forward(self,x): 25 | return self.mlp(x) 26 | 27 | class FAGCN(nn.Module): 28 | def __init__(self,args,c_in,c_out,dropout=0.0,order=2, eps=0.3): 29 | super(FAGCN,self).__init__() 30 | self.nconv = nconv() 31 | c_in = (order*2)*c_in 32 | self.mlp = linear(c_in,c_out) 33 | self.dropout = dropout 34 | self.order = order 35 | self.eps = eps 36 | self.eye = torch.eye(args.num_nodes).to(args.device) 37 | 38 | def forward(self,x,adj): 39 | out = [] 40 | L = self.eps * self.eye + adj 41 | H = self.eps * self.eye - adj 42 | support = [L,H] 43 | for a in support: 44 | x1 = self.nconv(x,a) 45 | out.append(x1) 46 | for k in range(2, self.order + 1): 47 | x2 = self.nconv(x1,a) 48 | out.append(x2) 49 | x1 = x2 50 | 51 | h = torch.cat(out,dim=1) 52 | h = self.mlp(h) 53 | h = F.dropout(h, self.dropout, training=self.training) 54 | return h 55 | 56 | class SpectralConv1d(nn.Module): 57 | def __init__(self, args): 58 | super(SpectralConv1d, self).__init__() 59 | self.num_nodes = args.num_nodes 60 | self.seq_len = args.seq_len 61 | self.in_channels = args.input_dim * self.seq_len 62 | self.out_channels = args.dgraphs 63 | self.batch_size = args.batch_size 64 | self.Lin = nn.Linear(self.in_channels, self.out_channels) 65 | 66 | def forward(self, x): 67 | # [num_nodes, seq_len, input_dim*batch_size] 68 | # [B, F, V, T] 69 | B, F, V, T = x.shape 70 | x = x.permute(0, 2, 3, 1).reshape(B, V, -1) 71 | ffted = torch.view_as_real(torch.fft.fft(x, dim=-1)) 72 | # ffted.shape: (batch, seq_len, input_dim*batch_size) 73 | # print('ffted.shape:{}'.format(ffted.shape)) 74 | real = self.Lin(ffted[..., 0].contiguous()) 75 | img = self.Lin(ffted[..., 1].contiguous()) 76 | time_step_as_inner = torch.cat([real.unsqueeze(-1), img.unsqueeze(-1)], dim=-1) 77 | iffted = torch.fft.irfft(torch.view_as_complex(time_step_as_inner), n=time_step_as_inner.shape[1], dim=1) 78 | # print('iffted.shape:{}'.format(iffted.shape)) 79 | Am = torch.sqrt(real*real+img*img) 80 | S = torch.atan(real/(img+0.0001)) 81 | return iffted,Am,S 82 | 83 | class Instant_graph(nn.Module): 84 | def __init__(self, args): 85 | super(Instant_graph, self).__init__() 86 | self.num_nodes = args.num_nodes 87 | self.seq_len = args.seq_len 88 | self.output_channel = args.input_dim 89 | self.time_step = self.seq_len 90 | self.dgraphs = args.dgraphs 91 | self.batch_size = args.batch_size 92 | self.input_dim = args.input_dim 93 | self.device = args.device 94 | self.requires_graph = args.requires_graph 95 | self.kernel_size = args.kernel_size 96 | self.spectconv = SpectralConv1d(args) 97 | self.fc1 = nn.Linear(self.dgraphs, self.num_nodes) 98 | self.fc2 = nn.Linear(self.dgraphs, self.num_nodes) 99 | self.fc3 = nn.Linear(self.dgraphs, self.num_nodes) 100 | 101 | 102 | def forward(self, x): 103 | # x.shape: [num_nodes, batch_size*input_dim, seq_len] 104 | # x.shape: [B, F, V, T] 105 | out, Am, S = self.spectconv(x) 106 | mid_input = torch.sigmoid((self.fc1(out)+self.fc2(Am)+self.fc3(S))/3.0) 107 | graph = torch.sigmoid((torch.sum(mid_input, dim=0) / mid_input.shape[0])) 108 | return graph 109 | 110 | 111 | class Instant_forecasting(nn.Module): 112 | def __init__(self, args, channels=16, kernel_size=2): 113 | super(Instant_forecasting, self).__init__() 114 | self.dropout = args.dropout 115 | self.blocks = args.blocks 116 | self.layers = args.layers 117 | self.filter_convs = nn.ModuleList() 118 | self.gate_convs = nn.ModuleList() 119 | self.residual_convs = nn.ModuleList() 120 | self.skip_convs = nn.ModuleList() 121 | self.num_nodes = args.num_nodes 122 | self.seq_len = args.seq_len 123 | self.batch_size = args.batch_size 124 | self.input_dim = args.input_dim 125 | self.out_dim = args.horizon 126 | self.horizon = args.horizon 127 | self.gconv = nn.ModuleList() 128 | self.bn = nn.ModuleList() 129 | self.device = args.device 130 | self.start_conv = nn.Conv2d(in_channels=self.input_dim, 131 | out_channels=channels, 132 | kernel_size=(1, 1)) 133 | self.graphs = args.requires_graph 134 | self.instant_graph = Instant_graph(args) 135 | receptive_field = 1 136 | 137 | self.supports_len = 0 138 | 139 | for b in range(self.blocks): 140 | additional_scope = kernel_size - 1 141 | new_dilation = 1 142 | for i in range(self.layers): 143 | # dilated convolutions 144 | self.filter_convs.append(nn.Conv2d(in_channels=channels, 145 | out_channels=channels, 146 | kernel_size=(1, kernel_size), dilation=new_dilation)) 147 | 148 | self.gate_convs.append(nn.Conv2d(in_channels=channels, 149 | out_channels=channels, 150 | kernel_size=(1, kernel_size), dilation=new_dilation)) 151 | 152 | 153 | # 1x1 convolution for skip connection 154 | self.skip_convs.append(nn.Conv2d(in_channels=channels, 155 | out_channels=channels, 156 | kernel_size=(1, 1))) 157 | new_dilation *= 2 158 | receptive_field += additional_scope 159 | additional_scope *= 2 160 | self.bn.append(nn.BatchNorm2d(channels)) 161 | self.gconv.append(FAGCN(args,channels, channels)) 162 | 163 | self.end_conv_1 = nn.Conv2d(in_channels=channels, 164 | out_channels=channels, 165 | kernel_size=(1, 1), 166 | bias=True) 167 | 168 | self.end_conv_2 = nn.Conv2d(in_channels=channels, 169 | out_channels=self.out_dim, 170 | kernel_size=(1, 1), 171 | bias=True) 172 | 173 | self.receptive_field = receptive_field 174 | 175 | def forward(self, input): 176 | # the required input shape: [batch_size, self.input_dim, self.num_nodes, self.seq_len] 177 | input = input.reshape(self.seq_len, input.shape[1], self.num_nodes, -1) 178 | input = input.permute(1, 3, 2, 0) 179 | adj = self.instant_graph(input) 180 | 181 | in_len = input.size(3) 182 | if in_len < self.receptive_field: 183 | x = nn.functional.pad(input, (self.receptive_field - in_len, 0, 0, 0)) 184 | else: 185 | x = input 186 | x = self.start_conv(x) 187 | skip = 0 188 | 189 | # WaveNet layers 190 | for i in range(self.blocks * self.layers): 191 | 192 | residual = x 193 | # dilated convolution 194 | # x.shape:[batch_size, residual_channels, num_nodes, seq_len] 195 | filter = self.filter_convs[i](x) 196 | filter = torch.tanh(filter) 197 | gate = self.gate_convs[i](x) 198 | gate = torch.sigmoid(gate) 199 | x = filter * gate 200 | 201 | s = x 202 | s = self.skip_convs[i](s) 203 | try: 204 | skip = skip[:, :, :, -s.size(3):] 205 | except: 206 | skip = 0 207 | skip = s + skip 208 | x = self.gconv[i](x, adj) 209 | x = x + residual[:, :, :, -x.size(3):] 210 | 211 | x = self.bn[i](x) 212 | 213 | x = F.relu(skip) 214 | x = F.relu(self.end_conv_1(x)) 215 | x = self.end_conv_2(x) 216 | # x.shape: [batch_size, output_dim, num_nodes, 1] 217 | return x, adj 218 | -------------------------------------------------------------------------------- /model/ST_Norm.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Variable 5 | from torch.nn import Parameter 6 | 7 | class SNorm(nn.Module): 8 | def __init__(self, channels): 9 | super(SNorm, self).__init__() 10 | self.beta = nn.Parameter(torch.zeros(channels)) 11 | self.gamma = nn.Parameter(torch.ones(channels)) 12 | 13 | def forward(self, x): 14 | # x: [batch_size, channels, num_nodes, seq_len] 15 | x_norm = (x - x.mean(2, keepdims=True)) / (x.var(2, keepdims=True, unbiased=True) + 0.00001) ** 0.5 16 | 17 | out = x_norm * self.gamma.view(1, -1, 1, 1) + self.beta.view(1, -1, 1, 1) 18 | return out 19 | 20 | 21 | class TNorm(nn.Module): 22 | def __init__(self, num_nodes, channels, track_running_stats=True, momentum=0.1): 23 | super(TNorm, self).__init__() 24 | # 注意TNorm这里要求channels在第2维, 节点数目在第三维 25 | self.track_running_stats = track_running_stats 26 | self.beta = nn.Parameter(torch.zeros(1, channels, num_nodes, 1)) 27 | self.gamma = nn.Parameter(torch.ones(1, channels, num_nodes, 1)) 28 | self.register_buffer('running_mean', torch.zeros(1, channels, num_nodes, 1)) 29 | self.register_buffer('running_var', torch.ones(1, channels, num_nodes, 1)) 30 | self.momentum = momentum 31 | 32 | def forward(self, x): 33 | if self.track_running_stats: 34 | mean = x.mean((0, 3), keepdims=True) 35 | var = x.var((0, 3), keepdims=True, unbiased=False) 36 | if self.training: 37 | n = x.shape[3] * x.shape[0] 38 | with torch.no_grad(): 39 | self.running_mean = self.momentum * mean + (1 - self.momentum) * self.running_mean 40 | self.running_var = self.momentum * var * n / (n - 1) + (1 - self.momentum) * self.running_var 41 | else: 42 | mean = self.running_mean 43 | var = self.running_var 44 | else: 45 | mean = x.mean((3), keepdims=True) 46 | var = x.var((3), keepdims=True, unbiased=True) 47 | x_norm = (x - mean) / (var + 0.00001) ** 0.5 48 | out = x_norm * self.gamma + self.beta 49 | return out -------------------------------------------------------------------------------- /model/cell.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch.autograd import Function 4 | import torch.nn as nn 5 | import utils 6 | 7 | class SSU(Function): # Smooth Sparse Units "SSU":"ssu" 8 | 9 | @staticmethod 10 | def forward(ctx, input, alpha, epsilon): # ctx:"context" input:x_tensor 11 | 12 | # t = f(1-x)/f(x) x>0 13 | t = torch.where((input > 0.) & (input < 1.), torch.exp(1. / input - 1. / (1 - input)), torch.zeros_like(input)) 14 | # tx = 1/pow(x,2) + 1/pow(1-x,2) 15 | tx = torch.where(t > 0., 1. / pow(input, 2) + 1. / pow(1 - input, 2), torch.zeros_like(input)) 16 | 17 | output = torch.where(input <= 0., torch.zeros_like(input), input) 18 | output = torch.where(input >= 1., torch.ones_like(input), output) 19 | output = torch.where(t > 0., alpha / (alpha + t), output) 20 | 21 | ctx.save_for_backward(t, tx, output, alpha, epsilon) 22 | 23 | return output 24 | 25 | @staticmethod 26 | def backward(ctx, grad_output): 27 | # dloss / dx = (dloss / doutput) * (doutput / dx) 28 | # grad_output: dloss / doutput 29 | 30 | t, tx, output, alpha, epsilon = ctx.saved_tensors 31 | 32 | grad_input = alpha * pow(output, 2) * t * tx * grad_output.clone() 33 | 34 | sup = alpha * epsilon / (1 - epsilon) 35 | inf = alpha * (1 - epsilon) / epsilon 36 | grad_input[t > inf] = grad_output[t > inf] 37 | grad_input[(t < sup) & (t > 0)] = grad_output[(t < sup) & (t > 0)]*2 38 | 39 | return grad_input, None, None 40 | 41 | 42 | def SmoothSparseUnit(x, alpha, epsilon=0.05): 43 | alpha = torch.tensor(alpha) 44 | epsilon = torch.tensor(epsilon) 45 | return SSU.apply(x, alpha, epsilon) 46 | 47 | 48 | class LayerParams: 49 | def __init__(self, rnn_network: torch.nn.Module, layer_type: str, device): 50 | self._rnn_network = rnn_network 51 | self._params_dict = {} 52 | self._biases_dict = {} 53 | self._type = layer_type 54 | self.device = device 55 | 56 | def get_weights(self, shape): 57 | if shape not in self._params_dict: 58 | nn_param = torch.nn.Parameter(torch.empty(*shape, device=self.device)) 59 | torch.nn.init.xavier_normal_(nn_param) 60 | self._params_dict[shape] = nn_param 61 | self._rnn_network.register_parameter('{}_weight_{}'.format(self._type, str(shape)), 62 | nn_param) 63 | return self._params_dict[shape] 64 | 65 | def get_biases(self, length, bias_start=0.0): 66 | if length not in self._biases_dict: 67 | biases = torch.nn.Parameter(torch.empty(length, device=self.device)) 68 | torch.nn.init.constant_(biases, bias_start) 69 | self._biases_dict[length] = biases 70 | self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, str(length)), 71 | biases) 72 | 73 | return self._biases_dict[length] 74 | 75 | 76 | class FAGRUCell(torch.nn.Module): 77 | def __init__(self, num_units, max_diffusion_step, num_nodes, nonlinearity='tanh', 78 | filter_type="laplacian", use_gc_for_ru=True, device='cuda'): 79 | """ 80 | :param num_units: 81 | :param adj_mx: 82 | :param max_diffusion_step: 83 | :param num_nodes: 84 | :param nonlinearity: 85 | :param filter_type: "laplacian", "random_walk", "dual_random_walk". 86 | :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates. 87 | """ 88 | 89 | super().__init__() 90 | self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu 91 | # support other nonlinearities up here? 92 | self._num_nodes = num_nodes 93 | self._num_units = num_units 94 | self._max_diffusion_step = max_diffusion_step 95 | self._supports = [] 96 | self._use_gc_for_ru = use_gc_for_ru 97 | self.device = device 98 | self.ones = torch.eye(self._num_nodes).to(self.device) 99 | self._fc_params = LayerParams(self, 'fc', self.device) 100 | self._gconv_params = LayerParams(self, 'gconv', self.device) 101 | self._fagcn_beta = torch.nn.Parameter(torch.tensor([0.80]), requires_grad=True).to(self.device) 102 | self._eps = 0.3 103 | 104 | @staticmethod 105 | def _build_sparse_matrix(L): 106 | L = L.tocoo() 107 | indices = np.column_stack((L.row, L.col)) 108 | # this is to ensure row-major ordering to equal torch.sparse.sparse_reorder(L) 109 | indices = indices[np.lexsort((indices[:, 0], indices[:, 1]))] 110 | L = torch.sparse_coo_tensor(indices.T, L.data, L.shape, device=self.device) 111 | return L 112 | 113 | def forward(self, inputs, hx, adj): 114 | """Gated recurrent unit (GRU) with Graph Convolution. 115 | :param inputs: (B, num_nodes * input_dim) 116 | :param hx: (B, num_nodes * rnn_units) 117 | :return 118 | - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`. 119 | """ 120 | adj_mx = self._calculate_frequency_adaption_matrix(adj).t() 121 | # adj_mx = adj 122 | output_size = 2 * self._num_units 123 | if self._use_gc_for_ru: 124 | fn = self._gconv 125 | else: 126 | fn = self._fc 127 | value = torch.sigmoid(fn(inputs, adj_mx, hx, output_size, bias_start=1.0)) 128 | # print("value_1.shape:{}".format(value.shape)) 129 | value = torch.reshape(value, (-1, self._num_nodes, output_size)) 130 | # print("value_2.shape:{}".format(value.shape)) 131 | r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1) 132 | # print("r.shape:{} u.shape:{}".format(r.shape, u.shape)) 133 | r = torch.reshape(r, (-1, self._num_nodes * self._num_units)) 134 | u = torch.reshape(u, (-1, self._num_nodes * self._num_units)) 135 | 136 | c = self._gconv(inputs, adj_mx, r * hx, self._num_units) 137 | if self._activation is not None: 138 | c = self._activation(c) 139 | 140 | new_state = u * hx + (1.0 - u) * c 141 | return new_state 142 | 143 | @staticmethod 144 | def _concat(x, x_): 145 | x_ = x_.unsqueeze(0) 146 | return torch.cat([x, x_], dim=0) 147 | 148 | def _calculate_frequency_adaption_matrix(self, adj_mx): 149 | adj_mx = adj_mx + torch.eye(int(adj_mx.shape[0])).to(self.device) 150 | d = torch.sum(adj_mx, 1) 151 | d_inv = 1. / d 152 | d_inv = torch.where(torch.isinf(d_inv), torch.zeros(d_inv.shape).to(self.device), d_inv) 153 | d_mat_inv = torch.sqrt(torch.diag(d_inv)) 154 | random_walk_mx_lf = self._eps * torch.eye(int(adj_mx.shape[0])).to(self.device) + torch.mm(torch.mm(d_mat_inv, adj_mx), d_mat_inv) 155 | random_walk_mx_hf = self._eps * torch.eye(int(adj_mx.shape[0])).to(self.device) - torch.mm(torch.mm(d_mat_inv, adj_mx), d_mat_inv) 156 | random_walk_mx = self._fagcn_beta * random_walk_mx_lf + (1-self._fagcn_beta) * random_walk_mx_hf 157 | return random_walk_mx 158 | 159 | 160 | def _fc(self, inputs, state, output_size, bias_start=0.0): 161 | batch_size = inputs.shape[0] 162 | inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1)) 163 | state = torch.reshape(state, (batch_size * self._num_nodes, -1)) 164 | inputs_and_state = torch.cat([inputs, state], dim=-1) 165 | input_size = inputs_and_state.shape[-1] 166 | weights = self._fc_params.get_weights((input_size, output_size)) 167 | value = torch.sigmoid(torch.matmul(inputs_and_state, weights)) 168 | biases = self._fc_params.get_biases(output_size, bias_start) 169 | value += biases 170 | return value 171 | 172 | def _gconv(self, inputs, adj_mx, state, output_size, bias_start=0.0): 173 | # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim) 174 | batch_size = inputs.shape[0] 175 | inputs = torch.reshape(inputs, (batch_size, self._num_nodes, -1)) 176 | state = torch.reshape(state, (batch_size, self._num_nodes, -1)) 177 | inputs_and_state = torch.cat([inputs, state], dim=2) 178 | input_size = inputs_and_state.size(2) 179 | 180 | x = inputs_and_state 181 | x0 = x.permute(1, 2, 0) # (num_nodes, total_arg_size, batch_size) 182 | x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size]) 183 | x = torch.unsqueeze(x0, 0) 184 | 185 | if self._max_diffusion_step == 0: 186 | pass 187 | else: 188 | x1 = torch.mm(adj_mx, x0) 189 | x = self._concat(x, x1) 190 | 191 | for k in range(2, self._max_diffusion_step + 1): 192 | x2 = 2 * torch.mm(adj_mx, x1) - x0 193 | x = self._concat(x, x2) 194 | x1, x0 = x2, x1 195 | 196 | num_matrices = self._max_diffusion_step + 1 # Adds for x itself. 197 | x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size]) 198 | x = x.permute(3, 1, 2, 0) # (batch_size, num_nodes, input_size, order) 199 | x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices]) 200 | 201 | weights = self._gconv_params.get_weights((input_size * num_matrices, output_size)) 202 | x = torch.matmul(x, weights) # (batch_size * self._num_nodes, output_size) 203 | 204 | biases = self._gconv_params.get_biases(output_size, bias_start) 205 | x += biases 206 | # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim) 207 | return torch.reshape(x, [batch_size, self._num_nodes * output_size]) -------------------------------------------------------------------------------- /model/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | def masked_mae_loss(y_pred, y_true): 5 | mask = (y_true != 0).float() 6 | mask /= mask.mean() 7 | loss = torch.abs(y_pred - y_true) 8 | loss = loss * mask 9 | loss[loss != loss] = 0 # delete Nan 10 | return loss.mean() 11 | 12 | 13 | def masked_mape_loss(y_pred, y_true, dataset): 14 | if dataset == 'METR_LA' or dataset == 'PEMS-BAY': 15 | "Follow the mape loss in DCRNN" 16 | mask = (y_true != 0).float() 17 | mask /= mask.mean() 18 | loss = torch.abs(torch.div(y_true - y_pred, y_true)) 19 | loss = loss * mask 20 | loss[loss != loss] = 0 21 | return loss.mean() 22 | else: 23 | "Follow the metrics in other papers for fair comparison" 24 | mask = torch.gt(y_true, 0) 25 | pred = torch.masked_select(y_pred, mask) 26 | true = torch.masked_select(y_true, mask) 27 | return torch.mean(torch.abs(torch.div((true - pred), true))) 28 | 29 | 30 | def masked_rmse_loss(y_pred, y_true): 31 | mask = (y_true != 0).float() 32 | mask /= mask.mean() 33 | loss = torch.pow(y_true - y_pred, 2) 34 | loss = loss * mask 35 | loss[loss != loss] = 0 36 | return torch.sqrt(loss.mean()) 37 | 38 | def masked_mse_loss(y_pred, y_true): 39 | mask = (y_true != 0).float() 40 | mask /= mask.mean() 41 | loss = torch.pow(y_true - y_pred, 2) 42 | loss = loss * mask 43 | loss[loss != loss] = 0 44 | return loss.mean() -------------------------------------------------------------------------------- /model/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import functional as F 4 | from model.cell import FAGRUCell, SmoothSparseUnit 5 | import numpy as np 6 | import math 7 | import pywt 8 | from .STFE import * 9 | from .LTFE import * 10 | 11 | class Seq2SeqAttrs: 12 | def __init__(self, args): 13 | #self.adj_mx = adj_mx 14 | self.max_diffusion_step = args.max_diffusion_step 15 | self.cl_decay_steps = args.cl_decay_steps 16 | self.filter_type = args.filter_type 17 | self.num_nodes = args.num_nodes 18 | self.num_rnn_layers = args.num_rnn_layers 19 | self.rnn_units = args.rnn_units 20 | self.output_dim = args.output_dim 21 | self.hidden_state_size = self.num_nodes * self.rnn_units 22 | 23 | 24 | class EncoderModel(nn.Module, Seq2SeqAttrs): 25 | def __init__(self, args): 26 | nn.Module.__init__(self) 27 | Seq2SeqAttrs.__init__(self, args) 28 | self.input_dim = args.input_dim 29 | self.output_dim = args.output_dim 30 | self.seq_len = args.seq_len # for the encoder 31 | self.device = args.device 32 | self.fagru_layers = nn.ModuleList( 33 | [FAGRUCell(self.rnn_units, self.max_diffusion_step, self.num_nodes, 34 | filter_type=self.filter_type, device=self.device) for _ in range(self.num_rnn_layers)]) 35 | 36 | 37 | def forward(self, inputs, adj, hidden_state=None): 38 | """ 39 | Encoder forward pass. 40 | :param inputs: shape (batch_size, self.num_nodes * self.input_dim) 41 | :param hidden_state: (num_layers, batch_size, self.hidden_state_size) 42 | optional, zeros if not provided 43 | :return: output: # shape (batch_size, self.hidden_state_size) 44 | hidden_state # shape (num_layers, batch_size, self.hidden_state_size) 45 | (lower indices mean lower layers) 46 | """ 47 | batch_size, _ = inputs.size() 48 | if hidden_state is None: 49 | hidden_state = torch.zeros((self.num_rnn_layers, batch_size, self.hidden_state_size), 50 | device=self.device) 51 | hidden_states = [] 52 | output = inputs 53 | for layer_num, fagru_layer in enumerate(self.fagru_layers): 54 | next_hidden_state = fagru_layer(output, hidden_state[layer_num], adj) 55 | hidden_states.append(next_hidden_state) 56 | output = next_hidden_state 57 | 58 | return output, torch.stack(hidden_states) # runs in O(num_layers) so not too slow 59 | 60 | 61 | class DecoderModel(nn.Module, Seq2SeqAttrs): 62 | def __init__(self, args): 63 | # super().__init__(is_training, adj_mx, **model_kwargs) 64 | nn.Module.__init__(self) 65 | Seq2SeqAttrs.__init__(self, args) 66 | self.output_dim = args.output_dim 67 | self.horizon = args.horizon # for the decoder 68 | self.projection_layer = nn.Linear(self.rnn_units, self.output_dim) 69 | self.device = args.device 70 | self.fagru_layers = nn.ModuleList( 71 | [FAGRUCell(self.rnn_units, self.max_diffusion_step, self.num_nodes, 72 | filter_type=self.filter_type, device=self.device) for _ in range(self.num_rnn_layers)]) 73 | 74 | def forward(self, inputs, adj, hidden_state=None): 75 | """ 76 | :param inputs: shape (batch_size, self.num_nodes * self.output_dim) 77 | :param hidden_state: (num_layers, batch_size, self.hidden_state_size) 78 | optional, zeros if not provided 79 | :return: output: # shape (batch_size, self.num_nodes * self.output_dim) 80 | hidden_state # shape (num_layers, batch_size, self.hidden_state_size) 81 | (lower indices mean lower layers) 82 | """ 83 | hidden_states = [] 84 | output = inputs 85 | for layer_num, fagru_layer in enumerate(self.fagru_layers): 86 | next_hidden_state = fagru_layer(output, hidden_state[layer_num], adj) 87 | hidden_states.append(next_hidden_state) 88 | output = next_hidden_state 89 | 90 | projected = self.projection_layer(output.view(-1, self.rnn_units)) 91 | output = projected.view(-1, self.num_nodes * self.output_dim) 92 | 93 | return output, torch.stack(hidden_states) 94 | 95 | 96 | class FCDNetModel(nn.Module, Seq2SeqAttrs): 97 | def __init__(self, node_feas, logger, args): 98 | super().__init__() 99 | Seq2SeqAttrs.__init__(self, args) 100 | self.args = args 101 | self.encoder_model = EncoderModel(args) 102 | self.decoder_model = DecoderModel(args) 103 | self.cl_decay_steps = args.cl_decay_steps 104 | self.use_curriculum_learning = args.use_curriculum_learning 105 | self._logger = logger 106 | self.embedding_size = args.embedding_size 107 | self.seq_len = args.seq_len 108 | self.feas_dim = args.feas_dim 109 | self.input_dim = args.input_dim 110 | self.kernel_size = args.kernel_size 111 | self.batch_size= args.batch_size 112 | self.freq = args.freq 113 | self.requires_graph = args.requires_graph 114 | self.level = args.level 115 | self.num_nodes = args.num_nodes 116 | self.device = args.device 117 | self.graphs = self.requires_graph 118 | self.instant_forecasting = Instant_forecasting(args) 119 | self.output_dim = args.output_dim 120 | self.dataset = args.dataset 121 | self.epis = [] 122 | for index in range(self.requires_graph): 123 | self.epis.append(nn.Parameter(torch.tensor([0.30], device=self.device), requires_grad=True)) 124 | self.ShortAdj_generator = ShortAdj_generator(args, node_feas) 125 | self.LongAdj_generator = LongAdj_generator(args, node_feas) 126 | self.lambdax = torch.nn.Parameter(torch.tensor([0.30], device=self.device), requires_grad=True) 127 | self.beta = torch.nn.Parameter(torch.tensor([0.10]), requires_grad=True).to(self.device) 128 | 129 | def _compute_sampling_threshold(self, batches_seen): 130 | return self.cl_decay_steps / ( 131 | self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps)) 132 | 133 | def encoder(self, inputs, adj): 134 | """ 135 | Encoder forward pass 136 | :param inputs: shape (seq_len, batch_size, num_sensor * input_dim) 137 | :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size) 138 | """ 139 | encoder_hidden_state = None 140 | for t in range(self.args.seq_len): 141 | _, encoder_hidden_state = self.encoder_model(inputs[t], adj, encoder_hidden_state) 142 | 143 | return encoder_hidden_state 144 | 145 | def decoder(self, encoder_hidden_state, adj, labels=None, batches_seen=None): 146 | """ 147 | Decoder forward pass 148 | :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size) 149 | :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference] 150 | :param batches_seen: global step [optional, not exist for inference] 151 | :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim) 152 | """ 153 | batch_size = encoder_hidden_state.size(1) 154 | go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim), 155 | device=self.device) 156 | decoder_hidden_state = encoder_hidden_state 157 | decoder_input = go_symbol 158 | outputs = [] 159 | 160 | for t in range(self.decoder_model.horizon): 161 | decoder_output, decoder_hidden_state = self.decoder_model(decoder_input, adj, decoder_hidden_state) 162 | 163 | decoder_input = decoder_output 164 | 165 | outputs.append(decoder_output) 166 | if self.training and self.use_curriculum_learning: 167 | c = np.random.uniform(0, 1) 168 | if c < self._compute_sampling_threshold(batches_seen): 169 | decoder_input = labels[t] 170 | outputs = torch.stack(outputs) 171 | return outputs 172 | 173 | def forward(self, inputs, labels=None, batches_seen=None, epoch=None): 174 | """ 175 | :param inputs: shape (seq_len, batch_size, num_sensor * input_dim) 176 | :param labels: shape (horizon, batch_size, num_sensor * output) 177 | :param batches_seen: batches seen till now 178 | :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim) 179 | """ 180 | 181 | ShortAdj = self.ShortAdj_generator(inputs) 182 | Long_Adj = self.LongAdj_generator(inputs) 183 | adj_list = (ShortAdj + self.lambdax * Long_Adj)/ (1.0+self.lambdax) 184 | adj = adj_list[0] 185 | for index in range(1, self.requires_graph): 186 | adj = adj + self.epis[index] * adj_list[index] 187 | encoder_hidden_state = self.encoder(inputs, adj) 188 | outputs = self.decoder(encoder_hidden_state, adj, labels, batches_seen=batches_seen) 189 | res, adj = self.instant_forecasting(inputs) 190 | res = res.squeeze(dim=3).permute(1, 0, 2) 191 | outputs = (outputs + self.beta*res)/ (1.0+self.beta) 192 | return outputs, adj 193 | -------------------------------------------------------------------------------- /model/supervisor.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import utils 4 | from model.model import FCDNetModel 5 | from model.loss import masked_mae_loss, masked_mape_loss, masked_rmse_loss, masked_mse_loss 6 | import pandas as pd 7 | import os 8 | import time 9 | 10 | def count_parameters(model): 11 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 12 | 13 | class FCDNetSupervisor: 14 | def __init__(self, args): 15 | self.args = args 16 | self.opt = args.optimizer 17 | self.max_grad_norm = args.max_grad_norm 18 | self.num_sample = args.num_sample 19 | self.device = args.device 20 | # logging. 21 | self._log_dir = self._get_log_dir(args) 22 | log_level = args.log_level 23 | self._logger = utils.get_logger(self._log_dir, __name__, 'info.log', level=log_level) 24 | # data set 25 | self._data = utils.load_dataset(args.dataset_dir, args.batch_size) 26 | self.standard_scaler = self._data['scaler'] 27 | 28 | ### Feas 29 | # initialize input_dim:1 feas_dim:1 graph_input_dim:1 graph_feas_dim:1 30 | Array = False 31 | if args.dataset_dir == 'data/solar_AL': 32 | df = pd.read_csv('./data/solar_AL/solar_AL.txt', delimiter=',') 33 | self.dataset = "solal_AL" 34 | elif args.dataset_dir == 'data/PEMS04': 35 | file = np.load('./data/PEMS04/pems04.npz') 36 | arr = file['data'][:,:,0] 37 | Array = True 38 | self.dataset = "PEMS04" 39 | args.input_dim = 9 40 | args.feas_dim = 9 41 | elif args.dataset_dir == 'data/PEMS08': 42 | file = np.load('./data/PEMS08/pems08.npz') 43 | arr = file['data'][:,:,0] 44 | Array = True 45 | self.dataset = "PEMS08" 46 | args.input_dim = 9 47 | args.feas_dim = 9 48 | elif args.dataset_dir == 'data/PEMS03': 49 | file = np.load('./data/PEMS03/PEMS03.npz') 50 | arr = file['data'] 51 | Array = True 52 | self.dataset = "PEMS03" 53 | elif args.dataset_dir == 'data/PEMS07': 54 | file = np.load('./data/PEMS07/PEMS07.npz') 55 | arr = file['data'] 56 | Array = True 57 | self.dataset = "PEMS07" 58 | elif args.dataset_dir == 'data/stock': 59 | self.dataset = 'stock' 60 | file = np.load('./data/stock/stockx.npy') 61 | arr = file[:,:,1] 62 | Array = True 63 | 64 | if not Array: 65 | arr = df.values 66 | num_samples = arr.shape[0] 67 | args.dataset = self.dataset 68 | num_train = round(num_samples * 0.7) 69 | arr = arr[:num_train] 70 | if len(arr.shape)==3: 71 | p = arr.shape[2] 72 | arr_mean = [] 73 | arr_std = [] 74 | for i in range(p): 75 | arr_mean.append(np.mean(arr[...,i])) 76 | arr_std.append(np.std(arr[...,i])) 77 | scaler = utils.StandardScaler(mean=arr_mean, std=arr_std, p=p) 78 | else: 79 | scaler = utils.StandardScaler(mean=arr.mean(), std=arr.std()) 80 | self.input_dim = args.input_dim 81 | train_feas = scaler.transform(arr) 82 | self._train_feas = torch.Tensor(train_feas).to(self.device) 83 | if len(self._train_feas)<3: 84 | self._train_feas = torch.unsqueeze(self._train_feas, dim=-1) 85 | args.num_nodes = arr.shape[1] 86 | self.num_nodes = args.num_nodes 87 | self.seq_len = args.seq_len # for the encoder 88 | self.output_dim = args.output_dim 89 | self.use_curriculum_learning = args.use_curriculum_learning 90 | self.horizon = args.horizon # for the decoder 91 | self.best_val_loss = 9999 92 | print(args) 93 | 94 | # setup model 95 | FCDNet_model = FCDNetModel(self._train_feas, self._logger, args) 96 | self.FCDNet_model = FCDNet_model.to(self.device) 97 | self._logger.info("Model created") 98 | print("Total Trainable Parameters: {}".format(count_parameters(self.FCDNet_model))) 99 | self._epoch_num = args.epoch 100 | if self._epoch_num > 0: 101 | self.load_initial_model() 102 | 103 | @staticmethod 104 | def _get_log_dir(args): 105 | log_dir = args.log_dir 106 | if log_dir is None: 107 | batch_size = args.batch_size 108 | learning_rate = args.base_lr 109 | max_diffusion_step = args.max_diffusion_step 110 | num_rnn_layers = args.num_rnn_layers 111 | rnn_units = args.rnn_units 112 | structure = '-'.join( 113 | ['%d' % rnn_units for _ in range(num_rnn_layers)]) 114 | horizon = args.horizon 115 | filter_type = args.filter_type 116 | filter_type_abbr = 'L' 117 | if filter_type == 'random_walk': 118 | filter_type_abbr = 'R' 119 | elif filter_type == 'dual_random_walk': 120 | filter_type_abbr = 'DR' 121 | run_id = 'FCDNet_%s_%d_h_%d_%s_lr_%g_bs_%d_%s/' % ( 122 | filter_type_abbr, max_diffusion_step, horizon, 123 | structure, learning_rate, batch_size, 124 | time.strftime('%m%d%H%M%S')) 125 | base_dir = args.base_dir 126 | log_dir = os.path.join(base_dir, run_id) 127 | if not os.path.exists(log_dir): 128 | os.makedirs(log_dir) 129 | return log_dir 130 | 131 | def save_test_model(self, dataset, epoch): 132 | if not os.path.exists('models_{}/'.format(dataset)): 133 | os.makedirs('models_{}/'.format(dataset)) 134 | 135 | config = {} 136 | config['model_state_dict'] = self.FCDNet_model.state_dict() 137 | config['epoch'] = epoch 138 | torch.save(config, 'models_{}/epo{}.tar'.format(dataset, epoch)) 139 | self._logger.info("Saved model at {}".format(epoch)) 140 | return 'models_{}/epo{}.tar'.format(dataset, epoch) 141 | 142 | def load_initial_model(self, dataset): 143 | self._setup_graph() 144 | assert os.path.exists('models_{}/epo{}.tar'.format(dataset, self._epoch_num)), 'Weights at epoch %d not found' % self._epoch_num 145 | checkpoint = torch.load('models_{}/epo{}.tar'.format(dataset, self._epoch_num), map_location='cpu') 146 | self.FCDNet_model.load_state_dict(checkpoint['model_state_dict']) 147 | self._logger.info("Loaded model at {}".format(self._epoch_num)) 148 | 149 | def load_test_model(self, dataset, epoch): 150 | self._setup_graph() 151 | assert os.path.exists('models_{}/epo{}.tar'.format(dataset, epoch)), 'Weights at epoch %d not found' % epoch 152 | checkpoint = torch.load('models_{}/epo{}.tar'.format(dataset, epoch), map_location='cpu') 153 | self.FCDNet_model.load_state_dict(checkpoint['model_state_dict']) 154 | self._logger.info("Loaded model at {}".format(epoch)) 155 | 156 | def _setup_graph(self): 157 | with torch.no_grad(): 158 | self.FCDNet_model = self.FCDNet_model.eval() 159 | 160 | val_iterator = self._data['val_loader'].get_iterator() 161 | 162 | for _, (x, y) in enumerate(val_iterator): 163 | x, y = self._prepare_data(x, y) 164 | output = self.FCDNet_model(x) 165 | break 166 | 167 | def train(self, args): 168 | return self._train(args) 169 | 170 | def evaluate(self, dataset='val', batches_seen=0): 171 | """ 172 | Computes mean L1Loss 173 | :return: mean L1Loss 174 | """ 175 | with torch.no_grad(): 176 | self.FCDNet_model = self.FCDNet_model.eval() 177 | 178 | val_iterator = self._data['{}_loader'.format(dataset)].get_iterator() 179 | losses = [] 180 | mapes = [] 181 | # rmses = [] 182 | mses = [] 183 | lenx = self.horizon 184 | l = [[] for i in range(lenx)] 185 | m = [[] for i in range(lenx)] 186 | r = [[] for i in range(lenx)] 187 | 188 | for batch_idx, (x, y) in enumerate(val_iterator): 189 | x, y = self._prepare_data(x, y) 190 | output, adj = self.FCDNet_model(x) 191 | loss = self._compute_loss(y, output) 192 | y_true = self.standard_scaler.inverse_transform(y) 193 | y_pred = self.standard_scaler.inverse_transform(output) 194 | mapes.append(masked_mape_loss(y_pred, y_true, self.dataset).item()) 195 | mses.append(masked_mse_loss(y_pred, y_true).item()) 196 | # rmses.append(masked_rmse_loss(y_pred, y_true).item()) 197 | losses.append(loss.item()) 198 | 199 | for i in range(lenx): 200 | l[i].append(masked_mae_loss(y_pred[i:i + 1], y_true[i:i + 1]).item()) 201 | m[i].append(masked_mape_loss(y_pred[i:i + 1], y_true[i:i + 1], self.dataset).item()) 202 | r[i].append(masked_mse_loss(y_pred[i:i + 1], y_true[i:i + 1]).item()) 203 | 204 | mean_loss = np.mean(losses) 205 | mean_mape = np.mean(mapes) 206 | mean_rmse = np.sqrt(np.mean(mses)) 207 | # mean_rmse = np.mean(rmses) #another option 208 | 209 | if dataset == 'test': 210 | for i in range(lenx): 211 | message = 'Horizon {}: mae: {:.4f}, mape: {:.4f}, rmse: {:.4f}'.format(i + 1, np.mean(l[i]), np.mean(m[i]), 212 | np.sqrt(np.mean(r[i]))) 213 | self._logger.info(message) 214 | 215 | return mean_loss, mean_mape, mean_rmse 216 | 217 | def test(self, args, epoch_num): 218 | 219 | with torch.no_grad(): 220 | 221 | self.load_test_model(self.dataset, epoch_num) 222 | 223 | test_iterator = self._data['test_loader'].get_iterator() 224 | losses = [] 225 | mapes = [] 226 | # rmses = [] 227 | mses = [] 228 | lenx = args.horizon 229 | l = [[] for i in range(lenx)] 230 | m = [[] for i in range(lenx)] 231 | r = [[] for i in range(lenx)] 232 | 233 | for batch_idx, (x, y) in enumerate(test_iterator): 234 | x, y = self._prepare_data(x, y) 235 | 236 | output, adj = self.FCDNet_model(x) 237 | loss = self._compute_loss(y, output) 238 | y_true = self.standard_scaler.inverse_transform(y) 239 | y_pred = self.standard_scaler.inverse_transform(output) 240 | mapes.append(masked_mape_loss(y_pred, y_true, self.dataset).item()) 241 | mses.append(masked_mse_loss(y_pred, y_true).item()) 242 | # rmses.append(masked_rmse_loss(y_pred, y_true).item()) 243 | losses.append(loss.item()) 244 | 245 | for i in range(lenx): 246 | l[i].append(masked_mae_loss(y_pred[i:i+1], y_true[i:i+1]).item()) 247 | m[i].append(masked_mape_loss(y_pred[i:i+1], y_true[i:i+1], self.dataset).item()) 248 | r[i].append(masked_mse_loss(y_pred[i:i+1], y_true[i:i+1]).item()) 249 | 250 | 251 | mean_loss = np.mean(losses) 252 | mean_mape = np.mean(mapes) 253 | mean_rmse = np.sqrt(np.mean(mses)) 254 | # mean_rmse = np.mean(rmses) #another option 255 | 256 | for i in range(lenx): 257 | message = 'Horizon {}: mae: {:.4f}, mape: {:.4f}, rmse: {:.4f}'.format(i+1, np.mean(l[i]), np.mean(m[i]), 258 | np.sqrt(np.mean(r[i]))) 259 | self._logger.info(message) 260 | 261 | message = 'test_mae: {:.4f}, test_mape: {:.4f}, test_rmse: {:.4f} ' .format(mean_loss, mean_mape, mean_rmse) 262 | 263 | self._logger.info(message) 264 | 265 | return mean_loss, mean_mape, mean_rmse 266 | 267 | 268 | def _train(self, args): 269 | # steps is used in learning rate - will see if need to use it? 270 | min_val_loss = float('inf') 271 | wait = 0 272 | base_lr = args.base_lr 273 | steps = args.steps 274 | patience = args.patience 275 | epochs = args.epochs 276 | lr_decay_ratio = args.lr_decay_ratio 277 | log_every = args.log_every 278 | save_model = args.save_model 279 | test_every_n_epochs = args.test_every_n_epochs 280 | epsilon = args.epsilon 281 | best_idx = 0 282 | 283 | if self.opt == 'adam': 284 | optimizer = torch.optim.Adam(self.FCDNet_model.parameters(), lr=base_lr, eps=epsilon) 285 | elif self.opt == 'sgd': 286 | optimizer = torch.optim.SGD(self.FCDNet_model.parameters(), lr=base_lr) 287 | else: 288 | optimizer = torch.optim.Adam(self.FCDNet_model.parameters(), lr=base_lr, eps=epsilon) 289 | 290 | lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=steps, gamma=float(lr_decay_ratio)) 291 | 292 | self._logger.info('Start training ...') 293 | 294 | # this will fail if model is loaded with a changed batch_size 295 | num_batches = self._data['train_loader'].num_batch 296 | self._logger.info("num_batches:{}".format(num_batches)) 297 | 298 | batches_seen = num_batches * self._epoch_num 299 | 300 | for epoch_num in range(self._epoch_num, epochs): 301 | print("Num of epoch:", epoch_num) 302 | self.FCDNet_model = self.FCDNet_model.train() 303 | train_iterator = self._data['train_loader'].get_iterator() 304 | losses = [] 305 | start_time = time.time() 306 | 307 | for batch_idx, (x, y) in enumerate(train_iterator): 308 | optimizer.zero_grad() 309 | x, y = self._prepare_data(x, y) 310 | output, adj = self.FCDNet_model(x, y, batches_seen, epoch_num) 311 | if (epoch_num % epochs) == epochs - 1: 312 | output, adj = self.FCDNet_model(x, y, batches_seen, epoch_num) 313 | if batches_seen == 0: 314 | if self.opt == 'adam': 315 | optimizer = torch.optim.Adam(self.FCDNet_model.parameters(), lr=base_lr, eps=epsilon) 316 | elif self.opt == 'sgd': 317 | optimizer = torch.optim.SGD(self.FCDNet_model.parameters(), lr=base_lr) 318 | else: 319 | optimizer = torch.optim.Adam(self.FCDNet_model.parameters(), lr=base_lr, eps=epsilon) 320 | 321 | 322 | loss = self._compute_loss(y, output) 323 | 324 | losses.append(loss.item()) 325 | 326 | self._logger.debug(loss.item()) 327 | batches_seen += 1 328 | 329 | loss.backward() 330 | # loss.backward() 331 | # gradient clipping - this does it in place 332 | torch.nn.utils.clip_grad_norm_(self.FCDNet_model.parameters(), self.max_grad_norm) 333 | 334 | optimizer.step() 335 | lr_scheduler.step() 336 | 337 | 338 | 339 | self._logger.info("evaluating now!") 340 | end_time = time.time() 341 | 342 | val_loss, val_mape, val_rmse = self.evaluate(dataset='val', batches_seen=batches_seen) 343 | 344 | end_time2 = time.time() 345 | 346 | 347 | if (epoch_num % log_every) == log_every - 1: 348 | message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, val_mae: {:.4f}, val_mape: {:.4f}, val_rmse: {:.4f}, lr: {:.6f}, ' \ 349 | '{:.1f}s, {:.1f}s'.format(epoch_num, epochs, batches_seen, 350 | np.mean(losses), val_loss, val_mape, val_rmse, 351 | lr_scheduler.get_last_lr()[0], 352 | (end_time - start_time), (end_time2 - start_time)) 353 | self._logger.info(message) 354 | 355 | if (epoch_num % test_every_n_epochs) == test_every_n_epochs - 1: 356 | test_loss, test_mape, test_rmse = self.evaluate(dataset='test', batches_seen=batches_seen) 357 | 358 | message = 'Epoch [{}/{}] ({}) train_mae: {:.4f}, test_mae: {:.4f}, test_mape: {:.4f}, test_rmse: {:.4f}, lr: {:.6f}, ' \ 359 | '{:.1f}s, {:.1f}s'.format(epoch_num, epochs, batches_seen, 360 | np.mean(losses), test_loss, test_mape, test_rmse, 361 | lr_scheduler.get_last_lr()[0], 362 | (end_time - start_time), (end_time2 - start_time)) 363 | self._logger.info(message) 364 | 365 | if val_loss < self.best_val_loss: 366 | wait = 0 367 | model_file_name = self.save_test_model(self.dataset, epoch_num) 368 | best_idx = epoch_num 369 | self._logger.info( 370 | 'Val loss decrease from {:.4f} to {:.4f}, ' 371 | 'saving to {}'.format(self.best_val_loss, val_loss, model_file_name)) 372 | self.best_val_loss = val_loss 373 | 374 | elif val_loss >= self.best_val_loss: 375 | wait += 1 376 | if wait == patience: 377 | self._logger.warning('Early stopping at epoch: %d' % epoch_num) 378 | break 379 | 380 | self.test(args, best_idx) 381 | 382 | 383 | def _prepare_data(self, x, y): 384 | x, y = self._get_x_y(x, y) 385 | x, y = self._get_x_y_in_correct_dims(x, y) 386 | return x.to(self.device), y.to(self.device) 387 | 388 | def _get_x_y(self, x, y): 389 | """ 390 | :param x: shape (batch_size, seq_len, num_sensor, input_dim) 391 | :param y: shape (batch_size, horizon, num_sensor, input_dim) 392 | :returns x shape (seq_len, batch_size, num_sensor, input_dim) 393 | y shape (horizon, batch_size, num_sensor, input_dim) 394 | """ 395 | x = torch.from_numpy(x).float() 396 | y = torch.from_numpy(y).float() 397 | self._logger.debug("X: {}".format(x.size())) 398 | self._logger.debug("y: {}".format(y.size())) 399 | x = x.permute(1, 0, 2, 3) 400 | y = y.permute(1, 0, 2, 3) 401 | return x, y 402 | 403 | def _get_x_y_in_correct_dims(self, x, y): 404 | """ 405 | :param x: shape (seq_len, batch_size, num_sensor, input_dim) 406 | :param y: shape (horizon, batch_size, num_sensor, input_dim) 407 | :return: x: shape (seq_len, batch_size, num_sensor * input_dim) 408 | y: shape (horizon, batch_size, num_sensor * output_dim) 409 | """ 410 | batch_size = x.size(1) 411 | x = x.view(self.seq_len, batch_size, self.num_nodes * self.input_dim) 412 | y = y[..., :self.output_dim].view(self.horizon, batch_size, 413 | self.num_nodes * self.output_dim) 414 | return x, y 415 | 416 | def _compute_loss(self, y_true, y_predicted): 417 | y_true = self.standard_scaler.inverse_transform(y_true) 418 | y_predicted = self.standard_scaler.inverse_transform(y_predicted) 419 | return masked_mae_loss(y_predicted, y_true) 420 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | scipy>=0.19.0 2 | 3 | numpy>=1.12.1 4 | 5 | pandas>=0.19.2 6 | 7 | pyyaml 8 | 9 | statsmodels 10 | 11 | PyWavelets 12 | 13 | wrapt 14 | 15 | tensorflow>=1.3.0 16 | 17 | torch 18 | 19 | tables 20 | 21 | future 22 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import argparse 6 | from model.supervisor import FCDNetSupervisor 7 | 8 | parser = argparse.ArgumentParser() 9 | # basic settings 10 | parser.add_argument('--device',default='cuda:0',type=str) 11 | parser.add_argument('--log_dir',default='data/model',type=str,help='') 12 | parser.add_argument('--log_level',default='INFO',type=str) 13 | parser.add_argument('--log_every',default=1,type=int) 14 | parser.add_argument('--save_model',default=0,type=int) 15 | #data settings 16 | parser.add_argument('--batch_size',default=64,type=int) 17 | parser.add_argument('--dataset_dir',default='data/solar_AL',type=str) 18 | # model settings 19 | parser.add_argument('--cl_decay_steps',default=2000,type=int) 20 | parser.add_argument('--filter_type',default='dual_random_walk',type=str) 21 | parser.add_argument('--horizon',default=12,type=int) 22 | parser.add_argument('--seq_len',default=12,type=int) 23 | parser.add_argument('--feas_dim',default=1,type=int) 24 | parser.add_argument('--input_dim',default=1,type=int) 25 | parser.add_argument('--ll_decay',default=0,type=int) 26 | parser.add_argument('--max_diffusion_step',default=2,type=int) 27 | parser.add_argument('--num_rnn_layers',default=1,type=int) 28 | parser.add_argument('--output_dim',default=1,type=int) 29 | parser.add_argument('--rnn_units',default=96,type=int) 30 | parser.add_argument('--use_curriculum_learning',default=True,type=bool) 31 | parser.add_argument('--embedding_size',default=256,type=int) 32 | parser.add_argument('--kernel_size',default=5,type=int) 33 | parser.add_argument('--freq',default=288,type=int) 34 | parser.add_argument('--requires_graph',default=2,type=int) 35 | parser.add_argument('--blocks',default=4,type=int) 36 | parser.add_argument('--layers',default=2,type=int) 37 | parser.add_argument('--level',default=4,type=int) 38 | parser.add_argument('--dgraphs',default=10,type=float) 39 | parser.add_argument('--graph_input_dim',default=1,type=int) 40 | parser.add_argument('--graph_feas_dim',default=1,type=int) 41 | parser.add_argument('--dataset',default='',type=str) 42 | # train settings 43 | parser.add_argument('--base_lr',default=0.003,type=float) 44 | parser.add_argument('--dropout',default=0.0,type=float) 45 | parser.add_argument('--epoch',default=0,type=int) 46 | parser.add_argument('--epochs',default=250,type=int) 47 | parser.add_argument('--epsilon',default=1.0e-3,type=float) 48 | parser.add_argument('--global_step',default=0,type=int) 49 | parser.add_argument('--lr_decay_ratio',default=0.1,type=float) 50 | parser.add_argument('--max_grad_norm',default=5,type=int) 51 | parser.add_argument('--max_to_keep',default=100,type=int) 52 | parser.add_argument('--min_learning_rate',default=2.0e-05,type=float) 53 | parser.add_argument('--optimizer',default='adam',type=str) 54 | parser.add_argument('--patience',default=50,type=int) 55 | parser.add_argument('--steps',default=[20, 30, 40],type=list) 56 | parser.add_argument('--test_every_n_epochs', default=10, type=int) 57 | parser.add_argument('--num_sample', default=10, type=int) 58 | args = parser.parse_args() 59 | 60 | 61 | if __name__ == '__main__': 62 | supervisor = FCDNetSupervisor(args=args) 63 | supervisor.train(args) 64 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | import os 4 | import pickle 5 | import scipy.sparse as sp 6 | import sys 7 | import tensorflow as tf 8 | 9 | from scipy.sparse import linalg 10 | 11 | class DataLoader(object): 12 | def __init__(self, xs, ys, batch_size, pad_with_last_sample=True, shuffle=False): 13 | """ 14 | 15 | :param xs: 16 | :param ys: 17 | :param batch_size: 18 | :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size. 19 | """ 20 | self.batch_size = batch_size 21 | self.current_ind = 0 22 | if pad_with_last_sample: 23 | num_padding = (batch_size - (len(xs) % batch_size)) % batch_size 24 | x_padding = np.repeat(xs[-1:], num_padding, axis=0) 25 | y_padding = np.repeat(ys[-1:], num_padding, axis=0) 26 | xs = np.concatenate([xs, x_padding], axis=0) 27 | ys = np.concatenate([ys, y_padding], axis=0) 28 | self.size = len(xs) 29 | self.num_batch = int(self.size // self.batch_size) 30 | if shuffle: 31 | permutation = np.random.permutation(self.size) 32 | xs, ys = xs[permutation], ys[permutation] 33 | self.xs = xs 34 | self.ys = ys 35 | 36 | def get_iterator(self): 37 | self.current_ind = 0 38 | 39 | def _wrapper(): 40 | while self.current_ind < self.num_batch: 41 | start_ind = self.batch_size * self.current_ind 42 | end_ind = min(self.size, self.batch_size * (self.current_ind + 1)) 43 | x_i = self.xs[start_ind: end_ind, ...] 44 | y_i = self.ys[start_ind: end_ind, ...] 45 | yield (x_i, y_i) 46 | self.current_ind += 1 47 | 48 | return _wrapper() 49 | 50 | 51 | class StandardScaler: 52 | """ 53 | Standard the input 54 | """ 55 | 56 | def __init__(self, mean, std, p=None): 57 | self.mean = mean 58 | self.std = std 59 | self.p = p 60 | 61 | def transform(self, data): 62 | if self.p and data.shape[-1] == self.p: 63 | for i in range(self.p): 64 | data[...,i] = (data[...,i] - self.mean[i]) / self.std[i] 65 | else: 66 | data = (data - self.mean) / self.std 67 | return data 68 | 69 | def inverse_transform(self, data): 70 | if self.p and data.shape[-1] == self.p: 71 | for i in range(self.p): 72 | data[...,i] = (data[...,i] * self.std[i]) + self.mean[i] 73 | elif self.p and self.p > 1: 74 | data = (data * self.std[0]) + self.mean[0] 75 | else: 76 | data = (data * self.std) + self.mean 77 | return data 78 | 79 | 80 | def add_simple_summary(writer, names, values, global_step): 81 | """ 82 | Writes summary for a list of scalars. 83 | :param writer: 84 | :param names: 85 | :param values: 86 | :param global_step: 87 | :return: 88 | """ 89 | for name, value in zip(names, values): 90 | summary = tf.Summary() 91 | summary_value = summary.value.add() 92 | summary_value.simple_value = value 93 | summary_value.tag = name 94 | writer.add_summary(summary, global_step) 95 | 96 | 97 | def calculate_normalized_laplacian(adj): 98 | """ 99 | # L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2 100 | # D = diag(A 1) 101 | :param adj: 102 | :return: 103 | """ 104 | adj = sp.coo_matrix(adj) 105 | d = np.array(adj.sum(1)) 106 | d_inv_sqrt = np.power(d, -0.5).flatten() 107 | d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. 108 | d_mat_inv_sqrt = sp.diags(d_inv_sqrt) 109 | normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() 110 | return normalized_laplacian 111 | 112 | 113 | def calculate_random_walk_matrix(adj_mx): 114 | adj_mx = sp.coo_matrix(adj_mx) 115 | d = np.array(adj_mx.sum(1)) 116 | d_inv = np.power(d, -1).flatten() 117 | d_inv[np.isinf(d_inv)] = 0. 118 | d_mat_inv = sp.diags(d_inv) 119 | random_walk_mx = d_mat_inv.dot(adj_mx).tocoo() 120 | return random_walk_mx 121 | 122 | 123 | def calculate_reverse_random_walk_matrix(adj_mx): 124 | return calculate_random_walk_matrix(np.transpose(adj_mx)) 125 | 126 | 127 | def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True): 128 | if undirected: 129 | adj_mx = np.maximum.reduce([adj_mx, adj_mx.T]) 130 | L = calculate_normalized_laplacian(adj_mx) 131 | if lambda_max is None: 132 | lambda_max, _ = linalg.eigsh(L, 1, which='LM') 133 | lambda_max = lambda_max[0] 134 | L = sp.csr_matrix(L) 135 | M, _ = L.shape 136 | I = sp.identity(M, format='csr', dtype=L.dtype) 137 | L = (2 / lambda_max * L) - I 138 | return L.astype(np.float32) 139 | 140 | 141 | def config_logging(log_dir, log_filename='info.log', level=logging.INFO): 142 | # Add file handler and stdout handler 143 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 144 | # Create the log directory if necessary. 145 | try: 146 | os.makedirs(log_dir) 147 | except OSError: 148 | pass 149 | file_handler = logging.FileHandler(os.path.join(log_dir, log_filename)) 150 | file_handler.setFormatter(formatter) 151 | file_handler.setLevel(level=level) 152 | # Add console handler. 153 | console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') 154 | console_handler = logging.StreamHandler(sys.stdout) 155 | console_handler.setFormatter(console_formatter) 156 | console_handler.setLevel(level=level) 157 | logging.basicConfig(handlers=[file_handler, console_handler], level=level) 158 | 159 | 160 | def get_logger(log_dir, name, log_filename='info.log', level=logging.INFO): 161 | logger = logging.getLogger(name) 162 | logger.setLevel(level) 163 | # Add file handler and stdout handler 164 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 165 | file_handler = logging.FileHandler(os.path.join(log_dir, log_filename)) 166 | file_handler.setFormatter(formatter) 167 | # Add console handler. 168 | console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') 169 | console_handler = logging.StreamHandler(sys.stdout) 170 | console_handler.setFormatter(console_formatter) 171 | logger.addHandler(file_handler) 172 | logger.addHandler(console_handler) 173 | # Add google cloud log handler 174 | logger.info('Log directory: %s', log_dir) 175 | return logger 176 | 177 | 178 | def get_total_trainable_parameter_size(): 179 | """ 180 | Calculates the total number of trainable parameters in the current graph. 181 | :return: 182 | """ 183 | total_parameters = 0 184 | for variable in tf.trainable_variables(): 185 | # shape is an array of tf.Dimension 186 | total_parameters += np.product([x.value for x in variable.get_shape()]) 187 | return total_parameters 188 | 189 | 190 | def load_dataset(dataset_dir, batch_size): 191 | data = {} 192 | for category in ['train', 'val', 'test']: 193 | cat_data = np.load(os.path.join(dataset_dir, category + '.npz')) 194 | data['x_' + category] = cat_data['x'] 195 | data['y_' + category] = cat_data['y'] 196 | scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std()) 197 | # Data format 198 | for category in ['train', 'val', 'test']: 199 | data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0]) 200 | data['y_' + category][..., 0] = scaler.transform(data['y_' + category][..., 0]) 201 | data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size, shuffle=True) 202 | data['val_loader'] = DataLoader(data['x_val'], data['y_val'], batch_size, shuffle=False) 203 | data['test_loader'] = DataLoader(data['x_test'], data['y_test'], batch_size, shuffle=False) 204 | data['scaler'] = scaler 205 | 206 | return data 207 | 208 | 209 | def load_dataset_with_time(dataset_dir, batch_size, **kwargs): 210 | data = {} 211 | for category in ['train', 'val', 'test']: 212 | cat_data = np.load(os.path.join(dataset_dir, category + '.npz')) 213 | data['x_' + category] = cat_data['x'] 214 | data['y_' + category] = cat_data['y'] 215 | data['time_' + category] = cat_data['time'] 216 | scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std()) 217 | # Data format 218 | for category in ['train', 'val', 'test']: 219 | data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0]) 220 | data['y_' + category][..., 0] = scaler.transform(data['y_' + category][..., 0]) 221 | data['train_loader'] = DataLoader(data['x_train'], data['y_train'], data['time_train'], batch_size, shuffle=True) 222 | data['val_loader'] = DataLoader(data['x_val'], data['y_val'], data['time_val'], batch_size, shuffle=False) 223 | data['test_loader'] = DataLoader(data['x_test'], data['y_test'], data['time_test'], batch_size, shuffle=False) 224 | data['scaler'] = scaler 225 | 226 | return data 227 | 228 | 229 | def load_graph_data(pkl_filename): 230 | sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename) 231 | return sensor_ids, sensor_id_to_ind, adj_mx 232 | 233 | 234 | def load_pickle(pickle_file): 235 | try: 236 | with open(pickle_file, 'rb') as f: 237 | pickle_data = pickle.load(f) 238 | except UnicodeDecodeError as e: 239 | with open(pickle_file, 'rb') as f: 240 | pickle_data = pickle.load(f, encoding='latin1') 241 | except Exception as e: 242 | print('Unable to load data ', pickle_file, ':', e) 243 | raise 244 | return pickle_data 245 | --------------------------------------------------------------------------------