├── .gitignore
├── LICENSE
├── README.md
├── data_provider
│   ├── __init__.py
│   ├── data_factory.py
│   └── data_loader.py
├── environment.yml
├── exp
│   ├── __init__.py
│   ├── exp_basic.py
│   └── exp_main.py
├── layers
│   ├── AutoCorrelation.py
│   ├── Autoformer_EncDec.py
│   ├── Embed.py
│   ├── MTGNN_layer.py
│   ├── SelfAttention_Family.py
│   ├── Transformer_EncDec.py
│   └── __init__.py
├── models
│   ├── Autoformer.py
│   ├── Graph_WaveNet.py
│   ├── Informer.py
│   ├── LSTNet.py
│   ├── MTGNN.py
│   ├── Reformer.py
│   ├── Transformer.py
│   └── __init__.py
├── run.py
├── scripts
│   ├── Autoformer
│   │   ├── Autoformer_COVID19_world.sh
│   │   ├── Autoformer_PEMS03.sh
│   │   ├── Autoformer_Stock_apple.sh
│   │   ├── Autoformer_WTH.sh
│   │   └── Autoformer_myETT.sh
│   ├── Graph_WaveNet
│   │   ├── Graph_WaveNet_COVID19_world.sh
│   │   ├── Graph_WaveNet_PEMS03.sh
│   │   ├── Graph_WaveNet_Stock_apple.sh
│   │   ├── Graph_WaveNet_WTH.sh
│   │   └── Graph_WaveNet_myETT.sh
│   ├── LSTNet
│   │   ├── LSTNet_COVID19_world.sh
│   │   ├── LSTNet_PEMS03.sh
│   │   ├── LSTNet_Stock_apple.sh
│   │   ├── LSTNet_WTH.sh
│   │   └── LSTNet_myETT.sh
│   ├── MTGNN
│   │   ├── MTGNN_COVID19.sh
│   │   ├── MTGNN_PEMS03.sh
│   │   ├── MTGNN_Stcok_apple.sh
│   │   ├── MTGNN_WTH.sh
│   │   └── MTGNN_myETT.sh
│   ├── Reformer
│   │   ├── Reformer_COVID19_world.sh
│   │   ├── Reformer_PEMS03.sh
│   │   ├── Reformer_Stock_apple.sh
│   │   ├── Reformer_WTH.sh
│   │   └── Reformer_myETT.sh
│   └── Transformer
│       ├── Transformer_COVID19_world.sh
│       ├── Transformer_PEMS03.sh
│       ├── Transformer_Stock_apple.sh
│       ├── Transformer_WTH.sh
│       └── Transformer_myETT.sh
└── utils
    ├── __init__.py
    ├── masking.py
    ├── metrics.py
    ├── timefeatures.py
    └── tools.py

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

test_results/

results/

dataset/

checkpoints/

*result.txt

/.idea

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2022 Masterleia

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# TSF_LSTF_Compare
A comparison of time-series forecasting models, with a focus on long-sequence time-series forecasting (LSTF). Models covered include Informer, Autoformer, Reformer, Pyraformer, FEDformer, Transformer, MTGNN, LSTNet, and Graph WaveNet.
More updates are on the way; stay tuned.
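Each model/dataset pair has a launcher script under `scripts/`; assuming a Unix-like shell and datasets placed in the git-ignored `dataset/` directory, an experiment can presumably be started with, e.g., `bash scripts/Autoformer/Autoformer_WTH.sh`, which wraps `run.py` with that benchmark's hyperparameters.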

--------------------------------------------------------------------------------
/data_provider/__init__.py:
--------------------------------------------------------------------------------


--------------------------------------------------------------------------------
/data_provider/data_factory.py:
--------------------------------------------------------------------------------
from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred
from torch.utils.data import DataLoader

data_dict = {
    'ETTh1': Dataset_ETT_hour,
    'ETTh2': Dataset_ETT_hour,
    'ETTm1': Dataset_ETT_minute,
    'ETTm2': Dataset_ETT_minute,
    'custom': Dataset_Custom,
    'PEMS03_deal': Dataset_Custom,
}


def data_provider(args, flag):
    Data = data_dict[args.data]
    timeenc = 0 if args.embed != 'timeF' else 1

    if flag == 'test':
        shuffle_flag = False
        drop_last = True
        batch_size = args.batch_size
        freq = args.freq
    elif flag == 'pred':
        shuffle_flag = False
        drop_last = False
        batch_size = 1
        freq = args.freq
        Data = Dataset_Pred
    else:
        shuffle_flag = True
        drop_last = True
        batch_size = args.batch_size
        freq = args.freq

    data_set = Data(
        root_path=args.root_path,
        data_path=args.data_path,
        flag=flag,
        size=[args.seq_len, args.label_len, args.pred_len],
        features=args.features,
        target=args.target,
        timeenc=timeenc,
        freq=freq
    )
    print(flag, len(data_set))
    data_loader = DataLoader(
        data_set,
        batch_size=batch_size,
        shuffle=shuffle_flag,
        num_workers=args.num_workers,
        drop_last=drop_last)
    return data_set, data_loader
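For orientation, a minimal sketch of driving data_provider by hand. The SimpleNamespace below is a hypothetical stand-in for the argparse namespace that run.py builds; it carries only the attributes this factory and the datasets actually read:

from types import SimpleNamespace
from data_provider.data_factory import data_provider

# Hypothetical stand-in for run.py's argparse namespace.
args = SimpleNamespace(
    data='ETTh1', root_path='./dataset/', data_path='ETTh1.csv',
    features='M', target='OT', embed='timeF', freq='h',
    seq_len=96, label_len=48, pred_len=24,
    batch_size=32, num_workers=0,
)
train_set, train_loader = data_provider(args, flag='train')
seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(train_loader))
# seq_x: [32, 96, n_vars]; seq_y: [32, 48 + 24, n_vars]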

--------------------------------------------------------------------------------
/data_provider/data_loader.py:
--------------------------------------------------------------------------------
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler
from utils.timefeatures import time_features
import warnings

warnings.filterwarnings('ignore')


class Dataset_ETT_hour(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', data_path='ETTh1.csv',
                 target='OT', scale=True, timeenc=0, freq='h'):
        # size [seq_len, label_len, pred_len]
        # info
        if size is None:
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.features = features
        self.target = target
        self.scale = scale
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))

        border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]
        border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if self.timeenc == 0:
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
            data_stamp = df_stamp.drop(['date'], 1).values
        elif self.timeenc == 1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)

        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)


class Dataset_ETT_minute(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', data_path='ETTm1.csv',
                 target='OT', scale=True, timeenc=0, freq='t'):
        # size [seq_len, label_len, pred_len]
        # info
        if size is None:
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.features = features
        self.target = target
        self.scale = scale
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))

        border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]
        border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if self.timeenc == 0:
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)
            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)
            data_stamp = df_stamp.drop(['date'], 1).values
        elif self.timeenc == 1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)

        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)


class Dataset_Custom(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', data_path='ETTh1.csv',
                 target='OT', scale=True, timeenc=0, freq='h'):
        # size [seq_len, label_len, pred_len]
        # info
        if size is None:
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.features = features
        self.target = target
        self.scale = scale
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))

        '''
        df_raw.columns: ['date', ...(other features), target feature]
        '''
        cols = list(df_raw.columns)
        cols.remove(self.target)
        cols.remove('date')
        df_raw = df_raw[['date'] + cols + [self.target]]
        # print(cols)
        num_train = int(len(df_raw) * 0.7)
        num_test = int(len(df_raw) * 0.2)
        num_vali = len(df_raw) - num_train - num_test
        border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
        border2s = [num_train, num_train + num_vali, len(df_raw)]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if self.timeenc == 0:
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
            data_stamp = df_stamp.drop(['date'], 1).values
        elif self.timeenc == 1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)

        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        # HI = self.data_y[r_begin-1:r_end-1]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return len(self.data_x) - self.seq_len - self.pred_len + 1

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)


class Dataset_Pred(Dataset):
    def __init__(self, root_path, flag='pred', size=None,
                 features='S', data_path='ETTh1.csv',
                 target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None):
        # size [seq_len, label_len, pred_len]
        # info
        if size is None:
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['pred']

        self.features = features
        self.target = target
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        self.cols = cols
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))
        '''
        df_raw.columns: ['date', ...(other features), target feature]
        '''
        if self.cols:
            cols = self.cols.copy()
            cols.remove(self.target)
        else:
            cols = list(df_raw.columns)
            cols.remove(self.target)
            cols.remove('date')
        df_raw = df_raw[['date'] + cols + [self.target]]
        border1 = len(df_raw) - self.seq_len
        border2 = len(df_raw)

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            self.scaler.fit(df_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        tmp_stamp = df_raw[['date']][border1:border2]
        tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)
        pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len + 1, freq=self.freq)

        df_stamp = pd.DataFrame(columns=['date'])
        df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])
        if self.timeenc == 0:
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute, 1)
            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)
            data_stamp = df_stamp.drop(['date'], 1).values
        elif self.timeenc == 1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)

        self.data_x = data[border1:border2]
        if self.inverse:
            self.data_y = df_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            seq_y = self.data_x[r_begin:r_begin + self.label_len]
        else:
            seq_y = self.data_y[r_begin:r_begin + self.label_len]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return len(self.data_x) - self.seq_len + 1

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)
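The window arithmetic shared by the __getitem__ methods above is easiest to see with concrete numbers; a small sketch, assuming seq_len=96, label_len=48, pred_len=24:

seq_len, label_len, pred_len, index = 96, 48, 24, 0
s_begin, s_end = index, index + seq_len   # encoder input: data[0:96]
r_begin = s_end - label_len               # 48
r_end = r_begin + label_len + pred_len    # 120
# seq_y = data[48:120]: the decoder sequence re-uses the last label_len steps
# of the encoder window as a warm start and appends the pred_len horizon.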

--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
name: TSF_LSTF_Compare
channels:
  - conda-forge
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/r
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/msys2
  - http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
  - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
  - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/msys2/
  - defaults
dependencies:
  - brotli=1.0.9=h8ffe710_6
  - brotli-bin=1.0.9=h8ffe710_6
  - ca-certificates=2021.10.8=h5b45459_0
  - certifi=2021.10.8=py37h03978a9_2
  - cycler=0.11.0=pyhd8ed1ab_0
  - fonttools=4.31.2=py37hcc03f2d_0
  - freetype=2.10.4=h546665d_1
  - icu=69.1=h0e60522_0
  - intel-openmp=2022.0.0=h57928b3_3663
  - jbig=2.1=h8d14728_2003
  - joblib=1.1.0=pyhd8ed1ab_0
  - jpeg=9e=h8ffe710_0
  - kiwisolver=1.4.2=py37h8c56517_1
  - lcms2=2.12=h2a16943_0
  - lerc=3.0=h0e60522_0
  - libblas=3.9.0=13_win64_mkl
  - libbrotlicommon=1.0.9=h8ffe710_6
  - libbrotlidec=1.0.9=h8ffe710_6
  - libbrotlienc=1.0.9=h8ffe710_6
  - libcblas=3.9.0=13_win64_mkl
  - libclang=13.0.1=default_h81446c8_0
  - libdeflate=1.10=h8ffe710_0
  - liblapack=3.9.0=13_win64_mkl
  - libpng=1.6.37=h1d00b33_2
  - libtiff=4.3.0=hc4061b1_3
  - libwebp=1.2.2=h57928b3_0
  - libwebp-base=1.2.2=h8ffe710_1
  - libxcb=1.13=hcd874cb_1004
  - libzlib=1.2.11=h8ffe710_1014
  - lz4-c=1.9.3=h8ffe710_1
  - m2w64-gcc-libgfortran=5.3.0=6
  - m2w64-gcc-libs=5.3.0=7
  - m2w64-gcc-libs-core=5.3.0=7
  - m2w64-gmp=6.1.0=2
  - m2w64-libwinpthread-git=5.0.0.4634.697f757=2
  - matplotlib=3.5.1=py37h03978a9_0
  - matplotlib-base=3.5.1=py37h4a79c79_0
  - mkl=2022.0.0=h0e2418a_796
  - msys2-conda-epoch=20160418=1
  - munkres=1.1.4=pyh9f0ad1d_0
  - numpy=1.21.5=py37h5fa1a60_0
  - openjpeg=2.4.0=hb211442_1
  - openssl=1.1.1n=h8ffe710_0
  - packaging=21.3=pyhd8ed1ab_0
  - pandas=1.3.5=py37h9386db6_0
  - pillow=9.1.0=py37h8675073_0
  - pip=22.0.4=pyhd8ed1ab_0
  - pthread-stubs=0.4=hcd874cb_1001
  - pyparsing=3.0.7=pyhd8ed1ab_0
  - pyqt=5.12.3=py37h03978a9_8
  - pyqt-impl=5.12.3=py37hf2a7229_8
  - pyqt5-sip=4.19.18=py37hf2a7229_8
  - pyqtchart=5.12=py37hf2a7229_8
  - pyqtwebengine=5.12.1=py37hf2a7229_8
  - python=3.7.12=h7840368_100_cpython
  - python-dateutil=2.8.2=pyhd8ed1ab_0
  - python_abi=3.7=2_cp37m
  - pytz=2022.1=pyhd8ed1ab_0
  - qt=5.12.9=h556501e_6
  - scikit-learn=1.0.2=py37hcabfae0_0
  - scipy=1.7.3=py37hb6553fb_0
  - setuptools=59.8.0=py37h03978a9_1
  - six=1.16.0=pyh6c4a22f_0
  - sqlite=3.37.1=h8ffe710_0
  - tbb=2021.5.0=h2d74725_0
  - threadpoolctl=3.1.0=pyh8a188c0_0
  - tk=8.6.12=h8ffe710_0
  - tornado=6.1=py37hcc03f2d_3
  - typing-extensions=4.1.1=hd8ed1ab_0
  - typing_extensions=4.1.1=pyha770c72_0
  - ucrt=10.0.20348.0=h57928b3_0
  - unicodedata2=14.0.0=py37hcc03f2d_0
  - vc=14.2=hb210afc_6
  - vs2015_runtime=14.29.30037=h902a5da_6
  - wheel=0.37.1=pyhd8ed1ab_0
  - xorg-libxau=1.0.9=hcd874cb_0
  - xorg-libxdmcp=1.1.3=hcd874cb_0
  - xz=5.2.5=h62dcd97_1
  - zlib=1.2.11=h8ffe710_1014
  - zstd=1.5.2=h6255e5f_0
  - pip:
    - argon2-cffi==21.3.0
    - argon2-cffi-bindings==21.2.0
    - attrs==21.4.0
    - axial-positional-embedding==0.2.1
    - backcall==0.2.0
    - beautifulsoup4==4.11.1
    - bleach==5.0.0
    - cffi==1.15.0
    - colorama==0.4.4
    - debugpy==1.6.0
    - decorator==5.1.1
    - defusedxml==0.7.1
    - einops==0.4.1
    - entrypoints==0.4
    - fastjsonschema==2.15.3
    - importlib-metadata==4.11.3
    - importlib-resources==5.7.1
    - ipykernel==6.13.0
    - ipython==7.33.0
    - ipython-genutils==0.2.0
    - ipywidgets==7.7.0
    - jedi==0.18.1
    - jinja2==3.1.2
    - jsonschema==4.5.1
    - jupyter==1.0.0
    - jupyter-client==7.3.1
    - jupyter-console==6.4.3
    - jupyter-core==4.10.0
    - jupyterlab-pygments==0.2.2
    - jupyterlab-widgets==1.1.0
    - local-attention==1.4.3
    - markupsafe==2.1.1
    - matplotlib-inline==0.1.3
    - mistune==0.8.4
    - nbclient==0.6.3
    - nbconvert==6.5.0
    - nbformat==5.4.0
    - nest-asyncio==1.5.5
    - notebook==6.4.11
    - pandocfilters==1.5.0
    - parso==0.8.3
    - pickleshare==0.7.5
    - product-key-memory==0.1.10
    - prometheus-client==0.14.1
    - prompt-toolkit==3.0.29
    - psutil==5.9.1
    - pycparser==2.21
    - pygments==2.12.0
    - pyrsistent==0.18.1
    - pywin32==304
    - pywinpty==2.0.5
    - pyzmq==23.0.0
    - qtconsole==5.3.0
    - qtpy==2.1.0
    - reformer-pytorch==1.4.4
    - send2trash==1.8.0
    - soupsieve==2.3.2.post1
    - terminado==0.15.0
    - tinycss2==1.1.1
    - torch==1.9.0+cu111
    - torchaudio==0.9.0
    - torchvision==0.10.0+cu111
    - traitlets==5.2.1.post0
    - wcwidth==0.2.5
    - webencodings==0.5.1
    - widgetsnbextension==3.6.0
    - zipp==3.8.0
prefix: E:\Anaconda\envs\TSF_LSTF_Compare

--------------------------------------------------------------------------------
/exp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Masterleia/TSF_LSTF_Compare/74945de299f4c91d4ef21ad822c562c57b301f1b/exp/__init__.py

--------------------------------------------------------------------------------
/exp/exp_basic.py:
--------------------------------------------------------------------------------
import os
import torch
import numpy as np


class Exp_Basic(object):
    def __init__(self, args):
        self.args = args
        self.device = self._acquire_device()
        self.model = self._build_model().to(self.device)

    def _build_model(self):
        raise NotImplementedError

    def _acquire_device(self):
        if self.args.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = str(
                self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
            device = torch.device('cuda:{}'.format(self.args.gpu))
            print('Use GPU: cuda:{}'.format(self.args.gpu))
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

    def _get_data(self):
        pass

    def vali(self):
        pass

    def train(self):
        pass

    def test(self):
        pass
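Exp_Basic only pins down the contract; exp_main.py (not shown in this section) fills it in. A hypothetical minimal subclass, just to illustrate the hook order (_acquire_device runs before _build_model, so self.device is already set when the model is built):

import torch.nn as nn

class Exp_Toy(Exp_Basic):  # hypothetical example, not part of the repo
    def _build_model(self):
        # called from Exp_Basic.__init__; the returned module is moved to self.device
        return nn.Linear(self.args.seq_len, self.args.pred_len)

    def train(self):
        self.model.train()
        # ... fetch loaders via self._get_data() and run the optimizer loop ...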

--------------------------------------------------------------------------------
/layers/AutoCorrelation.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import math
from math import sqrt
import os


class AutoCorrelation(nn.Module):
    """
    AutoCorrelation Mechanism with the following two phases:
    (1) period-based dependencies discovery
    (2) time delay aggregation
    This block can replace the self-attention family mechanism seamlessly.
    """
    def __init__(self, mask_flag=True, factor=1, scale=None, attention_dropout=0.1, output_attention=False):
        super(AutoCorrelation, self).__init__()
        self.factor = factor
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def time_delay_agg_training(self, values, corr):
        """
        SpeedUp version of Autocorrelation (a batch-normalization style design)
        This is for the training phase.
        """
        head = values.shape[1]
        channel = values.shape[2]
        length = values.shape[3]
        # find top k
        top_k = int(self.factor * math.log(length))
        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
        index = torch.topk(torch.mean(mean_value, dim=0), top_k, dim=-1)[1]
        weights = torch.stack([mean_value[:, index[i]] for i in range(top_k)], dim=-1)
        # update corr
        tmp_corr = torch.softmax(weights, dim=-1)
        # aggregation
        tmp_values = values
        delays_agg = torch.zeros_like(values).float()
        for i in range(top_k):
            pattern = torch.roll(tmp_values, -int(index[i]), -1)
            delays_agg = delays_agg + pattern * \
                         (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))
        return delays_agg

    def time_delay_agg_inference(self, values, corr):
        """
        SpeedUp version of Autocorrelation (a batch-normalization style design)
        This is for the inference phase.
        """
        batch = values.shape[0]
        head = values.shape[1]
        channel = values.shape[2]
        length = values.shape[3]
        # index init (on the same device as values)
        init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).to(values.device)
        # find top k
        top_k = int(self.factor * math.log(length))
        mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)
        weights = torch.topk(mean_value, top_k, dim=-1)[0]
        delay = torch.topk(mean_value, top_k, dim=-1)[1]
        # update corr
        tmp_corr = torch.softmax(weights, dim=-1)
        # aggregation
        tmp_values = values.repeat(1, 1, 1, 2)
        delays_agg = torch.zeros_like(values).float()
        for i in range(top_k):
            tmp_delay = init_index + delay[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length)
            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
            delays_agg = delays_agg + pattern * \
                         (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))
        return delays_agg

    def time_delay_agg_full(self, values, corr):
        """
        Standard version of Autocorrelation
        """
        batch = values.shape[0]
        head = values.shape[1]
        channel = values.shape[2]
        length = values.shape[3]
        # index init (on the same device as values)
        init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).to(values.device)
        # find top k
        top_k = int(self.factor * math.log(length))
        weights = torch.topk(corr, top_k, dim=-1)[0]
        delay = torch.topk(corr, top_k, dim=-1)[1]
        # update corr
        tmp_corr = torch.softmax(weights, dim=-1)
        # aggregation
        tmp_values = values.repeat(1, 1, 1, 2)
        delays_agg = torch.zeros_like(values).float()
        for i in range(top_k):
            tmp_delay = init_index + delay[..., i].unsqueeze(-1)
            pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)
            delays_agg = delays_agg + pattern * (tmp_corr[..., i].unsqueeze(-1))
        return delays_agg

    def forward(self, queries, keys, values, attn_mask):
        B, L, H, E = queries.shape
        _, S, _, D = values.shape
        if L > S:
            zeros = torch.zeros_like(queries[:, :(L - S), :]).float()
            values = torch.cat([values, zeros], dim=1)
            keys = torch.cat([keys, zeros], dim=1)
        else:
            values = values[:, :L, :, :]
            keys = keys[:, :L, :, :]

        # period-based dependencies
        q_fft = torch.fft.rfft(queries.permute(0, 2, 3, 1).contiguous(), dim=-1)
        k_fft = torch.fft.rfft(keys.permute(0, 2, 3, 1).contiguous(), dim=-1)
        res = q_fft * torch.conj(k_fft)
        corr = torch.fft.irfft(res, dim=-1)

        # time delay agg
        if self.training:
            V = self.time_delay_agg_training(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)
        else:
            V = self.time_delay_agg_inference(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)

        if self.output_attention:
            return (V.contiguous(), corr.permute(0, 3, 1, 2))
        else:
            return (V.contiguous(), None)


class AutoCorrelationLayer(nn.Module):
    def __init__(self, correlation, d_model, n_heads, d_keys=None,
                 d_values=None):
        super(AutoCorrelationLayer, self).__init__()

        d_keys = d_keys or (d_model // n_heads)
        d_values = d_values or (d_model // n_heads)

        self.inner_correlation = correlation
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads

    def forward(self, queries, keys, values, attn_mask):
        B, L, _ = queries.shape
        _, S, _ = keys.shape
        H = self.n_heads

        queries = self.query_projection(queries).view(B, L, H, -1)
        keys = self.key_projection(keys).view(B, S, H, -1)
        values = self.value_projection(values).view(B, S, H, -1)

        out, attn = self.inner_correlation(
            queries,
            keys,
            values,
            attn_mask
        )
        out = out.view(B, L, -1)

        return self.out_projection(out), attn
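A quick shape check for the two modules above; a sketch with toy sizes (the freshly built module is in training mode, so the training-phase aggregation path is exercised, and the correlation map is only returned when output_attention=True):

import torch

attn = AutoCorrelationLayer(AutoCorrelation(mask_flag=False, factor=1),
                            d_model=16, n_heads=2)
x = torch.randn(4, 24, 16)        # [batch, length, d_model]
out, corr = attn(x, x, x, attn_mask=None)
print(out.shape)                  # torch.Size([4, 24, 16]); corr is None here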

--------------------------------------------------------------------------------
/layers/Autoformer_EncDec.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F


class my_Layernorm(nn.Module):
    """
    Special designed layernorm for the seasonal part
    """
    def __init__(self, channels):
        super(my_Layernorm, self).__init__()
        self.layernorm = nn.LayerNorm(channels)

    def forward(self, x):
        x_hat = self.layernorm(x)
        bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
        return x_hat - bias


class moving_avg(nn.Module):
    """
    Moving average block to highlight the trend of time series
    """
    def __init__(self, kernel_size, stride):
        super(moving_avg, self).__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)

    def forward(self, x):
        # padding on the both ends of time series
        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
        x = torch.cat([front, x, end], dim=1)
        x = self.avg(x.permute(0, 2, 1))
        x = x.permute(0, 2, 1)
        return x


class series_decomp(nn.Module):
    """
    Series decomposition block
    """
    def __init__(self, kernel_size):
        super(series_decomp, self).__init__()
        self.moving_avg = moving_avg(kernel_size, stride=1)

    def forward(self, x):
        moving_mean = self.moving_avg(x)
        res = x - moving_mean
        return res, moving_mean


class EncoderLayer(nn.Module):
    """
    Autoformer encoder layer with the progressive decomposition architecture
    """
    def __init__(self, attention, d_model, d_ff=None, moving_avg=25, dropout=0.1, activation="relu"):
        super(EncoderLayer, self).__init__()
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)
        self.decomp1 = series_decomp(moving_avg)
        self.decomp2 = series_decomp(moving_avg)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, attn_mask=None):
        new_x, attn = self.attention(
            x, x, x,
            attn_mask=attn_mask
        )
        x = x + self.dropout(new_x)
        x, _ = self.decomp1(x)
        y = x
        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
        y = self.dropout(self.conv2(y).transpose(-1, 1))
        res, _ = self.decomp2(x + y)
        return res, attn


class Encoder(nn.Module):
    """
    Autoformer encoder
    """
    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.attn_layers = nn.ModuleList(attn_layers)
        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
        self.norm = norm_layer

    def forward(self, x, attn_mask=None):
        attns = []
        if self.conv_layers is not None:
            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):
                x, attn = attn_layer(x, attn_mask=attn_mask)
                x = conv_layer(x)
                attns.append(attn)
            x, attn = self.attn_layers[-1](x)
            attns.append(attn)
        else:
            for attn_layer in self.attn_layers:
                x, attn = attn_layer(x, attn_mask=attn_mask)
                attns.append(attn)

        if self.norm is not None:
            x = self.norm(x)

        return x, attns


class DecoderLayer(nn.Module):
    """
    Autoformer decoder layer with the progressive decomposition architecture
    """
    def __init__(self, self_attention, cross_attention, d_model, c_out, d_ff=None,
                 moving_avg=25, dropout=0.1, activation="relu"):
        super(DecoderLayer, self).__init__()
        d_ff = d_ff or 4 * d_model
        self.self_attention = self_attention
        self.cross_attention = cross_attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1, bias=False)
        self.decomp1 = series_decomp(moving_avg)
        self.decomp2 = series_decomp(moving_avg)
        self.decomp3 = series_decomp(moving_avg)
        self.dropout = nn.Dropout(dropout)
        self.projection = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=3, stride=1, padding=1,
                                    padding_mode='circular', bias=False)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, cross, x_mask=None, cross_mask=None):
        x = x + self.dropout(self.self_attention(
            x, x, x,
            attn_mask=x_mask
        )[0])
        x, trend1 = self.decomp1(x)
        x = x + self.dropout(self.cross_attention(
            x, cross, cross,
            attn_mask=cross_mask
        )[0])
        x, trend2 = self.decomp2(x)
        y = x
        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
        y = self.dropout(self.conv2(y).transpose(-1, 1))
        x, trend3 = self.decomp3(x + y)

        residual_trend = trend1 + trend2 + trend3
        residual_trend = self.projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)
        return x, residual_trend


class Decoder(nn.Module):
    """
    Autoformer decoder
    """
    def __init__(self, layers, norm_layer=None, projection=None):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList(layers)
        self.norm = norm_layer
        self.projection = projection

    def forward(self, x, cross, x_mask=None, cross_mask=None, trend=None):
        for layer in self.layers:
            x, residual_trend = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask)
            trend = trend + residual_trend

        if self.norm is not None:
            x = self.norm(x)

        if self.projection is not None:
            x = self.projection(x)
        return x, trend
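The decomposition blocks above are exact: moving_avg pads both ends so sequence length is preserved, and series_decomp returns a (seasonal residual, trend) pair that sums back to the input. A small sanity-check sketch:

import torch

decomp = series_decomp(kernel_size=25)
x = torch.randn(2, 96, 7)             # [batch, length, channels]
res, moving_mean = decomp(x)
assert res.shape == moving_mean.shape == x.shape
assert torch.allclose(res + moving_mean, x, atol=1e-6)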

--------------------------------------------------------------------------------
/layers/Embed.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
import math


class PositionalEmbedding(nn.Module):
    def __init__(self, d_model, max_len=5000):
        super(PositionalEmbedding, self).__init__()
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False

        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        return self.pe[:, :x.size(1)]


class TokenEmbedding(nn.Module):
    def __init__(self, c_in, d_model):
        super(TokenEmbedding, self).__init__()
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
                                   kernel_size=3, padding=padding, padding_mode='circular', bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu')

    def forward(self, x):
        x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
        return x


class FixedEmbedding(nn.Module):
    def __init__(self, c_in, d_model):
        super(FixedEmbedding, self).__init__()

        w = torch.zeros(c_in, d_model).float()
        w.requires_grad = False

        position = torch.arange(0, c_in).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()

        w[:, 0::2] = torch.sin(position * div_term)
        w[:, 1::2] = torch.cos(position * div_term)

        self.emb = nn.Embedding(c_in, d_model)
        self.emb.weight = nn.Parameter(w, requires_grad=False)

    def forward(self, x):
        return self.emb(x).detach()


class TemporalEmbedding(nn.Module):
    def __init__(self, d_model, embed_type='fixed', freq='h'):
        super(TemporalEmbedding, self).__init__()

        minute_size = 4
        hour_size = 24
        weekday_size = 7
        day_size = 32
        month_size = 13

        Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding
        if freq == 't':
            self.minute_embed = Embed(minute_size, d_model)
        self.hour_embed = Embed(hour_size, d_model)
        self.weekday_embed = Embed(weekday_size, d_model)
        self.day_embed = Embed(day_size, d_model)
        self.month_embed = Embed(month_size, d_model)

    def forward(self, x):
        x = x.long()

        minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self, 'minute_embed') else 0.
        hour_x = self.hour_embed(x[:, :, 3])
        weekday_x = self.weekday_embed(x[:, :, 2])
        day_x = self.day_embed(x[:, :, 1])
        month_x = self.month_embed(x[:, :, 0])

        return hour_x + weekday_x + day_x + month_x + minute_x


class TimeFeatureEmbedding(nn.Module):
    def __init__(self, d_model, embed_type='timeF', freq='h'):
        super(TimeFeatureEmbedding, self).__init__()

        freq_map = {'h': 4, 't': 5, 's': 6, 'm': 1, 'a': 1, 'w': 2, 'd': 3, 'b': 3}
        d_inp = freq_map[freq]
        self.embed = nn.Linear(d_inp, d_model, bias=False)

    def forward(self, x):
        return self.embed(x)


class DataEmbedding(nn.Module):
    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):
        super(DataEmbedding, self).__init__()

        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
        self.position_embedding = PositionalEmbedding(d_model=d_model)
        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,
                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(
            d_model=d_model, embed_type=embed_type, freq=freq)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, x_mark):
        x = self.value_embedding(x) + self.temporal_embedding(x_mark) + self.position_embedding(x)
        return self.dropout(x)


class DataEmbedding_wo_pos(nn.Module):
    def __init__(self, c_in, d_model, embed_type='fixed', freq='h', dropout=0.1):
        super(DataEmbedding_wo_pos, self).__init__()

        self.value_embedding = TokenEmbedding(c_in=c_in, d_model=d_model)
        self.position_embedding = PositionalEmbedding(d_model=d_model)
        self.temporal_embedding = TemporalEmbedding(d_model=d_model, embed_type=embed_type,
                                                    freq=freq) if embed_type != 'timeF' else TimeFeatureEmbedding(
            d_model=d_model, embed_type=embed_type, freq=freq)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, x_mark):
        x = self.value_embedding(x) + self.temporal_embedding(x_mark)
        return self.dropout(x)
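Putting the pieces above together, DataEmbedding sums a value (token) embedding, a calendar embedding, and a positional encoding into d_model channels. A sketch with the default 'fixed' embedding and hourly frequency, where the x_mark columns are [month, day, weekday, hour] indices (TemporalEmbedding casts them to long internally):

import torch

emb = DataEmbedding(c_in=7, d_model=512)   # embed_type='fixed', freq='h'
x = torch.randn(2, 96, 7)                  # raw series values
x_mark = torch.zeros(2, 96, 4)             # calendar index columns
print(emb(x, x_mark).shape)                # torch.Size([2, 96, 512])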

--------------------------------------------------------------------------------
/layers/MTGNN_layer.py:
--------------------------------------------------------------------------------
from __future__ import division
import torch
import torch.nn as nn
from torch.nn import init
import numbers
import torch.nn.functional as F


class nconv(nn.Module):
    def __init__(self):
        super(nconv,self).__init__()

    def forward(self,x, A):
        x = torch.einsum('ncwl,vw->ncvl',(x,A))
        return x.contiguous()

class dy_nconv(nn.Module):
    def __init__(self):
        super(dy_nconv,self).__init__()

    def forward(self,x, A):
        x = torch.einsum('ncvl,nvwl->ncwl',(x,A))
        return x.contiguous()

class linear(nn.Module):
    def __init__(self,c_in,c_out,bias=True):
        super(linear,self).__init__()
        self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0,0), stride=(1,1), bias=bias)

    def forward(self,x):
        return self.mlp(x)


class prop(nn.Module):
    def __init__(self,c_in,c_out,gdep,dropout,alpha):
        super(prop, self).__init__()
        self.nconv = nconv()
        self.mlp = linear(c_in,c_out)
        self.gdep = gdep
        self.dropout = dropout
        self.alpha = alpha

    def forward(self,x,adj):
        adj = adj + torch.eye(adj.size(0)).to(x.device)
        d = adj.sum(1)
        h = x
        dv = d
        a = adj / dv.view(-1, 1)
        for i in range(self.gdep):
            h = self.alpha*x + (1-self.alpha)*self.nconv(h,a)
        ho = self.mlp(h)
        return ho


class mixprop(nn.Module):
    def __init__(self,c_in,c_out,gdep,dropout,alpha):
        super(mixprop, self).__init__()
        self.nconv = nconv()
        self.mlp = linear((gdep+1)*c_in,c_out)
        self.gdep = gdep
        self.dropout = dropout
        self.alpha = alpha


    def forward(self,x,adj):
        adj = adj + torch.eye(adj.size(0)).to(x.device)
        d = adj.sum(1)
        h = x
        out = [h]
        a = adj / d.view(-1, 1)
        for i in range(self.gdep):
            h = self.alpha*x + (1-self.alpha)*self.nconv(h,a)
            out.append(h)
        ho = torch.cat(out,dim=1)
        ho = self.mlp(ho)
        return ho

class dy_mixprop(nn.Module):
    def __init__(self,c_in,c_out,gdep,dropout,alpha):
        super(dy_mixprop, self).__init__()
        self.nconv = dy_nconv()
        self.mlp1 = linear((gdep+1)*c_in,c_out)
        self.mlp2 = linear((gdep+1)*c_in,c_out)

        self.gdep = gdep
        self.dropout = dropout
        self.alpha = alpha
        self.lin1 = linear(c_in,c_in)
        self.lin2 = linear(c_in,c_in)


    def forward(self,x):
        #adj = adj + torch.eye(adj.size(0)).to(x.device)
        #d = adj.sum(1)
        x1 = torch.tanh(self.lin1(x))
        x2 = torch.tanh(self.lin2(x))
        adj = self.nconv(x1.transpose(2,1),x2)
        adj0 = torch.softmax(adj, dim=2)
        adj1 = torch.softmax(adj.transpose(2,1), dim=2)

        h = x
        out = [h]
        for i in range(self.gdep):
            h = self.alpha*x + (1-self.alpha)*self.nconv(h,adj0)
            out.append(h)
        ho = torch.cat(out,dim=1)
        ho1 = self.mlp1(ho)


        h = x
        out = [h]
        for i in range(self.gdep):
            h = self.alpha * x + (1 - self.alpha) * self.nconv(h, adj1)
            out.append(h)
        ho = torch.cat(out, dim=1)
        ho2 = self.mlp2(ho)

        return ho1+ho2



class dilated_1D(nn.Module):
    def __init__(self, cin, cout, dilation_factor=2):
        super(dilated_1D, self).__init__()
        self.tconv = nn.ModuleList()
        self.kernel_set = [2,3,6,7]
        self.tconv = nn.Conv2d(cin,cout,(1,7),dilation=(1,dilation_factor))

    def forward(self,input):
        x = self.tconv(input)
        return x

class dilated_inception(nn.Module):
    def __init__(self, cin, cout, dilation_factor=2):
        super(dilated_inception, self).__init__()
        self.tconv = nn.ModuleList()
        self.kernel_set = [2,3,6,7]
        cout = int(cout/len(self.kernel_set))
        for kern in self.kernel_set:
            self.tconv.append(nn.Conv2d(cin,cout,(1,kern),dilation=(1,dilation_factor)))

    def forward(self,input):
        x = []
        for i in range(len(self.kernel_set)):
            x.append(self.tconv[i](input))
        for i in range(len(self.kernel_set)):
            x[i] = x[i][...,-x[-1].size(3):]
        x = torch.cat(x,dim=1)
        return x


class graph_constructor(nn.Module):
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_constructor, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
            self.lin2 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim,dim)
            self.lin2 = nn.Linear(dim,dim)

        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def forward(self, idx):
        if self.static_feat is None:
            nodevec1 = self.emb1(idx)
            nodevec2 = self.emb2(idx)
        else:
            nodevec1 = self.static_feat[idx,:]
            nodevec2 = nodevec1

        nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
        nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))

        a = torch.mm(nodevec1, nodevec2.transpose(1,0))-torch.mm(nodevec2, nodevec1.transpose(1,0))
        adj = F.relu(torch.tanh(self.alpha*a))
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        mask.fill_(float('0'))
        s1,t1 = (adj + torch.rand_like(adj)*0.01).topk(self.k,1)
        mask.scatter_(1,t1,s1.fill_(1))
        adj = adj*mask
        return adj

    def fullA(self, idx):
        if self.static_feat is None:
            nodevec1 = self.emb1(idx)
            nodevec2 = self.emb2(idx)
        else:
            nodevec1 = self.static_feat[idx,:]
            nodevec2 = nodevec1

        nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
        nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))

        a = torch.mm(nodevec1, nodevec2.transpose(1,0))-torch.mm(nodevec2, nodevec1.transpose(1,0))
        adj = F.relu(torch.tanh(self.alpha*a))
        return adj

class graph_global(nn.Module):
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_global, self).__init__()
        self.nnodes = nnodes
        self.A = nn.Parameter(torch.randn(nnodes, nnodes).to(device), requires_grad=True).to(device)

    def forward(self, idx):
        return F.relu(self.A)


class graph_undirected(nn.Module):
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_undirected, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim,dim)

        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def forward(self, idx):
        if self.static_feat is None:
            nodevec1 = self.emb1(idx)
            nodevec2 = self.emb1(idx)
        else:
            nodevec1 = self.static_feat[idx,:]
            nodevec2 = nodevec1

        nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
        nodevec2 = torch.tanh(self.alpha*self.lin1(nodevec2))

        a = torch.mm(nodevec1, nodevec2.transpose(1,0))
        adj = F.relu(torch.tanh(self.alpha*a))
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        mask.fill_(float('0'))
        s1,t1 = adj.topk(self.k,1)
        mask.scatter_(1,t1,s1.fill_(1))
        adj = adj*mask
        return adj



class graph_directed(nn.Module):
    def __init__(self, nnodes, k, dim, device, alpha=3, static_feat=None):
        super(graph_directed, self).__init__()
        self.nnodes = nnodes
        if static_feat is not None:
            xd = static_feat.shape[1]
            self.lin1 = nn.Linear(xd, dim)
            self.lin2 = nn.Linear(xd, dim)
        else:
            self.emb1 = nn.Embedding(nnodes, dim)
            self.emb2 = nn.Embedding(nnodes, dim)
            self.lin1 = nn.Linear(dim,dim)
            self.lin2 = nn.Linear(dim,dim)

        self.device = device
        self.k = k
        self.dim = dim
        self.alpha = alpha
        self.static_feat = static_feat

    def forward(self, idx):
        if self.static_feat is None:
            nodevec1 = self.emb1(idx)
            nodevec2 = self.emb2(idx)
        else:
            nodevec1 = self.static_feat[idx,:]
            nodevec2 = nodevec1

        nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1))
        nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2))

        a = torch.mm(nodevec1, nodevec2.transpose(1,0))
        adj = F.relu(torch.tanh(self.alpha*a))
        mask = torch.zeros(idx.size(0), idx.size(0)).to(self.device)
        mask.fill_(float('0'))
        s1,t1 = adj.topk(self.k,1)
        mask.scatter_(1,t1,s1.fill_(1))
        adj = adj*mask
        return adj


class LayerNorm(nn.Module):
    __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine']
    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
        super(LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        self.normalized_shape = tuple(normalized_shape)
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.Tensor(*normalized_shape))
            self.bias = nn.Parameter(torch.Tensor(*normalized_shape))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()


    def reset_parameters(self):
        if self.elementwise_affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def forward(self, input, idx):
        if self.elementwise_affine:
            return F.layer_norm(input, tuple(input.shape[1:]), self.weight[:,idx,:], self.bias[:,idx,:], self.eps)
        else:
            return F.layer_norm(input, tuple(input.shape[1:]), self.weight, self.bias, self.eps)

    def extra_repr(self):
        return '{normalized_shape}, eps={eps}, ' \
            'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
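Among the constructors above, graph_constructor is the adaptive-adjacency variant the MTGNN reference implementation builds by default: it learns two node embeddings, forms a directed similarity matrix, and keeps only the top-k entries per row. A small sketch on CPU:

import torch

gc = graph_constructor(nnodes=10, k=3, dim=16, device='cpu')
idx = torch.arange(10)
adj = gc(idx)                  # [10, 10], at most k nonzero entries per row
print((adj > 0).sum(dim=1))    # each row count <= 3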
/ sqrt(E) 27 | 28 | scores = torch.einsum("blhe,bshe->bhls", queries, keys) 29 | 30 | if self.mask_flag: 31 | if attn_mask is None: 32 | attn_mask = TriangularCausalMask(B, L, device=queries.device) 33 | 34 | scores.masked_fill_(attn_mask.mask, -np.inf) 35 | 36 | A = self.dropout(torch.softmax(scale * scores, dim=-1)) 37 | V = torch.einsum("bhls,bshd->blhd", A, values) 38 | 39 | if self.output_attention: 40 | return (V.contiguous(), A) 41 | else: 42 | return (V.contiguous(), None) 43 | 44 | 45 | class ProbAttention(nn.Module): 46 | def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False): 47 | super(ProbAttention, self).__init__() 48 | self.factor = factor 49 | self.scale = scale 50 | self.mask_flag = mask_flag 51 | self.output_attention = output_attention 52 | self.dropout = nn.Dropout(attention_dropout) 53 | 54 | def _prob_QK(self, Q, K, sample_k, n_top): # n_top: c*ln(L_q) 55 | # Q [B, H, L, D] 56 | B, H, L_K, E = K.shape 57 | _, _, L_Q, _ = Q.shape 58 | 59 | # calculate the sampled Q_K 60 | K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E) 61 | index_sample = torch.randint(L_K, (L_Q, sample_k)) # real U = U_part(factor*ln(L_k))*L_q 62 | K_sample = K_expand[:, :, torch.arange(L_Q).unsqueeze(1), index_sample, :] 63 | Q_K_sample = torch.matmul(Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze() 64 | 65 | # find the Top_k query with sparsity measurement 66 | M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K) 67 | M_top = M.topk(n_top, sorted=False)[1] 68 | 69 | # use the reduced Q to calculate Q_K 70 | Q_reduce = Q[torch.arange(B)[:, None, None], 71 | torch.arange(H)[None, :, None], 72 | M_top, :] # factor*ln(L_q) 73 | Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # factor*ln(L_q)*L_k 74 | 75 | return Q_K, M_top 76 | 77 | def _get_initial_context(self, V, L_Q): 78 | B, H, L_V, D = V.shape 79 | if not self.mask_flag: 80 | # V_sum = V.sum(dim=-2) 81 | V_sum = V.mean(dim=-2) 82 | context = V_sum.unsqueeze(-2).expand(B, H, L_Q, V_sum.shape[-1]).clone() 83 | else: # use mask 84 | assert (L_Q == L_V) # requires that L_Q == L_V, i.e. 
for self-attention only 85 | context = V.cumsum(dim=-2) 86 | return context 87 | 88 | def _update_context(self, context_in, V, scores, index, L_Q, attn_mask): 89 | B, H, L_V, D = V.shape 90 | 91 | if self.mask_flag: 92 | attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device) 93 | scores.masked_fill_(attn_mask.mask, -np.inf) 94 | 95 | attn = torch.softmax(scores, dim=-1) # nn.Softmax(dim=-1)(scores) 96 | 97 | context_in[torch.arange(B)[:, None, None], 98 | torch.arange(H)[None, :, None], 99 | index, :] = torch.matmul(attn, V).type_as(context_in) 100 | if self.output_attention: 101 | attns = (torch.ones([B, H, L_V, L_V]) / L_V).type_as(attn).to(attn.device) 102 | attns[torch.arange(B)[:, None, None], torch.arange(H)[None, :, None], index, :] = attn 103 | return (context_in, attns) 104 | else: 105 | return (context_in, None) 106 | 107 | def forward(self, queries, keys, values, attn_mask): 108 | B, L_Q, H, D = queries.shape 109 | _, L_K, _, _ = keys.shape 110 | 111 | queries = queries.transpose(2, 1) 112 | keys = keys.transpose(2, 1) 113 | values = values.transpose(2, 1) 114 | 115 | U_part = self.factor * np.ceil(np.log(L_K)).astype('int').item() # c*ln(L_k) 116 | u = self.factor * np.ceil(np.log(L_Q)).astype('int').item() # c*ln(L_q) 117 | 118 | U_part = U_part if U_part < L_K else L_K 119 | u = u if u < L_Q else L_Q 120 | 121 | scores_top, index = self._prob_QK(queries, keys, sample_k=U_part, n_top=u) 122 | 123 | # add scale factor 124 | scale = self.scale or 1. / sqrt(D) 125 | if scale is not None: 126 | scores_top = scores_top * scale 127 | # get the context 128 | context = self._get_initial_context(values, L_Q) 129 | # update the context with selected top_k queries 130 | context, attn = self._update_context(context, values, scores_top, index, L_Q, attn_mask) 131 | 132 | return context.contiguous(), attn 133 | 134 | 
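# ProbAttention above is the ProbSparse self-attention from Informer: rather than
# scoring every query against every key, _prob_QK samples sample_k keys per query,
# ranks queries by the sparsity measure M(q, K) = max_j(q.k_j) - mean_j(q.k_j), and
# evaluates exact attention only for the n_top highest-scoring queries; all other
# positions keep the initial context (mean of V, or its cumulative sum under a
# causal mask). This is what reduces the cost from O(L^2) toward O(L*log L).
#
# A minimal usage sketch, wiring it through the AttentionLayer defined below
# (shapes and hyper-parameter values are illustrative, not taken from the scripts):
#
#   attn = AttentionLayer(ProbAttention(mask_flag=False, factor=5), d_model=512, n_heads=8)
#   x = torch.randn(2, 96, 512)              # [batch, length, d_model]
#   out, _ = attn(x, x, x, attn_mask=None)   # out: [2, 96, 512]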
135 | class AttentionLayer(nn.Module): 136 | def __init__(self, attention, d_model, n_heads, d_keys=None, 137 | d_values=None): 138 | super(AttentionLayer, self).__init__() 139 | 140 | d_keys = d_keys or (d_model // n_heads) 141 | d_values = d_values or (d_model // n_heads) 142 | 143 | self.inner_attention = attention 144 | self.query_projection = nn.Linear(d_model, d_keys * n_heads) 145 | self.key_projection = nn.Linear(d_model, d_keys * n_heads) 146 | self.value_projection = nn.Linear(d_model, d_values * n_heads) 147 | self.out_projection = nn.Linear(d_values * n_heads, d_model) 148 | self.n_heads = n_heads 149 | 150 | def forward(self, queries, keys, values, attn_mask): 151 | B, L, _ = queries.shape 152 | _, S, _ = keys.shape 153 | H = self.n_heads 154 | 155 | queries = self.query_projection(queries).view(B, L, H, -1) 156 | keys = self.key_projection(keys).view(B, S, H, -1) 157 | values = self.value_projection(values).view(B, S, H, -1) 158 | 159 | out, attn = self.inner_attention( 160 | queries, 161 | keys, 162 | values, 163 | attn_mask 164 | ) 165 | out = out.view(B, L, -1) 166 | 167 | return self.out_projection(out), attn 168 | 169 | 170 | class ReformerLayer(nn.Module): 171 | def __init__(self, attention, d_model, n_heads, d_keys=None, 172 | d_values=None, causal=False, bucket_size=4, n_hashes=4): 173 | super().__init__() 174 | self.bucket_size = bucket_size 175 | self.attn = LSHSelfAttention( 176 | dim=d_model, 177 | heads=n_heads, 178 | bucket_size=bucket_size, 179 | n_hashes=n_hashes, 180 | causal=causal 181 | ) 182 | 183 | def fit_length(self, queries): 184 | # inside reformer: assert N % (bucket_size * 2) == 0 185 | B, N, C = queries.shape 186 | if N % (self.bucket_size * 2) == 0: 187 | return queries 188 | else: 189 | # fill the time series 190 | fill_len = (self.bucket_size * 2) - (N % (self.bucket_size * 2)) 191 | return torch.cat([queries, torch.zeros([B, fill_len, C]).to(queries.device)], dim=1) 192 | 193 | def forward(self, queries, keys, values, attn_mask): 194 | # in Reformer: default queries=keys 195 | B, N, C = queries.shape 196 | queries = self.attn(self.fit_length(queries))[:, :N, :] 197 | return queries, None 198 | -------------------------------------------------------------------------------- /layers/Transformer_EncDec.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class ConvLayer(nn.Module): 7 | def __init__(self, c_in): 8 | super(ConvLayer, self).__init__() 9 | self.downConv = nn.Conv1d(in_channels=c_in, 10 | out_channels=c_in, 11 | kernel_size=3, 12 | padding=2, 13 | padding_mode='circular') 14 | self.norm = nn.BatchNorm1d(c_in) 15 | self.activation = nn.ELU() 16 | self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1) 17 | 18 | def forward(self, x): 19 | x = self.downConv(x.permute(0, 2, 1)) 20 | x = self.norm(x) 21 | x = self.activation(x) 22 | x = self.maxPool(x) 23 | x = x.transpose(1, 2) 24 | return x 25 | 26 | 27 | class EncoderLayer(nn.Module): 28 | def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"): 29 | super(EncoderLayer, self).__init__() 30 | d_ff = d_ff or 4 * d_model 31 | self.attention = attention 32 | self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1) 33 | self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1) 34 | self.norm1 = nn.LayerNorm(d_model) 35 | self.norm2 = nn.LayerNorm(d_model) 36 | self.dropout = nn.Dropout(dropout) 37 | self.activation = F.relu if activation == "relu" else F.gelu 38 | 39 | def forward(self, x, attn_mask=None): 40 | new_x, attn = self.attention( 41 | x, x, x, 42 | attn_mask=attn_mask 43 | ) 44 | x = x + self.dropout(new_x) 45 | 46 | y = x = self.norm1(x) 47 | y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1)))) 48 | y = self.dropout(self.conv2(y).transpose(-1, 1)) 49 | 50 | return self.norm2(x + y), attn 51 | 52 | 53 | class Encoder(nn.Module): 54 | def __init__(self, attn_layers, conv_layers=None, norm_layer=None): 55 | super(Encoder, self).__init__() 56 | self.attn_layers = nn.ModuleList(attn_layers) 57 | self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None 58 | self.norm = norm_layer 59 | 60 | def forward(self, x, attn_mask=None): 61 | # x [B, L, D] 62 | attns = [] 63 | if self.conv_layers is not None: 64 | for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers): 65 | x, attn = attn_layer(x, attn_mask=attn_mask) 66 | x = conv_layer(x) 67 | attns.append(attn) 68 | x, attn = self.attn_layers[-1](x) 69 | attns.append(attn) 70 | else: 71 | for attn_layer in self.attn_layers: 72 | x, attn = attn_layer(x, attn_mask=attn_mask) 73 | attns.append(attn) 74 | 75 | if self.norm is not None: 76 | x = self.norm(x) 77 | 78 | return x, attns 79 | 80 | 81 | class DecoderLayer(nn.Module): 82 | def __init__(self, self_attention, cross_attention, d_model, d_ff=None, 83 | dropout=0.1, activation="relu"): 84 | super(DecoderLayer, self).__init__() 85 | d_ff = d_ff or 4 * d_model 86 | self.self_attention = self_attention 87 | self.cross_attention = cross_attention 88 | self.conv1 = nn.Conv1d(in_channels=d_model, 
out_channels=d_ff, kernel_size=1) 89 | self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1) 90 | self.norm1 = nn.LayerNorm(d_model) 91 | self.norm2 = nn.LayerNorm(d_model) 92 | self.norm3 = nn.LayerNorm(d_model) 93 | self.dropout = nn.Dropout(dropout) 94 | self.activation = F.relu if activation == "relu" else F.gelu 95 | 96 | def forward(self, x, cross, x_mask=None, cross_mask=None): 97 | x = x + self.dropout(self.self_attention( 98 | x, x, x, 99 | attn_mask=x_mask 100 | )[0]) 101 | x = self.norm1(x) 102 | 103 | x = x + self.dropout(self.cross_attention( 104 | x, cross, cross, 105 | attn_mask=cross_mask 106 | )[0]) 107 | 108 | y = x = self.norm2(x) 109 | y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1)))) 110 | y = self.dropout(self.conv2(y).transpose(-1, 1)) 111 | 112 | return self.norm3(x + y) 113 | 114 | 115 | class Decoder(nn.Module): 116 | def __init__(self, layers, norm_layer=None, projection=None): 117 | super(Decoder, self).__init__() 118 | self.layers = nn.ModuleList(layers) 119 | self.norm = norm_layer 120 | self.projection = projection 121 | 122 | def forward(self, x, cross, x_mask=None, cross_mask=None): 123 | for layer in self.layers: 124 | x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask) 125 | 126 | if self.norm is not None: 127 | x = self.norm(x) 128 | 129 | if self.projection is not None: 130 | x = self.projection(x) 131 | return x 132 | -------------------------------------------------------------------------------- /layers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Masterleia/TSF_LSTF_Compare/74945de299f4c91d4ef21ad822c562c57b301f1b/layers/__init__.py -------------------------------------------------------------------------------- /models/Autoformer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from layers.Embed import DataEmbedding, DataEmbedding_wo_pos 5 | from layers.AutoCorrelation import AutoCorrelation, AutoCorrelationLayer 6 | from layers.Autoformer_EncDec import Encoder, Decoder, EncoderLayer, DecoderLayer, my_Layernorm, series_decomp 7 | import math 8 | import numpy as np 9 | 10 | 11 | class Model(nn.Module): 12 | 13 | def __init__(self, configs): 14 | super(Model, self).__init__() 15 | self.seq_len = configs.seq_len 16 | self.label_len = configs.label_len 17 | self.pred_len = configs.pred_len 18 | self.output_attention = configs.output_attention 19 | 20 | # Decomp 21 | kernel_size = configs.moving_avg 22 | self.decomp = series_decomp(kernel_size) 23 | 24 | 25 | self.enc_embedding = DataEmbedding_wo_pos(configs.enc_in, configs.d_model, configs.embed, configs.freq, 26 | configs.dropout) 27 | self.dec_embedding = DataEmbedding_wo_pos(configs.dec_in, configs.d_model, configs.embed, configs.freq, 28 | configs.dropout) 29 | 30 | # Encoder 31 | self.encoder = Encoder( 32 | [ 33 | EncoderLayer( 34 | AutoCorrelationLayer( 35 | AutoCorrelation(False, configs.factor, attention_dropout=configs.dropout, 36 | output_attention=configs.output_attention), 37 | configs.d_model, configs.n_heads), 38 | configs.d_model, 39 | configs.d_ff, 40 | moving_avg=configs.moving_avg, 41 | dropout=configs.dropout, 42 | activation=configs.activation 43 | ) for l in range(configs.e_layers) 44 | ], 45 | norm_layer=my_Layernorm(configs.d_model) 46 | ) 47 | # Decoder 48 | self.decoder = Decoder( 49 | [ 50 | DecoderLayer( 51 | AutoCorrelationLayer( 
52 | AutoCorrelation(True, configs.factor, attention_dropout=configs.dropout, 53 | output_attention=False), 54 | configs.d_model, configs.n_heads), 55 | AutoCorrelationLayer( 56 | AutoCorrelation(False, configs.factor, attention_dropout=configs.dropout, 57 | output_attention=False), 58 | configs.d_model, configs.n_heads), 59 | configs.d_model, 60 | configs.c_out, 61 | configs.d_ff, 62 | moving_avg=configs.moving_avg, 63 | dropout=configs.dropout, 64 | activation=configs.activation, 65 | ) 66 | for l in range(configs.d_layers) 67 | ], 68 | norm_layer=my_Layernorm(configs.d_model), 69 | projection=nn.Linear(configs.d_model, configs.c_out, bias=True) 70 | ) 71 | 72 | def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, 73 | enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None): 74 | # decomp init 75 | mean = torch.mean(x_enc, dim=1).unsqueeze(1).repeat(1, self.pred_len, 1) 76 | zeros = torch.zeros([x_dec.shape[0], self.pred_len, x_dec.shape[2]], device=x_enc.device) 77 | seasonal_init, trend_init = self.decomp(x_enc) 78 | # decoder input 79 | trend_init = torch.cat([trend_init[:, -self.label_len:, :], mean], dim=1) 80 | seasonal_init = torch.cat([seasonal_init[:, -self.label_len:, :], zeros], dim=1) 81 | # enc 82 | enc_out = self.enc_embedding(x_enc, x_mark_enc) 83 | enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask) 84 | # dec 85 | dec_out = self.dec_embedding(seasonal_init, x_mark_dec) 86 | seasonal_part, trend_part = self.decoder(dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask, 87 | trend=trend_init) 88 | # final 89 | dec_out = trend_part + seasonal_part 90 | 91 | if self.output_attention: 92 | return dec_out[:, -self.pred_len:, :], attns 93 | else: 94 | return dec_out[:, -self.pred_len:, :] # [B, L, D] 95 | -------------------------------------------------------------------------------- /models/Graph_WaveNet.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | from torch.autograd import Variable 7 | import sys 8 | 9 | 10 | class nconv(nn.Module): 11 | def __init__(self): 12 | super(nconv,self).__init__() 13 | 14 | def forward(self,x, A): 15 | x = torch.einsum('ncvl,vw->ncwl',(x,A)) 16 | return x.contiguous() 17 | 18 | class linear(nn.Module): 19 | def __init__(self,c_in,c_out): 20 | super(linear,self).__init__() 21 | self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0,0), stride=(1,1), bias=True) 22 | 23 | def forward(self,x): 24 | return self.mlp(x) 25 | 26 | class gcn(nn.Module): 27 | def __init__(self,c_in,c_out,dropout,support_len=3,order=2): 28 | super(gcn,self).__init__() 29 | self.nconv = nconv() 30 | c_in = (order*support_len+1)*c_in 31 | self.mlp = linear(c_in,c_out) 32 | self.dropout = dropout 33 | self.order = order 34 | 35 | def forward(self,x,support): 36 | out = [x] 37 | for a in support: 38 | x1 = self.nconv(x,a) 39 | out.append(x1) 40 | for k in range(2, self.order + 1): 41 | x2 = self.nconv(x1,a) 42 | out.append(x2) 43 | x1 = x2 44 | 45 | h = torch.cat(out,dim=1) 46 | h = self.mlp(h) 47 | h = F.dropout(h, self.dropout, training=self.training) 48 | return h 49 | 50 | 51 | class Model(nn.Module): 52 | def __init__(self, configs, num_nodes=207, dropout=0.3, supports=None, gcn_bool=False, addaptadj=True, aptinit=None, in_dim=7,out_dim=12,residual_channels=32,dilation_channels=32,skip_channels=256,end_channels=512,kernel_size=2,blocks=32,layers=2): 53 | super(Model, self).__init__() 54 | 
blocks = int(configs.seq_len / 3) 55 | self.dropout = dropout 56 | self.blocks = blocks 57 | self.layers = layers 58 | self.gcn_bool = gcn_bool 59 | self.addaptadj = addaptadj 60 | # self.device = configs.devices 61 | if configs.use_gpu: 62 | os.environ["CUDA_VISIBLE_DEVICES"] = str( 63 | configs.gpu) if not configs.use_multi_gpu else configs.devices 64 | device = torch.device('cuda:{}'.format(configs.gpu)) 65 | else: 66 | device = torch.device('cuda:0') 67 | in_dim = 1 68 | out_dim = configs.pred_len 69 | 70 | self.filter_convs = nn.ModuleList() 71 | self.gate_convs = nn.ModuleList() 72 | self.residual_convs = nn.ModuleList() 73 | self.skip_convs = nn.ModuleList() 74 | self.bn = nn.ModuleList() 75 | self.gconv = nn.ModuleList() 76 | 77 | self.start_conv = nn.Conv2d(in_channels=in_dim, 78 | out_channels=residual_channels, 79 | kernel_size=(1,1)) 80 | self.supports = supports 81 | 82 | receptive_field = 1 83 | 84 | self.supports_len = 0 85 | if supports is not None: 86 | self.supports_len += len(supports) 87 | 88 | if gcn_bool and addaptadj: 89 | if aptinit is None: 90 | if supports is None: 91 | self.supports = [] 92 | self.nodevec1 = nn.Parameter(torch.randn(num_nodes, 10).to(device), requires_grad=True).to(device) 93 | self.nodevec2 = nn.Parameter(torch.randn(10, num_nodes).to(device), requires_grad=True).to(device) 94 | self.supports_len +=1 95 | else: 96 | if supports is None: 97 | self.supports = [] 98 | m, p, n = torch.svd(aptinit) 99 | initemb1 = torch.mm(m[:, :10], torch.diag(p[:10] ** 0.5)) 100 | initemb2 = torch.mm(torch.diag(p[:10] ** 0.5), n[:, :10].t()) 101 | self.nodevec1 = nn.Parameter(initemb1, requires_grad=True).to(device) 102 | self.nodevec2 = nn.Parameter(initemb2, requires_grad=True).to(device) 103 | self.supports_len += 1 104 | 105 | 106 | 107 | 108 | for b in range(blocks): 109 | additional_scope = kernel_size - 1 110 | new_dilation = 1 111 | for i in range(layers): 112 | # dilated convolutions 113 | self.filter_convs.append(nn.Conv2d(in_channels=residual_channels, 114 | out_channels=dilation_channels, 115 | kernel_size=(1,kernel_size),dilation=new_dilation)) 116 | 117 | self.gate_convs.append(nn.Conv1d(in_channels=residual_channels, 118 | out_channels=dilation_channels, 119 | kernel_size=(1, kernel_size), dilation=new_dilation)) 120 | 121 | # 1x1 convolution for residual connection 122 | self.residual_convs.append(nn.Conv1d(in_channels=dilation_channels, 123 | out_channels=residual_channels, 124 | kernel_size=(1, 1))) 125 | 126 | # 1x1 convolution for skip connection 127 | self.skip_convs.append(nn.Conv1d(in_channels=dilation_channels, 128 | out_channels=skip_channels, 129 | kernel_size=(1, 1))) 130 | self.bn.append(nn.BatchNorm2d(residual_channels)) 131 | new_dilation *=2 132 | receptive_field += additional_scope 133 | additional_scope *= 2 134 | if self.gcn_bool: 135 | self.gconv.append(gcn(dilation_channels,residual_channels,dropout,support_len=self.supports_len)) 136 | 137 | 138 | 139 | self.end_conv_1 = nn.Conv2d(in_channels=skip_channels, 140 | out_channels=end_channels, 141 | kernel_size=(1,1), 142 | bias=True) 143 | 144 | self.end_conv_2 = nn.Conv2d(in_channels=end_channels, 145 | out_channels=out_dim, 146 | kernel_size=(1,1), 147 | bias=True) 148 | 149 | self.receptive_field = receptive_field 150 | 151 | 152 | 153 | def forward(self, input): 154 | in_len = input.size(3) 155 | if in_len 0): 24 | self.GRUskip = nn.GRU(self.hidC, self.hidS) 25 | self.linear1 = nn.Linear(self.hidR + self.skip * self.hidS, self.m * self.pre) 26 | else: 27 | self.linear1 = 
nn.Linear(self.hidR, self.m * self.pre) 28 | if (self.hw > 0): 29 | self.highway = nn.Linear(self.hw, self.pre) 30 | self.output = None 31 | if (args.output_fun == 'sigmoid'): 32 | self.output = F.sigmoid 33 | if (args.output_fun == 'tanh'): 34 | self.output = F.tanh 35 | 36 | def forward(self, x): 37 | batch_size = x.size(0) 38 | 39 | # CNN 40 | c = x.view(-1, 1, self.P, self.m) 41 | c = F.relu(self.conv1(c)) 42 | c = self.dropout(c) 43 | c = torch.squeeze(c, 3) 44 | 45 | # RNN 46 | r = c.permute(2, 0, 1).contiguous() 47 | _, r = self.GRU1(r) 48 | r = self.dropout(torch.squeeze(r, 0)) 49 | # r = r.view(batch_size*self.m, -1) 50 | 51 | # skip-rnn 52 | 53 | if (self.skip > 0): 54 | s = c[:, :, int(-self.pt * self.skip):].contiguous() 55 | s = s.view(batch_size, self.hidC, self.pt, self.skip) 56 | s = s.permute(2, 0, 3, 1).contiguous() 57 | s = s.view(self.pt, batch_size * self.skip, self.hidC) 58 | _, s = self.GRUskip(s) 59 | s = s.view(batch_size, self.skip * self.hidS) 60 | s = self.dropout(s) 61 | # s = s.view(batch_size*self.m, -1) 62 | r = torch.cat((r, s), 1) 63 | 64 | res = self.linear1(r) 65 | 66 | # highway 67 | if (self.hw > 0): 68 | z = x[:, -self.hw:, :] 69 | z = z.permute(0, 2, 1).contiguous().view(-1, self.hw) 70 | z = self.highway(z) 71 | z = z.view(-1, self.m * self.pre) 72 | res = res + z 73 | 74 | res = res.view(batch_size, self.pre, self.m) 75 | 76 | if (self.output): 77 | res = self.output(res) 78 | return res 79 | 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /models/MTGNN.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | from layers.MTGNN_layer import * 9 | 10 | 11 | class Model(nn.Module): 12 | def __init__(self, args, predefined_A=None, static_feat=None, dropout=0.3, subgraph_size=20, node_dim=40, dilation_exponential=1, conv_channels=32, residual_channels=32, skip_channels=64, end_channels=128, seq_length=12, in_dim=1, out_dim=12, layers=3, propalpha=0.05, tanhalpha=3, layer_norm_affline=True): 13 | super(Model, self).__init__() 14 | seq_length = args.seq_len 15 | out_dim = args.pred_len 16 | layers = int(seq_length/6) 17 | num_nodes = args.enc_in 18 | gcn_depth = 2 19 | buildA_true = True 20 | gcn_true = True 21 | device = torch.device('cuda:0') 22 | subgraph_size = args.enc_in 23 | 24 | 25 | self.gcn_true = gcn_true 26 | self.buildA_true = buildA_true 27 | self.num_nodes = num_nodes 28 | self.dropout = dropout 29 | self.predefined_A = predefined_A 30 | self.filter_convs = nn.ModuleList() 31 | self.gate_convs = nn.ModuleList() 32 | self.residual_convs = nn.ModuleList() 33 | self.skip_convs = nn.ModuleList() 34 | self.gconv1 = nn.ModuleList() 35 | self.gconv2 = nn.ModuleList() 36 | self.norm = nn.ModuleList() 37 | self.start_conv = nn.Conv2d(in_channels=in_dim, 38 | out_channels=residual_channels, 39 | kernel_size=(1, 1)) 40 | self.gc = graph_constructor(num_nodes, subgraph_size, node_dim, device, alpha=tanhalpha, static_feat=static_feat) 41 | 42 | self.seq_length = seq_length 43 | kernel_size = 7 44 | if dilation_exponential>1: 45 | self.receptive_field = int(1+(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1)) 46 | else: 47 | self.receptive_field = layers*(kernel_size-1) + 1 48 | 49 | for i in range(1): 50 | if dilation_exponential>1: 51 | rf_size_i = int(1 + i*(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1)) 52 | else: 
53 | rf_size_i = i*layers*(kernel_size-1)+1 54 | new_dilation = 1 55 | for j in range(1,layers+1): 56 | if dilation_exponential > 1: 57 | rf_size_j = int(rf_size_i + (kernel_size-1)*(dilation_exponential**j-1)/(dilation_exponential-1)) 58 | else: 59 | rf_size_j = rf_size_i+j*(kernel_size-1) 60 | 61 | self.filter_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation)) 62 | self.gate_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation)) 63 | self.residual_convs.append(nn.Conv2d(in_channels=conv_channels, 64 | out_channels=residual_channels, 65 | kernel_size=(1, 1))) 66 | if self.seq_length>self.receptive_field: 67 | self.skip_convs.append(nn.Conv2d(in_channels=conv_channels, 68 | out_channels=skip_channels, 69 | kernel_size=(1, self.seq_length-rf_size_j+1))) 70 | else: 71 | self.skip_convs.append(nn.Conv2d(in_channels=conv_channels, 72 | out_channels=skip_channels, 73 | kernel_size=(1, self.receptive_field-rf_size_j+1))) 74 | 75 | if self.gcn_true: 76 | self.gconv1.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha)) 77 | self.gconv2.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha)) 78 | 79 | if self.seq_length>self.receptive_field: 80 | self.norm.append(LayerNorm((residual_channels, num_nodes, self.seq_length - rf_size_j + 1),elementwise_affine=layer_norm_affline)) 81 | else: 82 | self.norm.append(LayerNorm((residual_channels, num_nodes, self.receptive_field - rf_size_j + 1),elementwise_affine=layer_norm_affline)) 83 | 84 | new_dilation *= dilation_exponential 85 | 86 | self.layers = layers 87 | self.end_conv_1 = nn.Conv2d(in_channels=skip_channels, 88 | out_channels=end_channels, 89 | kernel_size=(1,1), 90 | bias=True) 91 | self.end_conv_2 = nn.Conv2d(in_channels=end_channels, 92 | out_channels=out_dim, 93 | kernel_size=(1,1), 94 | bias=True) 95 | if self.seq_length > self.receptive_field: 96 | self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.seq_length), bias=True) 97 | self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, self.seq_length-self.receptive_field+1), bias=True) 98 | 99 | else: 100 | self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.receptive_field), bias=True) 101 | self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, 1), bias=True) 102 | 103 | 104 | self.idx = torch.arange(self.num_nodes).to(device) 105 | 106 | 107 | def forward(self, input, idx=None): 108 | seq_len = input.size(3) 109 | assert seq_len==self.seq_length, 'input sequence length not equal to preset sequence length' 110 | 111 | if self.seq_length>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting)) 139 | exp.mark_train(setting) 140 | 141 | print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting)) 142 | mae, rmse, mape = exp.mark_test(setting) 143 | totall_MAE.append(mae) 144 | totall_RMSE.append(rmse) 145 | totall_MAPE.append(mape) 146 | 147 | if args.do_predict: 148 | print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting)) 149 | exp.mask_predict(setting, True) 150 | 151 | torch.cuda.empty_cache() 152 | 153 | print('==============================================================================') 154 | print(f'totall_MAE={np.mean(totall_MAE)}, totall_RMSE={np.mean(totall_RMSE)}, totall_MAPE={np.mean(totall_MAPE)}') 155 | 156 | else: 157 | for 
ii in range(args.itr): 158 | # setting record of experiments 159 | setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format( 160 | args.model_id, 161 | args.model, 162 | args.data, 163 | args.features, 164 | args.seq_len, 165 | args.label_len, 166 | args.pred_len, 167 | args.d_model, 168 | args.n_heads, 169 | args.e_layers, 170 | args.d_layers, 171 | args.d_ff, 172 | args.factor, 173 | args.embed, 174 | args.distil, 175 | args.des, ii) 176 | 177 | exp = Exp(args) # set experiments 178 | print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting)) 179 | exp.no_mark_train(setting) 180 | 181 | print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting)) 182 | mae, rmse, mape = exp.no_mark_test(setting) 183 | totall_MAE.append(mae) 184 | totall_RMSE.append(rmse) 185 | totall_MAPE.append(mape) 186 | 187 | if args.do_predict: 188 | print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting)) 189 | exp.no_mask_predict(setting, True) 190 | 191 | torch.cuda.empty_cache() 192 | 193 | print('==============================================================================') 194 | print( 195 | f'totall_MAE={np.mean(totall_MAE)}, totall_RMSE={np.mean(totall_RMSE)}, totall_MAPE={np.mean(totall_MAPE)}') 196 | 197 | else: 198 | ii = 0 199 | setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(args.model_id, 200 | args.model, 201 | args.data, 202 | args.features, 203 | args.seq_len, 204 | args.label_len, 205 | args.pred_len, 206 | args.d_model, 207 | args.n_heads, 208 | args.e_layers, 209 | args.d_layers, 210 | args.d_ff, 211 | args.factor, 212 | args.embed, 213 | args.distil, 214 | args.des, ii) 215 | 216 | exp = Exp(args) # set experiments 217 | print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting)) 218 | if (args.model != 'Graph_WaveNet') & (args.model != 'LSTNet') & (args.model != 'MTGNN'): 219 | exp.mark_test(setting, test=1) 220 | else: 221 | exp.no_mark_test(setting, test=1) 222 | torch.cuda.empty_cache() 223 | -------------------------------------------------------------------------------- /scripts/Autoformer/Autoformer_COVID19_world.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_24_12_12 --model Autoformer --data custom --features M \ 3 | --target 'Increase rate' --freq 'd' --seq_len 24 --label_len 12 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 4 | --des 'Exp' --itr 1 5 | 6 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_12 --model Autoformer --data custom --features M \ 7 | #--target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 8 | #--des 'Exp' --itr 1 9 | # 10 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_48_24_12 --model Autoformer --data custom --features M \ 11 | #--target 'Increase rate' --freq 'd' --seq_len 48 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 12 | #--des 'Exp' --itr 1 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_12_12 --model Autoformer --data custom --features M 
\ 15 | #--target 'Increase rate' --freq 'd' --seq_len 36 --label_len 12 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 16 | #--des 'Exp' --itr 1 17 | # 18 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_96_48_12 --model Autoformer --data custom --features M \ 19 | #--target 'Increase rate' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 20 | #--des 'Exp' --itr 1 21 | # 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_18_12 --model Autoformer --data custom --features M \ 23 | #--target 'Increase rate' --freq 'd' --seq_len 36 --label_len 18 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 24 | #--des 'Exp' --itr 1 25 | 26 | 27 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_12 --model Autoformer --data custom --features M \ 28 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 29 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 30 | 31 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_24 --model Autoformer --data custom --features M \ 32 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 24 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 33 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 34 | 35 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_48 --model Autoformer --data custom --features M \ 36 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 37 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 38 | 39 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_60 --model Autoformer --data custom --features M \ 40 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 60 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 41 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 42 | 43 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_72 --model Autoformer --data custom --features M \ 44 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 72 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 45 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Autoformer/Autoformer_PEMS03.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_48_24_12 --model Autoformer --data custom --features M \ 3 | #--target '313344' --freq 't' --seq_len 48 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 4 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path 
PEMS03_deal.csv --model_id PEMS03_96_48_12 --model Autoformer --data custom --features M \ 7 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 8 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_96_12 --model Autoformer --data custom --features M \ 11 | #--target '313344' --freq 't' --seq_len 192 --label_len 96 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 12 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_192_12 --model Autoformer --data custom --features M \ 15 | #--target '313344' --freq 't' --seq_len 192 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 16 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | # 18 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_384_192_12 --model Autoformer --data custom --features M \ 19 | #--target '313344' --freq 't' --seq_len 384 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 20 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 21 | # 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_384_384_12 --model Autoformer --data custom --features M \ 23 | #--target '313344' --freq 't' --seq_len 384 --label_len 384 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 24 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 25 | 26 | 27 | 28 | 29 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_48 --model Autoformer --data custom --features M \ 30 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 31 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 32 | 33 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_192 --model Autoformer --data custom --features M \ 34 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 35 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 36 | 37 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_384 --model Autoformer --data custom --features M \ 38 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 39 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 40 | 41 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_768 --model Autoformer --data custom --features M \ 42 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 43 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- 
/scripts/Autoformer/Autoformer_Stock_apple.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_12 --model Autoformer --data custom --features M \ 3 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 4 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_48 --model Autoformer --data custom --features M \ 7 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 8 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_96 --model Autoformer --data custom --features M \ 11 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 96 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 12 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | 14 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_288 --model Autoformer --data custom --features M \ 15 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 288 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 16 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | 18 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_480 --model Autoformer --data custom --features M \ 19 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 480 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 20 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 21 | 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_384_384_12 --model Autoformer --data custom --features M \ 23 | #--target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 24 | #--des 'Exp' --itr 1 25 | 26 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path exchange_rate.csv --model_id exchange_96_48_12 --model Reformer --data custom --features M \ 27 | --target 'OT' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 8 --dec_in 8 --c_out 8 \ 28 | --des 'Exp' --itr 1 --use_multi_gpu --devices '0,1' 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /scripts/Autoformer/Autoformer_WTH.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_48_24_12 --model Autoformer --data custom --features M \ 3 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 48 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 4 | #--des 'Exp' --itr 1 5 | # 6 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_12 --model Autoformer --data custom --features M \ 7 | #--target 
'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 8 | #--des 'Exp' --itr 1 9 | # 10 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_192_96_12 --model Autoformer --data custom --features M \ 11 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 192 --label_len 96 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 12 | #--des 'Exp' --itr 1 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_192_192_12 --model Autoformer --data custom --features M \ 15 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 192 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 16 | #--des 'Exp' --itr 1 17 | # 18 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_384_192_12 --model Autoformer --data custom --features M \ 19 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 384 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 20 | #--des 'Exp' --itr 1 21 | # 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_384_384_12 --model Autoformer --data custom --features M \ 23 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 384 --label_len 384 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 24 | #--des 'Exp' --itr 1 25 | 26 | 27 | 28 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_12 --model Autoformer --data custom --features M \ 29 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 30 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 31 | 32 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_48 --model Autoformer --data custom --features M \ 33 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 34 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 35 | 36 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_192 --model Autoformer --data custom --features M \ 37 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 38 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 39 | 40 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_384 --model Autoformer --data custom --features M \ 41 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 42 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 43 | 44 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_768 --model Autoformer --data custom --features M \ 45 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 46 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 47 | 48 | 49 | 
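The Autoformer scripts above all sweep --seq_len/--label_len/--pred_len. For the encoder-decoder models in this repo (Transformer, Informer, Reformer, Autoformer), those three flags determine how the decoder input is assembled, as in Model.forward of models/Autoformer.py shown earlier: the decoder sees the last label_len known steps plus pred_len placeholder steps. A minimal sketch of that windowing (tensor shapes are illustrative; Autoformer itself fills the placeholders with zeros for the seasonal part and the series mean for the trend part):

import torch

seq_len, label_len, pred_len = 96, 48, 12       # e.g. the WTH_96_48_12 run above
x_enc = torch.randn(32, seq_len, 12)            # [batch, lookback window, channels]
placeholder = torch.zeros(32, pred_len, 12)     # future steps the decoder must fill in
x_dec = torch.cat([x_enc[:, -label_len:, :], placeholder], dim=1)
assert x_dec.shape[1] == label_len + pred_len   # decoder length = 48 + 12 = 60
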
-------------------------------------------------------------------------------- /scripts/Autoformer/Autoformer_myETT.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_48 --model Autoformer --data ETTh1 --features M \ 3 | --seq_len 192 --label_len 96 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 4 | 5 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_48 --model Autoformer --data ETTh1 --features M \ 6 | --seq_len 192 --label_len 96 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 7 | 8 | 9 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_192 --model Autoformer --data ETTh1 --features M \ 10 | --seq_len 192 --label_len 96 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 11 | 12 | 13 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_384 --model Autoformer --data ETTh1 --features M \ 14 | --seq_len 192 --label_len 96 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 15 | 16 | 17 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_768 --model Autoformer --data ETTh1 --features M \ 18 | --seq_len 192 --label_len 96 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Graph_WaveNet/Graph_WaveNet_COVID19_world.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_12 --model Graph_WaveNet --data custom --features M \ 5 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 6 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 7 | 8 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24 --model Graph_WaveNet --data custom --features M \ 9 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 24 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 10 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 11 | 12 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_48 --model Graph_WaveNet --data custom --features M \ 13 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 14 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 15 | 16 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_60 --model Graph_WaveNet --data custom --features M \ 17 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 60 
--e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 18 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 19 | 20 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_72 --model Graph_WaveNet --data custom --features M \ 21 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 72 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 22 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Graph_WaveNet/Graph_WaveNet_PEMS03.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_12 --model Graph_WaveNet --data custom --features M \ 5 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 6 | --des 'Exp' --itr 1 --use_multi_gpu --devices '0,1' --batch_size 16 7 | 8 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48 --model Graph_WaveNet --data custom --features M \ 9 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 10 | --des 'Exp' --itr 1 --use_multi_gpu --devices '0,1' --batch_size 16 11 | 12 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_192 --model Graph_WaveNet --data custom --features M \ 13 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 14 | --des 'Exp' --itr 1 --use_multi_gpu --devices '0,1' --batch_size 16 15 | 16 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_384 --model Graph_WaveNet --data custom --features M \ 17 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 18 | --des 'Exp' --itr 1 --use_multi_gpu --devices '0,1' --batch_size 16 19 | 20 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_768 --model Graph_WaveNet --data custom --features M \ 21 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 22 | --des 'Exp' --itr 1 --use_multi_gpu --devices '0,1' --batch_size 16 23 | 24 | 25 | 26 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_12 --model Graph_WaveNet --data custom --features M \ 27 | #--target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 28 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 29 | # 30 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_48 --model Graph_WaveNet --data custom --features M \ 31 | #--target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 32 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 33 | # 34 | 
#python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_192 --model Graph_WaveNet --data custom --features M \ 35 | #--target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 36 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 37 | # 38 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_384 --model Graph_WaveNet --data custom --features M \ 39 | #--target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 40 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 41 | # 42 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_768 --model Graph_WaveNet --data custom --features M \ 43 | #--target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 44 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Graph_WaveNet/Graph_WaveNet_Stock_apple.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_12 --model Graph_WaveNet --data custom --features M \ 3 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 4 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48 --model Graph_WaveNet --data custom --features M \ 7 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 8 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_96 --model Graph_WaveNet --data custom --features M \ 11 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 96 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 12 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | 14 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_288 --model Graph_WaveNet --data custom --features M \ 15 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 288 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 16 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | 18 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_480 --model Graph_WaveNet --data custom --features M \ 19 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 480 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 20 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /scripts/Graph_WaveNet/Graph_WaveNet_WTH.sh: -------------------------------------------------------------------------------- 
1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_12 --model Graph_WaveNet --data custom --features M \ 4 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 5 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48 --model Graph_WaveNet --data custom --features M \ 8 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 9 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_192 --model Graph_WaveNet --data custom --features M \ 12 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 13 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_384 --model Graph_WaveNet --data custom --features M \ 16 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 17 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_768 --model Graph_WaveNet --data custom --features M \ 20 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 21 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 22 | 23 | 24 | -------------------------------------------------------------------------------- /scripts/Graph_WaveNet/Graph_WaveNet_myETT.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_12 --model Graph_WaveNet --data ETTh1 --features M \ 4 | #--seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | # 6 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48 --model Graph_WaveNet --data ETTh1 --features M \ 7 | #--seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 8 | # 9 | # 10 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_192 --model Graph_WaveNet --data ETTh1 --features M \ 11 | #--seq_len 96 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 12 | # 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_384 --model Graph_WaveNet --data ETTh1 --features M \ 15 | #--seq_len 96 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 16 | # 17 | # 18 | #python -u run.py 
--is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_768 --model Graph_WaveNet --data ETTh1 --features M \ 19 | #--seq_len 96 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 20 | 21 | 22 | 23 | 24 | 25 | 26 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_12 --model Graph_WaveNet --data ETTh1 --features M \ 27 | --seq_len 192 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 28 | 29 | 30 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_48 --model Graph_WaveNet --data ETTh1 --features M \ 31 | --seq_len 192 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 32 | 33 | 34 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_192 --model Graph_WaveNet --data ETTh1 --features M \ 35 | --seq_len 192 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 36 | 37 | 38 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_384 --model Graph_WaveNet --data ETTh1 --features M \ 39 | --seq_len 192 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 40 | 41 | 42 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_768 --model Graph_WaveNet --data ETTh1 --features M \ 43 | --seq_len 192 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' --------------------------------------------------------------------------------
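Every runner script above follows the same template: only model_id, seq_len and pred_len change between invocations, and model_id encodes them as {dataset}_{seq_len}_{pred_len}. The snippet below is an illustrative sketch only, not a file in this repository: a hypothetical Python driver for the same pred_len sweep, assuming run.py accepts exactly the flags used in the shell scripts above.

import subprocess

# Hypothetical sweep driver; every flag below is copied from the
# Graph_WaveNet_myETT.sh commands above.
BASE_ARGS = [
    "python", "-u", "run.py", "--is_training", "1",
    "--root_path", "./dataset/ETT-small/", "--data_path", "ETTh1.csv",
    "--model", "Graph_WaveNet", "--data", "ETTh1", "--features", "M",
    "--seq_len", "192", "--label_len", "0",
    "--e_layers", "2", "--d_layers", "1", "--factor", "3",
    "--enc_in", "7", "--dec_in", "7", "--c_out", "7",
    "--des", "Exp", "--itr", "5",
    "--use_multi_gpu", "--devices", "0,1",
]

for pred_len in (12, 48, 192, 384, 768):
    # model_id encodes dataset_seqlen_predlen, matching the scripts above.
    extra = ["--model_id", f"ETTh1_192_{pred_len}", "--pred_len", str(pred_len)]
    subprocess.run(BASE_ARGS + extra, check=True)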
/scripts/LSTNet/LSTNet_COVID19_world.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_12 --model LSTNet --data custom --features M \ 3 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 12 --enc_in 4 --dec_in 4 --c_out 4 --hidCNN 50 --hidRNN 50 --highway_window 36 \ 4 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24 --model LSTNet --data custom --features M \ 7 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 24 --enc_in 4 --dec_in 4 --c_out 4 --hidCNN 50 --hidRNN 50 --highway_window 36 \ 8 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_48 --model LSTNet --data custom --features M \ 11 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 48 --enc_in 4 --dec_in 4 --c_out 4 --hidCNN 50 --hidRNN 50 --highway_window 36 \ 12 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | 14 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_60 --model LSTNet --data custom --features M \ 15 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 60 --enc_in 4 --dec_in 4 --c_out 4 --hidCNN 50 --hidRNN 50 --highway_window 36 \ 16 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | 18 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_72 --model LSTNet --data custom --features M \ 19 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 72 --enc_in 4 --dec_in 4 --c_out 4 --hidCNN 50 --hidRNN 50 --highway_window 36 \ 20 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/LSTNet/LSTNet_PEMS03.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_12 --model LSTNet --data custom --features M \ 4 | #--target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 12 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 5 | #--output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 6 | # 7 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48 --model LSTNet --data custom --features M \ 8 | #--target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 48 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 9 | #--output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 10 | # 11 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_192 --model LSTNet --data custom --features M \ 12 | #--target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 192 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 13 | #--output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 14 | # 15 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_384 --model LSTNet --data custom --features M \ 16 | #--target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 384 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 17 | #--output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 18 | # 19 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_768 --model LSTNet --data custom --features M \ 20 | #--target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 768 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 21 | #--output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 22 | 23 | 24 | 25 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_12 --model LSTNet --data custom --features M \ 26 | --target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 12 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 192 \ 27 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 28 | 29 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id
PEMS03_192_48 --model LSTNet --data custom --features M \ 30 | --target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 48 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 192 \ 31 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 32 | 33 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_192 --model LSTNet --data custom --features M \ 34 | --target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 192 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 192 \ 35 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 36 | 37 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_384 --model LSTNet --data custom --features M \ 38 | --target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 384 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 192 \ 39 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 40 | 41 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_768 --model LSTNet --data custom --features M \ 42 | --target '313344' --freq 't' --seq_len 192 --label_len 0 --pred_len 768 --enc_in 358 --dec_in 358 --c_out 358 --hidCNN 50 --hidRNN 50 --highway_window 192 \ 43 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/LSTNet/LSTNet_Stock_apple.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_12 --model LSTNet --data custom --features M \ 4 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 12 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 5 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48 --model LSTNet --data custom --features M \ 8 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 48 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 9 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_96 --model LSTNet --data custom --features M \ 12 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 96 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 13 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_288 --model LSTNet --data custom --features M \ 16 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 288 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 17 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_480 --model LSTNet --data custom --features M \ 20 | --target 'Close_AAPL'
--freq 'd' --seq_len 96 --label_len 0 --pred_len 480 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 21 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/LSTNet/LSTNet_WTH.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_12 --model LSTNet --data custom --features M \ 4 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 12 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 5 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48 --model LSTNet --data custom --features M \ 8 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 48 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 9 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_192 --model LSTNet --data custom --features M \ 12 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 192 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 13 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_384 --model LSTNet --data custom --features M \ 16 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 384 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 17 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_768 --model LSTNet --data custom --features M \ 20 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 768 --enc_in 12 --dec_in 12 --c_out 12 --hidCNN 50 --hidRNN 50 --highway_window 96 \ 21 | --output_fun sigmoid --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/LSTNet/LSTNet_myETT.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_12 --model LSTNet --data ETTh1 --features M \ 4 | #--seq_len 96 --label_len 0 --pred_len 12 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 96 --output_fun sigmoid --des 'Exp' \ 5 | #--itr 5 --use_multi_gpu --devices '0,1' 6 | # 7 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48 --model LSTNet --data ETTh1 --features M \ 8 | #--seq_len 96 --label_len 0 --pred_len 48 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 96 --output_fun sigmoid --des 'Exp' \ 9 | #--itr 5 --use_multi_gpu --devices '0,1' 10 | # 11 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_192 --model LSTNet --data ETTh1 --features M \ 12 | #--seq_len 96 --label_len 0 --pred_len 192
--enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 96 --output_fun sigmoid --des 'Exp' \ 13 | #--itr 5 --use_multi_gpu --devices '0,1' 14 | # 15 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_384 --model LSTNet --data ETTh1 --features M \ 16 | #--seq_len 96 --label_len 0 --pred_len 384 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 96 --output_fun sigmoid --des 'Exp' \ 17 | #--itr 5 --use_multi_gpu --devices '0,1' 18 | # 19 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_768 --model LSTNet --data ETTh1 --features M \ 20 | #--seq_len 96 --label_len 0 --pred_len 768 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 96 --output_fun sigmoid --des 'Exp' \ 21 | #--itr 5 --use_multi_gpu --devices '0,1' 22 | 23 | 24 | 25 | 26 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_12 --model LSTNet --data ETTh1 --features M \ 27 | --seq_len 192 --label_len 0 --pred_len 12 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 192 --output_fun sigmoid --des 'Exp' \ 28 | --itr 5 --use_multi_gpu --devices '0,1' 29 | 30 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_48 --model LSTNet --data ETTh1 --features M \ 31 | --seq_len 192 --label_len 0 --pred_len 48 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 192 --output_fun sigmoid --des 'Exp' \ 32 | --itr 5 --use_multi_gpu --devices '0,1' 33 | 34 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_192 --model LSTNet --data ETTh1 --features M \ 35 | --seq_len 192 --label_len 0 --pred_len 192 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 192 --output_fun sigmoid --des 'Exp' \ 36 | --itr 5 --use_multi_gpu --devices '0,1' 37 | 38 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_384 --model LSTNet --data ETTh1 --features M \ 39 | --seq_len 192 --label_len 0 --pred_len 384 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 192 --output_fun sigmoid --des 'Exp' \ 40 | --itr 5 --use_multi_gpu --devices '0,1' 41 | 42 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_768 --model LSTNet --data ETTh1 --features M \ 43 | --seq_len 192 --label_len 0 --pred_len 768 --enc_in 7 --dec_in 7 --c_out 7 --hidCNN 50 --hidRNN 50 --highway_window 192 --output_fun sigmoid --des 'Exp' \ 44 | --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/MTGNN/MTGNN_COVID19.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_12 --model MTGNN --data custom --features M \ 4 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 5 | --des 'Exp' --itr 5 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24 --model MTGNN --data custom --features M \ 8 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len
24 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 9 | --des 'Exp' --itr 5 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_48 --model MTGNN --data custom --features M \ 12 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 13 | --des 'Exp' --itr 5 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_60 --model MTGNN --data custom --features M \ 16 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 60 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 17 | --des 'Exp' --itr 5 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_72 --model MTGNN --data custom --features M \ 20 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 0 --pred_len 72 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 21 | --des 'Exp' --itr 5 -------------------------------------------------------------------------------- /scripts/MTGNN/MTGNN_PEMS03.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_12 --model MTGNN --data custom --features M \ 4 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 5 | --des 'Exp' --itr 1 --batch_size 16 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48 --model MTGNN --data custom --features M \ 8 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 9 | --des 'Exp' --itr 1 --batch_size 16 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_192 --model MTGNN --data custom --features M \ 12 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 13 | --des 'Exp' --itr 1 --batch_size 16 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_384 --model MTGNN --data custom --features M \ 16 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 17 | --des 'Exp' --itr 1 --batch_size 16 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_768 --model MTGNN --data custom --features M \ 20 | --target '313344' --freq 't' --seq_len 96 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 21 | --des 'Exp' --itr 1 --batch_size 16 -------------------------------------------------------------------------------- /scripts/MTGNN/MTGNN_Stcok_apple.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_12 --model MTGNN --data custom --features M \ 4 | --target 'Close_AAPL' 
--freq 'd' --seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 5 | --des 'Exp' --itr 5 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48 --model MTGNN --data custom --features M \ 8 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 9 | --des 'Exp' --itr 5 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_96 --model MTGNN --data custom --features M \ 12 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 96 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 13 | --des 'Exp' --itr 5 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_288 --model MTGNN --data custom --features M \ 16 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 288 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 17 | --des 'Exp' --itr 5 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_480 --model MTGNN --data custom --features M \ 20 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 0 --pred_len 480 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 21 | --des 'Exp' --itr 5 -------------------------------------------------------------------------------- /scripts/MTGNN/MTGNN_WTH.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_12 --model MTGNN --data custom --features M \ 4 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 5 | --des 'Exp' --itr 5 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48 --model MTGNN --data custom --features M \ 8 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 9 | --des 'Exp' --itr 5 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_192 --model MTGNN --data custom --features M \ 12 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 13 | --des 'Exp' --itr 5 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_384 --model MTGNN --data custom --features M \ 16 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 17 | --des 'Exp' --itr 5 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_768 --model MTGNN --data custom --features M \ 20 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 21 | --des 'Exp' --itr 5 22 | -------------------------------------------------------------------------------- /scripts/MTGNN/MTGNN_myETT.sh: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_12 --model MTGNN --data ETTh1 --features M \ 4 | --seq_len 96 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48 --model MTGNN --data ETTh1 --features M \ 7 | --seq_len 96 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 8 | 9 | 10 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_192 --model MTGNN --data ETTh1 --features M \ 11 | --seq_len 96 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 12 | 13 | 14 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_384 --model MTGNN --data ETTh1 --features M \ 15 | --seq_len 96 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 16 | 17 | 18 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_768 --model MTGNN --data ETTh1 --features M \ 19 | --seq_len 96 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 20 | 21 | 22 | 23 | 24 | 25 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_12 --model MTGNN --data ETTh1 --features M \ 26 | #--seq_len 192 --label_len 0 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 27 | # 28 | # 29 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48 --model MTGNN --data ETTh1 --features M \ 30 | #--seq_len 192 --label_len 0 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 31 | # 32 | # 33 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_192 --model MTGNN --data ETTh1 --features M \ 34 | #--seq_len 192 --label_len 0 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 35 | # 36 | # 37 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_384 --model MTGNN --data ETTh1 --features M \ 38 | #--seq_len 192 --label_len 0 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 39 | # 40 | # 41 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_768 --model MTGNN --data ETTh1 --features M \ 42 | #--seq_len 192 --label_len 0 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Reformer/Reformer_COVID19_world.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py 
--is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_24_12_12 --model Reformer --data custom --features M \ 3 | #--target 'Increase rate' --freq 'd' --seq_len 24 --label_len 12 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 --bucket_size 4 --n_hashes 4 \ 4 | #--des 'Exp' --itr 1 --use_multi_gpu --devices '0,1' 5 | 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_12 --model Reformer --data custom --features M \ 8 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 9 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_24 --model Reformer --data custom --features M \ 12 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 24 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 13 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_48 --model Reformer --data custom --features M \ 16 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 17 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_60 --model Reformer --data custom --features M \ 20 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 60 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 21 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 22 | 23 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_72 --model Reformer --data custom --features M \ 24 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 72 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 25 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Reformer/Reformer_PEMS03.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_48_24_12 --model Reformer --data custom --features M \ 3 | #--target '313344' --freq 't' --seq_len 48 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 4 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_12 --model Reformer --data custom --features M \ 7 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 8 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_96_12 --model Reformer --data custom --features M \ 11 | #--target '313344' --freq 't' --seq_len 192 --label_len 96 --pred_len 12 
--e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 12 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_192_12 --model Reformer --data custom --features M \ 15 | #--target '313344' --freq 't' --seq_len 192 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 16 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | # 18 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_384_192_12 --model Reformer --data custom --features M \ 19 | #--target '313344' --freq 't' --seq_len 384 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 20 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 21 | # 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_384_384_12 --model Reformer --data custom --features M \ 23 | #--target '313344' --freq 't' --seq_len 384 --label_len 384 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 24 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 25 | 26 | 27 | 28 | 29 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_48 --model Reformer --data custom --features M \ 30 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 31 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 32 | 33 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_192 --model Reformer --data custom --features M \ 34 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 35 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 36 | 37 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_384 --model Reformer --data custom --features M \ 38 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 39 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 40 | 41 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_768 --model Reformer --data custom --features M \ 42 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 43 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Reformer/Reformer_Stock_apple.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_12 --model Reformer --data custom --features M \ 3 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 4 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path 
Stock_apple.csv --model_id Stock_96_48_48 --model Reformer --data custom --features M \ 7 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 8 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_96 --model Reformer --data custom --features M \ 11 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 96 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 12 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | 14 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_288 --model Reformer --data custom --features M \ 15 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 288 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 16 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | 18 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_480 --model Reformer --data custom --features M \ 19 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 480 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 20 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 21 | 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_384_384_12 --model Reformer --data custom --features M \ 23 | #--target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 24 | #--des 'Exp' --itr 1 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /scripts/Reformer/Reformer_WTH.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_48_24_12 --model Reformer --data custom --features M \ 3 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 48 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 4 | #--des 'Exp' --itr 1 5 | # 6 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_12 --model Reformer --data custom --features M \ 7 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 8 | #--des 'Exp' --itr 1 9 | # 10 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_192_96_12 --model Reformer --data custom --features M \ 11 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 192 --label_len 96 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 12 | #--des 'Exp' --itr 1 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_192_192_12 --model Reformer --data custom --features M \ 15 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 192 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 16 | #--des 'Exp' --itr 1 17 | # 18 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id 
WTH_384_192_12 --model Reformer --data custom --features M \ 19 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 384 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 20 | #--des 'Exp' --itr 1 21 | # 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_384_384_12 --model Reformer --data custom --features M \ 23 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 384 --label_len 384 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 24 | #--des 'Exp' --itr 1 25 | 26 | 27 | 28 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_12 --model Reformer --data custom --features M \ 29 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 30 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 31 | 32 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_48 --model Reformer --data custom --features M \ 33 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 34 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 35 | 36 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_192 --model Reformer --data custom --features M \ 37 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 38 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 39 | 40 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_384 --model Reformer --data custom --features M \ 41 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 42 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 43 | 44 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_768 --model Reformer --data custom --features M \ 45 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 46 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 47 | 48 | 49 | -------------------------------------------------------------------------------- /scripts/Reformer/Reformer_myETT.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48_12 --model Reformer --data ETTh1 --features M \ 3 | --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 4 | 5 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48_48 --model Reformer --data ETTh1 --features M \ 6 | --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 7 | 8 | 9 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48_192 --model Reformer --data ETTh1 
--features M \ 10 | --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 11 | 12 | 13 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48_384 --model Reformer --data ETTh1 --features M \ 14 | --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 15 | 16 | 17 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48_768 --model Reformer --data ETTh1 --features M \ 18 | --seq_len 96 --label_len 48 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Transformer/Transformer_COVID19_world.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_24_12_12 --model Transformer --data custom --features M \ 3 | #--target 'Increase rate' --freq 'd' --seq_len 24 --label_len 12 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 4 | #--des 'Exp' --itr 1 5 | 6 | 7 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_12 --model Transformer --data custom --features M \ 8 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 9 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 10 | 11 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_24 --model Transformer --data custom --features M \ 12 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 24 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 13 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 14 | 15 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_48 --model Transformer --data custom --features M \ 16 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 17 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 18 | 19 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_60 --model Transformer --data custom --features M \ 20 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 60 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 21 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 22 | 23 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path COVID19_world.csv --model_id COVID19_36_24_72 --model Transformer --data custom --features M \ 24 | --target 'Increase rate' --freq 'd' --seq_len 36 --label_len 24 --pred_len 72 --e_layers 2 --d_layers 1 --factor 3 --enc_in 4 --dec_in 4 --c_out 4 \ 25 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Transformer/Transformer_PEMS03.sh: 
-------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_48_24_12 --model Transformer --data custom --features M \ 3 | #--target '313344' --freq 't' --seq_len 48 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 4 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_12 --model Transformer --data custom --features M \ 7 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 8 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_96_12 --model Transformer --data custom --features M \ 11 | #--target '313344' --freq 't' --seq_len 192 --label_len 96 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 12 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_192_192_12 --model Transformer --data custom --features M \ 15 | #--target '313344' --freq 't' --seq_len 192 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 16 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | # 18 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_384_192_12 --model Transformer --data custom --features M \ 19 | #--target '313344' --freq 't' --seq_len 384 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 20 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 21 | # 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_384_384_12 --model Transformer --data custom --features M \ 23 | #--target '313344' --freq 't' --seq_len 384 --label_len 384 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 24 | #--des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 25 | 26 | 27 | 28 | 29 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_48 --model Transformer --data custom --features M \ 30 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 31 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 32 | 33 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_192 --model Transformer --data custom --features M \ 34 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 35 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 36 | 37 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_384 --model Transformer --data custom --features M \ 38 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 
358 \ 39 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 40 | 41 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path PEMS03_deal.csv --model_id PEMS03_96_48_768 --model Transformer --data custom --features M \ 42 | --target '313344' --freq 't' --seq_len 96 --label_len 48 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 358 --dec_in 358 --c_out 358 \ 43 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' -------------------------------------------------------------------------------- /scripts/Transformer/Transformer_Stock_apple.sh: -------------------------------------------------------------------------------- 1 | 2 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_12 --model Transformer --data custom --features M \ 3 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 4 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 5 | 6 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_48 --model Transformer --data custom --features M \ 7 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 8 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 9 | 10 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_96 --model Transformer --data custom --features M \ 11 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 96 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 12 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 13 | 14 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_288 --model Transformer --data custom --features M \ 15 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 288 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 16 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 17 | 18 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_96_48_480 --model Transformer --data custom --features M \ 19 | --target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 480 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 20 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 21 | 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path Stock_apple.csv --model_id Stock_384_384_12 --model Transformer --data custom --features M \ 23 | #--target 'Close_AAPL' --freq 'd' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 24 | #--des 'Exp' --itr 1 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /scripts/Transformer/Transformer_WTH.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_48_24_12 --model Transformer --data custom --features M \ 3 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 48 --label_len 24 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 4 | #--des 'Exp' --itr 1 5 | # 6 | #python 
-u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_12 --model Transformer --data custom --features M \ 7 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 8 | #--des 'Exp' --itr 1 9 | # 10 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_192_96_12 --model Transformer --data custom --features M \ 11 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 192 --label_len 96 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 12 | #--des 'Exp' --itr 1 13 | # 14 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_192_192_12 --model Transformer --data custom --features M \ 15 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 192 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 16 | #--des 'Exp' --itr 1 17 | # 18 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_384_192_12 --model Transformer --data custom --features M \ 19 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 384 --label_len 192 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 20 | #--des 'Exp' --itr 1 21 | # 22 | #python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_384_384_12 --model Transformer --data custom --features M \ 23 | #--target 'WetBulbCelsius' --freq 'h' --seq_len 384 --label_len 384 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 24 | #--des 'Exp' --itr 1 25 | 26 | 27 | 28 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_12 --model Transformer --data custom --features M \ 29 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 30 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 31 | 32 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_48 --model Transformer --data custom --features M \ 33 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 34 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 35 | 36 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_192 --model Transformer --data custom --features M \ 37 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 38 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 39 | 40 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_384 --model Transformer --data custom --features M \ 41 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 42 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 43 | 44 | python -u run.py --is_training 1 --root_path ./dataset/my_data/ --data_path WTH.csv --model_id WTH_96_48_768 --model Transformer --data custom --features M \ 45 | --target 'WetBulbCelsius' --freq 'h' --seq_len 96 --label_len 48 
--pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 12 --dec_in 12 --c_out 12 \ 46 | --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 47 | 48 | 49 | -------------------------------------------------------------------------------- /scripts/Transformer/Transformer_myETT.sh: -------------------------------------------------------------------------------- 1 | 2 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_48 --model Transformer --data ETTh1 --features M \ 3 | #--seq_len 192 --label_len 96 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 4 | # 5 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_48 --model Transformer --data ETTh1 --features M \ 6 | #--seq_len 192 --label_len 96 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 7 | # 8 | # 9 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_192 --model Transformer --data ETTh1 --features M \ 10 | #--seq_len 192 --label_len 96 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 11 | # 12 | # 13 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_384 --model Transformer --data ETTh1 --features M \ 14 | #--seq_len 192 --label_len 96 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 15 | # 16 | # 17 | #python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_192_768 --model Transformer --data ETTh1 --features M \ 18 | #--seq_len 192 --label_len 96 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 19 | 20 | 21 | 22 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_12 --model Transformer --data ETTh1 --features M \ 23 | --seq_len 96 --label_len 48 --pred_len 12 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 24 | 25 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_48 --model Transformer --data ETTh1 --features M \ 26 | --seq_len 96 --label_len 48 --pred_len 48 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 27 | 28 | 29 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_192 --model Transformer --data ETTh1 --features M \ 30 | --seq_len 96 --label_len 48 --pred_len 192 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 31 | 32 | 33 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ --data_path ETTh1.csv --model_id ETTh1_96_384 --model Transformer --data ETTh1 --features M \ 34 | --seq_len 96 --label_len 48 --pred_len 384 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1' 35 | 36 | 37 | python -u run.py --is_training 1 --root_path ./dataset/ETT-small/ 
--data_path ETTh1.csv --model_id ETTh1_96_768 --model Transformer --data ETTh1 --features M \
38 | --seq_len 96 --label_len 48 --pred_len 768 --e_layers 2 --d_layers 1 --factor 3 --enc_in 7 --dec_in 7 --c_out 7 --des 'Exp' --itr 5 --use_multi_gpu --devices '0,1'
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Masterleia/TSF_LSTF_Compare/74945de299f4c91d4ef21ad822c562c57b301f1b/utils/__init__.py
--------------------------------------------------------------------------------
/utils/masking.py:
--------------------------------------------------------------------------------
1 | import torch
2 | 
3 | 
4 | class TriangularCausalMask():  # standard causal mask: blocks attention to future positions
5 |     def __init__(self, B, L, device="cpu"):
6 |         mask_shape = [B, 1, L, L]
7 |         with torch.no_grad():
8 |             self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)
9 | 
10 |     @property
11 |     def mask(self):
12 |         return self._mask
13 | 
14 | 
15 | class ProbMask():  # causal mask for the queries sampled by Informer's ProbSparse attention
16 |     def __init__(self, B, H, L, index, scores, device="cpu"):
17 |         _mask = torch.ones(L, scores.shape[-1], dtype=torch.bool).to(device).triu(1)
18 |         _mask_ex = _mask[None, None, :].expand(B, H, L, scores.shape[-1])
19 |         indicator = _mask_ex[torch.arange(B)[:, None, None],
20 |                              torch.arange(H)[None, :, None],
21 |                              index, :].to(device)
22 |         self._mask = indicator.view(scores.shape).to(device)
23 | 
24 |     @property
25 |     def mask(self):
26 |         return self._mask
27 | 
--------------------------------------------------------------------------------
/utils/metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | 
4 | def RSE(pred, true):
5 |     return np.sqrt(np.sum((true - pred) ** 2)) / np.sqrt(np.sum((true - true.mean()) ** 2))
6 | 
7 | 
8 | def CORR(pred, true):  # mean per-series Pearson correlation
9 |     u = ((true - true.mean(0)) * (pred - pred.mean(0))).sum(0)
10 |     d = np.sqrt(((true - true.mean(0)) ** 2).sum(0) * ((pred - pred.mean(0)) ** 2).sum(0))  # Pearson denominator: product of sums, not sum of elementwise products
11 |     return (u / d).mean(-1)
12 | 
13 | 
14 | def MAE(pred, true):
15 |     return np.mean(np.abs(pred - true))
16 | 
17 | 
18 | def MSE(pred, true):
19 |     return np.mean((pred - true) ** 2)
20 | 
21 | 
22 | def RMSE(pred, true):
23 |     return np.sqrt(MSE(pred, true))
24 | 
25 | 
26 | def MAPE(pred, true):
27 |     return np.mean(np.abs((pred - true) / true))  # undefined where true == 0
28 | 
29 | 
30 | def MSPE(pred, true):
31 |     return np.mean(np.square((pred - true) / true))  # undefined where true == 0
32 | 
33 | 
34 | def metric(pred, true):
35 |     mae = MAE(pred, true)
36 |     mse = MSE(pred, true)
37 |     rmse = RMSE(pred, true)
38 |     mape = MAPE(pred, true)
39 |     mspe = MSPE(pred, true)
40 | 
41 |     return mae, mse, rmse, mape, mspe
42 | 
--------------------------------------------------------------------------------
/utils/timefeatures.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | 
3 | import numpy as np
4 | import pandas as pd
5 | from pandas.tseries import offsets
6 | from pandas.tseries.frequencies import to_offset
7 | 
8 | 
9 | class TimeFeature:
10 |     def __init__(self):
11 |         pass
12 | 
13 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
14 |         pass
15 | 
16 |     def __repr__(self):
17 |         return self.__class__.__name__ + "()"
18 | 
19 | 
20 | class SecondOfMinute(TimeFeature):
21 |     """Second of minute encoded as value between [-0.5, 0.5]"""
22 | 
23 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
24 |         return index.second / 59.0 - 0.5
25 | 
26 | 
27 | class MinuteOfHour(TimeFeature):
28 |     """Minute of hour encoded as value between [-0.5, 0.5]"""
29 | 
30 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
31 |         return index.minute / 59.0 - 0.5
32 | 
33 | 
34 | class HourOfDay(TimeFeature):
35 |     """Hour of day encoded as value between [-0.5, 0.5]"""
36 | 
37 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
38 |         return index.hour / 23.0 - 0.5
39 | 
40 | 
41 | class DayOfWeek(TimeFeature):
42 |     """Day of week encoded as value between [-0.5, 0.5]"""
43 | 
44 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
45 |         return index.dayofweek / 6.0 - 0.5
46 | 
47 | 
48 | class DayOfMonth(TimeFeature):
49 |     """Day of month encoded as value between [-0.5, 0.5]"""
50 | 
51 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
52 |         return (index.day - 1) / 30.0 - 0.5
53 | 
54 | 
55 | class DayOfYear(TimeFeature):
56 |     """Day of year encoded as value between [-0.5, 0.5]"""
57 | 
58 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
59 |         return (index.dayofyear - 1) / 365.0 - 0.5
60 | 
61 | 
62 | class MonthOfYear(TimeFeature):
63 |     """Month of year encoded as value between [-0.5, 0.5]"""
64 | 
65 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
66 |         return (index.month - 1) / 11.0 - 0.5
67 | 
68 | 
69 | class WeekOfYear(TimeFeature):
70 |     """Week of year encoded as value between [-0.5, 0.5]"""
71 | 
72 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
73 |         return (index.isocalendar().week - 1) / 52.0 - 0.5
74 | 
75 | 
76 | def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
77 |     """
78 |     Returns a list of time features that will be appropriate for the given frequency string.
79 |     Parameters
80 |     ----------
81 |     freq_str
82 |         Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
83 |     """
84 | 
85 |     features_by_offsets = {
86 |         offsets.YearEnd: [],
87 |         offsets.QuarterEnd: [MonthOfYear],
88 |         offsets.MonthEnd: [MonthOfYear],
89 |         offsets.Week: [DayOfMonth, WeekOfYear],
90 |         offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
91 |         offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
92 |         offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
93 |         offsets.Minute: [
94 |             MinuteOfHour,
95 |             HourOfDay,
96 |             DayOfWeek,
97 |             DayOfMonth,
98 |             DayOfYear,
99 |         ],
100 |         offsets.Second: [
101 |             SecondOfMinute,
102 |             MinuteOfHour,
103 |             HourOfDay,
104 |             DayOfWeek,
105 |             DayOfMonth,
106 |             DayOfYear,
107 |         ],
108 |     }
109 | 
110 |     offset = to_offset(freq_str)
111 | 
112 |     for offset_type, feature_classes in features_by_offsets.items():
113 |         if isinstance(offset, offset_type):
114 |             return [cls() for cls in feature_classes]
115 | 
116 |     supported_freq_msg = f"""
117 |     Unsupported frequency {freq_str}
118 |     The following frequencies are supported:
119 |         Y - yearly
120 |             alias: A
121 |         M - monthly
122 |         W - weekly
123 |         D - daily
124 |         B - business days
125 |         H - hourly
126 |         T - minutely
127 |             alias: min
128 |         S - secondly
129 |     """
130 |     raise RuntimeError(supported_freq_msg)
131 | 
132 | 
133 | def time_features(dates, freq='h'):
134 |     return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)])
135 | 
--------------------------------------------------------------------------------
/utils/tools.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import matplotlib.pyplot as plt
4 | 
5 | plt.switch_backend('agg')
6 | 
7 | 
8 | def adjust_learning_rate(optimizer, epoch, args):
9 |     lr_adjust = {}  # fallback: unrecognized lradj is a no-op (alt schedule: lr = args.learning_rate * (0.2 ** (epoch // 2)))
10 |     if args.lradj == 'type1':
11 |         lr_adjust = {epoch: args.learning_rate * (0.5 ** ((epoch - 1) // 1))}
12 |     elif args.lradj == 'type2':
13 |         lr_adjust = {
14 |             2: 5e-5, 4: 1e-5, 6: 5e-6, 8: 1e-6,
15 |             10: 5e-7, 15: 1e-7, 20: 5e-8
16 |         }
17 |     if epoch in lr_adjust.keys():
18 |         lr = lr_adjust[epoch]
19 |         for param_group in optimizer.param_groups:
20 |             param_group['lr'] = lr
21 |         print('Updating learning rate to {}'.format(lr))
22 | 
23 | 
24 | class EarlyStopping:
25 |     def __init__(self, patience=7, verbose=False, delta=0):
26 |         self.patience = patience
27 |         self.verbose = verbose
28 |         self.counter = 0
29 |         self.best_score = None
30 |         self.early_stop = False
31 |         self.val_loss_min = np.inf  # np.inf: np.Inf was removed in NumPy 2.0
32 |         self.delta = delta
33 | 
34 |     def __call__(self, val_loss, model, path):
35 |         score = -val_loss
36 |         if self.best_score is None:
37 |             self.best_score = score
38 |             self.save_checkpoint(val_loss, model, path)
39 |         elif score < self.best_score + self.delta:
40 |             self.counter += 1
41 |             print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
42 |             if self.counter >= self.patience:
43 |                 self.early_stop = True
44 |         else:
45 |             self.best_score = score
46 |             self.save_checkpoint(val_loss, model, path)
47 |             self.counter = 0
48 | 
49 |     def save_checkpoint(self, val_loss, model, path):
50 |         if self.verbose:
51 |             print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')
52 |         torch.save(model.state_dict(), path + '/' + 'checkpoint.pth')
53 |         self.val_loss_min = val_loss
54 | 
55 | 
56 | class dotdict(dict):
57 |     """dot.notation access to dictionary attributes"""
58 |     __getattr__ = dict.get
59 |     __setattr__ = dict.__setitem__
60 |     __delattr__ = dict.__delitem__
61 | 
62 | 
63 | class StandardScaler():
64 |     def __init__(self, mean, std):
65 |         self.mean = mean
66 |         self.std = std
67 | 
68 |     def transform(self, data):
69 |         return (data - self.mean) / self.std
70 | 
71 |     def inverse_transform(self, data):
72 |         return (data * self.std) + self.mean
73 | 
74 | 
75 | def visual(true, preds=None, name='./pic/test.pdf'):
76 |     """
77 |     Results visualization
78 |     """
79 |     plt.figure()
80 |     plt.plot(true, label='GroundTruth', linewidth=2)
81 |     if preds is not None:
82 |         plt.plot(preds, label='Prediction', linewidth=2)
83 |     plt.legend()
84 |     plt.savefig(name, bbox_inches='tight')
85 | 
--------------------------------------------------------------------------------
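
Editor's appendix, usage sketch (not part of the repository): the utilities above are the pieces exp/exp_main.py leans on during training and evaluation. The snippet below is a minimal, self-contained illustration of how they fit together; the dates, tensor shapes, loss values, and output path are made up for the example, and it assumes the script is run from the repository root so that the utils package is importable.

import numpy as np
import pandas as pd
import torch

from utils.timefeatures import time_features
from utils.metrics import metric
from utils.tools import EarlyStopping, StandardScaler

# Calendar features for an hourly index: one row per TimeFeature, one column per timestamp.
dates = pd.date_range('2021-01-01', periods=96, freq='h')  # synthetic index
stamp = time_features(dates, freq='h')  # hourly -> HourOfDay, DayOfWeek, DayOfMonth, DayOfYear
print(stamp.shape)  # (4, 96)

# Normalization round trip with the repo's StandardScaler.
data = np.random.randn(96, 7)
scaler = StandardScaler(mean=data.mean(0), std=data.std(0))
assert np.allclose(scaler.inverse_transform(scaler.transform(data)), data)

# Forecast-quality metrics on dummy predictions.
pred, true = np.random.randn(12, 7), np.random.randn(12, 7)
mae, mse, rmse, mape, mspe = metric(pred, true)
print(f'MAE {mae:.4f}  MSE {mse:.4f}  RMSE {rmse:.4f}')

# Early stopping: training halts after `patience` epochs without validation improvement;
# each improvement checkpoints the model to <path>/checkpoint.pth.
model = torch.nn.Linear(7, 7)
stopper = EarlyStopping(patience=3, verbose=False)
for epoch, val_loss in enumerate([0.9, 0.8, 0.85, 0.84, 0.83, 0.82]):
    stopper(val_loss, model, path='.')
    if stopper.early_stop:
        print(f'early stop at epoch {epoch}')
        break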