├── utils
│   ├── __init__.py
│   ├── initialize.py
│   ├── metric.py
│   ├── common_tools.py
│   └── data_convert.py
├── src
│   ├── model
│   │   ├── __init__.py
│   │   ├── replay.py
│   │   ├── gcn_conv.py
│   │   ├── ewc.py
│   │   └── detect_default.py
│   ├── dataer
│   │   └── SpatioTemporalDataset.py
│   └── trainer
│       ├── stkec_trainer.py
│       └── default_trainer.py
├── framework.jpg
├── STRAP_POSTER.pdf
├── font
│   └── Times New Roman.ttf
├── .gitignore
├── conf
│   ├── AIR
│   │   ├── retrain.json
│   │   ├── rap.json
│   │   ├── pre.json
│   │   ├── ragraph.json
│   │   ├── stlora.json
│   │   ├── eac.json
│   │   ├── graphpro.json
│   │   ├── stadapter.json
│   │   ├── ewc.json
│   │   ├── replay.json
│   │   ├── trafficstream.json
│   │   ├── pecpm.json
│   │   └── stkec.json
│   ├── PEMS
│   │   ├── retrain.json
│   │   ├── rap.json
│   │   ├── stlora.json
│   │   ├── eac.json
│   │   ├── graphpro.json
│   │   ├── stadapter.json
│   │   ├── pre.json
│   │   ├── replay.json
│   │   ├── ewc.json
│   │   ├── trafficstream.json
│   │   ├── pecpm.json
│   │   └── stkec.json
│   └── ENERGY-Wind
│       ├── retrain.json
│       ├── rap.json
│       ├── stlora.json
│       ├── eac.json
│       ├── pre.json
│       ├── ragraph.json
│       ├── graphpro.json
│       ├── stadapter.json
│       ├── ewc.json
│       ├── replay.json
│       ├── trafficstream.json
│       ├── pecpm.json
│       └── stkec.json
├── README.md
├── log
│   └── ENERGY-Wind
│       ├── pre_st-24
│       │   └── pre_st.log
│       ├── pre_st-100
│       │   └── pre_st.log
│       ├── pre_st-622
│       │   └── pre_st.log
│       ├── retrain_st-100
│       │   └── retrain_st.log
│       ├── eac_st-24
│       │   └── eac_st.log
│       ├── stlora_st-622
│       │   └── stlora_st.log
│       ├── pecpm_st-622
│       │   └── pecpm_st.log
│       ├── retrain_st-622
│       │   └── retrain_st.log
│       ├── eac_st-100
│       │   └── eac_st.log
│       ├── stadapter_st-622
│       │   └── stadapter_st.log
│       ├── stlora_st-24
│       │   └── stlora_st.log
│       ├── graphpro_st-24
│       │   └── graphpro_st.log
│       └── replay_st-24
│           └── replay_st.log
└── environment.yaml
/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/model/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/framework.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HoweyZ/STRAP/HEAD/framework.jpg
--------------------------------------------------------------------------------
/STRAP_POSTER.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HoweyZ/STRAP/HEAD/STRAP_POSTER.pdf
--------------------------------------------------------------------------------
/font/Times New Roman.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HoweyZ/STRAP/HEAD/font/Times New Roman.ttf
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.csv
2 | *.pdf
3 | data/
4 |
5 | # Ignore all `.log` files
6 | tmp.*
7 | repo.tar.gz
8 | *.backup
9 |
10 | # Ignore Python cache files in all paths
11 | **/__pycache__/
12 | *.py[cod]
13 | *$py.class
14 |
15 | .env
16 |
17 | checkpoints/
18 | experiments/
19 | archive/
20 |
21 |
22 | *.pkl
--------------------------------------------------------------------------------
/src/model/replay.py:
--------------------------------------------------------------------------------
1 | import random
2 | import numpy as np
3 |
4 |
5 | def replay_node_selection(args, influence_node_score, topk):
6 | if args.replay_strategy == 'random':
7 | return random_sampling(len(influence_node_score), topk)
8 | elif args.replay_strategy == 'inforeplay':
9 | return np.argpartition(np.asarray(influence_node_score), topk)[:topk]
10 | else:
11 |         args.logger.info("replay node selection mode illegal!")
12 |
13 | def random_sampling(data_size, num_samples):
14 | return np.random.choice(data_size, num_samples)
15 |
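16 | # Example (hypothetical values, not from the original code): with args.replay_strategy == 'inforeplay',
17 | # replay_node_selection(args, influence_node_score=[0.3, 0.1, 0.5, 0.2], topk=2) returns the indices
18 | # of the two smallest influence scores, i.e. {1, 3} (order not guaranteed by np.argpartition).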
--------------------------------------------------------------------------------
/conf/AIR/retrain.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/AIR/RawData/",
16 | "save_data_path": "data/AIR/FastData/",
17 | "graph_path": "data/AIR/graph/",
18 | "model_path": "log/AIR/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 |
33 |
34 | "init": false,
35 | "train": 1,
36 | "auto_test": 0,
37 |
38 | "strategy": "retrain",
39 |
40 | "detect": false,
41 |
42 | "ewc": false,
43 |
44 | "replay": false
45 | }
--------------------------------------------------------------------------------
/conf/AIR/rap.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/AIR/RawData/",
16 | "save_data_path": "data/AIR/FastData/",
17 | "graph_path": "data/AIR/graph/",
18 | "model_path": "log/AIR/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "RAP",
33 |
34 | "init": true,
35 | "train": 1,
36 | "auto_test": 0,
37 |
38 | "strategy": "retrain",
39 |
40 | "detect": false,
41 |
42 | "ewc": false,
43 |
44 | "replay": false
45 | }
--------------------------------------------------------------------------------
/conf/AIR/pre.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/AIR/RawData/",
17 | "save_data_path": "data/AIR/FastData/",
18 | "graph_path": "data/AIR/graph/",
19 | "model_path": "log/AIR/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 |
34 | "init": false,
35 | "train": 0,
36 | "auto_test": 1,
37 |
38 |
39 | "strategy": "pretrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
47 |
--------------------------------------------------------------------------------
/conf/PEMS/retrain.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/PEMS/RawData/",
17 | "save_data_path": "data/PEMS/FastData/",
18 | "graph_path": "data/PEMS/graph/",
19 | "model_path": "log/PEMS/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 |
34 |
35 | "init": false,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/AIR/ragraph.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/AIR/RawData/",
16 | "save_data_path": "data/AIR/FastData/",
17 | "graph_path": "data/AIR/graph/",
18 | "model_path": "log/AIR/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "RAGraph",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/AIR/stlora.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/AIR/RawData/",
16 | "save_data_path": "data/AIR/FastData/",
17 | "graph_path": "data/AIR/graph/",
18 | "model_path": "log/AIR/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "STLoRA",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/PEMS/rap.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "gelu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/PEMS/RawData/",
16 | "save_data_path": "data/PEMS/FastData/",
17 | "graph_path": "data/PEMS/graph/",
18 | "model_path": "log/PEMS/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "RAP",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/AIR/eac.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/AIR/RawData/",
16 | "save_data_path": "data/AIR/FastData/",
17 | "graph_path": "data/AIR/graph/",
18 | "model_path": "log/AIR/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "EAC",
33 |
34 | "rank": 6,
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/AIR/graphpro.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/AIR/RawData/",
16 | "save_data_path": "data/AIR/FastData/",
17 | "graph_path": "data/AIR/graph/",
18 | "model_path": "log/AIR/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "GraphPro",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/PEMS/stlora.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/PEMS/RawData/",
16 | "save_data_path": "data/PEMS/FastData/",
17 | "graph_path": "data/PEMS/graph/",
18 | "model_path": "log/PEMS/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "STLoRA",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/AIR/stadapter.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/AIR/RawData/",
16 | "save_data_path": "data/AIR/FastData/",
17 | "graph_path": "data/AIR/graph/",
18 | "model_path": "log/AIR/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "ST-Adapter",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/PEMS/eac.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/PEMS/RawData/",
16 | "save_data_path": "data/PEMS/FastData/",
17 | "graph_path": "data/PEMS/graph/",
18 | "model_path": "log/PEMS/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "EAC",
33 |
34 | "rank": 6,
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/PEMS/graphpro.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/PEMS/RawData/",
16 | "save_data_path": "data/PEMS/FastData/",
17 | "graph_path": "data/PEMS/graph/",
18 | "model_path": "log/PEMS/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "GraphPro",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/PEMS/stadapter.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/PEMS/RawData/",
16 | "save_data_path": "data/PEMS/FastData/",
17 | "graph_path": "data/PEMS/graph/",
18 | "model_path": "log/PEMS/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "ST-Adapter",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/retrain.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 |
34 |
35 | "init": false,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/PEMS/pre.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.1,
5 | "lr": 0.01,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/PEMS/RawData/",
17 | "save_data_path": "data/PEMS/FastData/",
18 | "graph_path": "data/PEMS/graph/",
19 | "model_path": "log/PEMS/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 |
34 |
35 |
36 | "init": false,
37 | "train": 0,
38 | "auto_test": 1,
39 |
40 |
41 | "strategy": "pretrain",
42 |
43 | "detect": false,
44 |
45 | "ewc": false,
46 |
47 | "replay": false
48 | }
49 |
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/rap.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.1,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/ENERGY-Wind/RawData/",
16 | "save_data_path": "data/ENERGY-Wind/FastData/",
17 | "graph_path": "data/ENERGY-Wind/graph/",
18 | "model_path": "log/ENERGY-Wind/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "RAP",
33 |
34 | "init": true,
35 | "train": 1,
36 | "auto_test": 0,
37 |
38 | "strategy": "retrain",
39 |
40 | "detect": false,
41 |
42 | "ewc": false,
43 |
44 | "replay": false
45 | }
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/stlora.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/ENERGY-Wind/RawData/",
16 | "save_data_path": "data/ENERGY-Wind/FastData/",
17 | "graph_path": "data/ENERGY-Wind/graph/",
18 | "model_path": "log/ENERGY-Wind/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "STLoRA",
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/eac.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 | "raw_data_path": "data/ENERGY-Wind/RawData/",
16 | "save_data_path": "data/ENERGY-Wind/FastData/",
17 | "graph_path": "data/ENERGY-Wind/graph/",
18 | "model_path": "log/ENERGY-Wind/",
19 | "gcn":{
20 | "in_channel": 12,
21 | "out_channel": 12,
22 | "hidden_channel": 64
23 | },
24 |
25 | "tcn":{
26 | "in_channel": 1,
27 | "out_channel": 1,
28 | "kernel_size": 3,
29 | "dilation": 1
30 | },
31 |
32 | "method": "EAC",
33 |
34 | "rank": 6,
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 | "strategy": "retrain",
40 |
41 | "detect": false,
42 |
43 | "ewc": false,
44 |
45 | "replay": false
46 | }
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/pre.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 |
34 |
35 |
36 | "init": false,
37 | "train": 0,
38 | "auto_test": 1,
39 |
40 |
41 | "strategy": "pretrain",
42 |
43 | "detect": false,
44 |
45 | "ewc": false,
46 |
47 | "replay": false
48 | }
49 |
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/ragraph.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "RAGraph",
34 |
35 |
36 | "init": true,
37 | "train": 1,
38 | "auto_test": 0,
39 |
40 | "strategy": "retrain",
41 |
42 | "detect": false,
43 |
44 | "ewc": false,
45 |
46 | "replay": false
47 | }
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/graphpro.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "GraphPro",
34 |
35 |
36 | "init": true,
37 | "train": 1,
38 | "auto_test": 0,
39 |
40 | "strategy": "retrain",
41 |
42 | "detect": false,
43 |
44 | "ewc": false,
45 |
46 | "replay": false
47 | }
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/stadapter.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "ST-Adapter",
34 |
35 |
36 | "init": true,
37 | "train": 1,
38 | "auto_test": 0,
39 |
40 | "strategy": "retrain",
41 |
42 | "detect": false,
43 |
44 | "ewc": false,
45 |
46 | "replay": false
47 | }
48 |
--------------------------------------------------------------------------------
/src/dataer/SpatioTemporalDataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from torch_geometric.data import Data, Dataset
4 |
5 |
6 | class SpatioTemporalDataset(Dataset):
7 | def __init__(self, inputs, split, x='', y='', edge_index='', mode='default'):
8 | if mode == 'default':
9 | self.x = inputs[split+'_x'] # [T, Len, N]
10 | self.y = inputs[split+'_y'] # [T, Len, N]
11 | else:
12 | self.x = x
13 | self.y = y
14 |
15 | def __len__(self):
16 | return self.x.shape[0]
17 |
18 | def __getitem__(self, index):
19 | x = torch.Tensor(self.x[index].T)
20 | y = torch.Tensor(self.y[index].T)
21 |         return Data(x=x, y=y) # Returns a Data object containing input features and targets; note that [batch, Node, Step] is converted to [batch * Node, Step] when the graphs are batched
22 |
23 | class continue_learning_Dataset(Dataset):
24 | def __init__(self, inputs):
25 | self.x = inputs # [T, Len, N]
26 |
27 | def __len__(self):
28 | return self.x.shape[0]
29 |
30 | def __getitem__(self, index):
31 | x = torch.Tensor(self.x[index].T)
32 | return Data(x=x)
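33 | 
34 | # Shape note (illustrative; batching behavior assumed from torch_geometric): each __getitem__
35 | # transposes a [Len, N] slice to [N, Len], so Data.x holds one window per node; when these Data
36 | # objects are batched (e.g. by a PyG DataLoader) the node dimensions are concatenated,
37 | # giving tensors of shape [batch * N, Len].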
--------------------------------------------------------------------------------
/conf/PEMS/replay.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/PEMS/RawData/",
17 | "save_data_path": "data/PEMS/FastData/",
18 | "graph_path": "data/PEMS/graph/",
19 | "model_path": "log/PEMS/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "init": true,
34 | "train": 1,
35 | "auto_test": 0,
36 | "method":"TrafficStream",
37 |
38 | "strategy": "incremental",
39 | "increase": true,
40 | "num_hops": 2,
41 |
42 | "detect": false,
43 | "detect_strategy": "feature",
44 |
45 | "ewc": false,
46 | "ewc_strategy": "ewc",
47 | "ewc_lambda": 0.0001,
48 |
49 | "replay": true,
50 | "replay_strategy": "inforeplay"
51 | }
52 |
--------------------------------------------------------------------------------
/conf/AIR/ewc.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.01,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/AIR/RawData/",
17 | "save_data_path": "data/AIR/FastData/",
18 | "graph_path": "data/AIR/graph/",
19 | "model_path": "log/AIR/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "TrafficStream",
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": true,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": true,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": false,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/AIR/replay.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.01,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/AIR/RawData/",
17 | "save_data_path": "data/AIR/FastData/",
18 | "graph_path": "data/AIR/graph/",
19 | "model_path": "log/AIR/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "TrafficStream",
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": false,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": false,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": true,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/PEMS/ewc.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/PEMS/RawData/",
17 | "save_data_path": "data/PEMS/FastData/",
18 | "graph_path": "data/PEMS/graph/",
19 | "model_path": "log/PEMS/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 | "method":"TrafficStream",
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": true,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": true,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": false,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/AIR/trafficstream.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.01,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/AIR/RawData/",
17 | "save_data_path": "data/AIR/FastData/",
18 | "graph_path": "data/AIR/graph/",
19 | "model_path": "log/AIR/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "TrafficStream",
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": true,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": true,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": true,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/PEMS/trafficstream.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/PEMS/RawData/",
17 | "save_data_path": "data/PEMS/FastData/",
18 | "graph_path": "data/PEMS/graph/",
19 | "model_path": "log/PEMS/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 |
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 | "method":"TrafficStream",
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": true,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": true,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": true,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/ewc.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "TrafficStream",
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": false,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": true,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": false,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/replay.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "TrafficStream",
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": false,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": false,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": true,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/trafficstream.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "data_process": 0,
15 |
16 | "raw_data_path": "data/ENERGY-Wind/RawData/",
17 | "save_data_path": "data/ENERGY-Wind/FastData/",
18 | "graph_path": "data/ENERGY-Wind/graph/",
19 | "model_path": "log/ENERGY-Wind/",
20 | "gcn":{
21 | "in_channel": 12,
22 | "out_channel": 12,
23 | "hidden_channel": 64
24 | },
25 |
26 | "tcn":{
27 | "in_channel": 1,
28 | "out_channel": 1,
29 | "kernel_size": 3,
30 | "dilation": 1
31 | },
32 |
33 | "method": "TrafficStream",
34 |
35 | "init": true,
36 | "train": 1,
37 | "auto_test": 0,
38 |
39 |
40 | "strategy": "incremental",
41 | "increase": true,
42 | "num_hops": 2,
43 |
44 | "detect": true,
45 | "detect_strategy": "feature",
46 |
47 | "ewc": true,
48 | "ewc_strategy": "ewc",
49 | "ewc_lambda": 0.0001,
50 |
51 | "replay": true,
52 | "replay_strategy": "inforeplay"
53 | }
54 |
--------------------------------------------------------------------------------
/conf/PEMS/pecpm.json:
--------------------------------------------------------------------------------
1 | {
2 | "method": "PECPM",
3 | "begin_year": 2011,
4 | "end_year": 2017,
5 | "year": 2011,
6 |
7 | "dropout": 0.0,
8 | "lr": 0.03,
9 | "batch_size": 128,
10 | "epoch": 100,
11 | "gpuid": 1,
12 | "loss": "mse",
13 | "activation": "relu",
14 | "scheduler": "epo",
15 |
16 | "y_len": 12,
17 | "x_len": 12,
18 |
19 | "data_process": 0,
20 | "raw_data_path": "data/PEMS/RawData/",
21 | "save_data_path": "data/PEMS/FastData/",
22 | "graph_path": "data/PEMS/graph/",
23 | "model_path": "log/PEMS/",
24 |
25 | "gcn": {
26 | "in_channel": 12,
27 | "out_channel": 12,
28 | "hidden_channel": 64
29 | },
30 |
31 | "tcn": {
32 | "in_channel": 1,
33 | "out_channel": 1,
34 | "kernel_size": 3,
35 | "dilation": 1
36 | },
37 |
38 |
39 | "init": true,
40 | "train": 1,
41 | "auto_test": 0,
42 | "strategy": "retrain",
43 |
44 | "detect": false,
45 | "ewc": false,
46 | "replay": false,
47 |
48 | "attention_weight": {
49 | "0": 5,
50 | "1": 5,
51 | "2": 5,
52 | "3": 5
53 | },
54 |
55 | "last_clusterc": null,
56 | "cluster_num": 3,
57 | "pattern_matching": true,
58 | "max_patterns": 1000
59 | }
60 |
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/pecpm.json:
--------------------------------------------------------------------------------
1 | {
2 | "method": "PECPM",
3 | "begin_year": 0,
4 | "end_year": 3,
5 | "year": 0,
6 |
7 | "dropout": 0.0,
8 | "lr": 0.03,
9 | "batch_size": 128,
10 | "epoch": 100,
11 | "gpuid": 1,
12 | "loss": "mse",
13 | "activation": "relu",
14 | "scheduler": "epo",
15 |
16 | "y_len": 12,
17 | "x_len": 12,
18 |
19 | "data_process": 0,
20 | "raw_data_path": "data/ENERGY-Wind/RawData/",
21 | "save_data_path": "data/ENERGY-Wind/FastData/",
22 | "graph_path": "data/ENERGY-Wind/graph/",
23 | "model_path": "log/ENERGY-Wind/",
24 |
25 | "gcn": {
26 | "in_channel": 12,
27 | "out_channel": 12,
28 | "hidden_channel": 64
29 | },
30 |
31 | "tcn": {
32 | "in_channel": 1,
33 | "out_channel": 1,
34 | "kernel_size": 3,
35 | "dilation": 1
36 | },
37 |
38 |
39 | "init": true,
40 | "train": 1,
41 | "auto_test": 0,
42 | "strategy": "retrain",
43 |
44 | "detect": false,
45 | "ewc": false,
46 | "replay": false,
47 |
48 | "attention_weight": {
49 | "0": 5,
50 | "1": 5,
51 | "2": 5,
52 | "3": 5
53 | },
54 |
55 | "last_clusterc": null,
56 | "cluster_num": 3,
57 | "pattern_matching": true,
58 | "max_patterns": 1000
59 | }
60 |
--------------------------------------------------------------------------------
/conf/AIR/pecpm.json:
--------------------------------------------------------------------------------
1 | {
2 | "method": "PECPM",
3 | "begin_year": 2016,
4 | "end_year": 2019,
5 | "year": 2016,
6 |
7 | "dropout": 0.0,
8 | "lr": 0.03,
9 | "batch_size": 128,
10 | "epoch": 100,
11 | "gpuid": 1,
12 | "loss": "mse",
13 | "activation": "relu",
14 | "scheduler": "epo",
15 |
16 | "y_len": 12,
17 | "x_len": 12,
18 |
19 | "data_process": 0,
20 | "raw_data_path": "data/AIR/RawData/",
21 | "save_data_path": "data/AIR/FastData/",
22 | "graph_path": "data/AIR/graph/",
23 | "model_path": "log/AIR/",
24 |
25 | "gcn": {
26 | "in_channel": 12,
27 | "out_channel": 12,
28 | "hidden_channel": 64
29 | },
30 |
31 | "tcn": {
32 | "in_channel": 1,
33 | "out_channel": 1,
34 | "kernel_size": 3,
35 | "dilation": 1
36 | },
37 |
38 |
39 |
40 | "init": true,
41 | "train": 1,
42 | "auto_test": 0,
43 | "strategy": "retrain",
44 | "increase": true,
45 | "num_hops": 2,
46 |
47 |
48 | "detect": false,
49 | "ewc": false,
50 | "replay": false,
51 |
52 | "attention_weight": {
53 | "0": 5,
54 | "1": 5,
55 | "2": 5,
56 | "3": 5
57 | },
58 |
59 | "last_clusterc": null,
60 | "cluster_num": 3,
61 | "pattern_matching": true,
62 | "max_patterns": 1000
63 | }
64 |
--------------------------------------------------------------------------------
/conf/PEMS/stkec.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2011,
3 | "end_year": 2017,
4 | "dropout": 0.0,
5 | "lr": 0.03,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "influe_length": 168,
15 | "data_process": 0,
16 | "batch_influ_size":4,
17 | "beita": 1,
18 | "cluster": 24,
19 | "raw_data_path": "data/PEMS/RawData/",
20 | "save_data_path": "data/PEMS/FastData/",
21 | "graph_path": "data/PEMS/graph/",
22 | "model_path": "log/PEMS/",
23 | "influence_path":"Please enter 'outdir' in your path",
24 | "gcn":{
25 | "in_channel": 12,
26 | "out_channel": 12,
27 | "hidden_channel": 64
28 | },
29 |
30 | "tcn":{
31 | "in_channel": 1,
32 | "out_channel": 1,
33 | "kernel_size": 3,
34 | "dilation": 1
35 | },
36 |
37 |
38 |
39 | "init": true,
40 | "train": 1,
41 | "auto_test": 0,
42 |
43 | "strategy": "incremental",
44 | "increase": true,
45 | "num_hops": 2,
46 |
47 | "detect": true,
48 | "detect_strategy": "feature",
49 |
50 | "detect_infl_strategy": true,
51 | "infl_topk": 10,
52 |
53 | "adp_adj": false,
54 | "skip_dim": 64,
55 | "end_dim": 64,
56 | "hidden_channels": 32,
57 | "dilation_channels": 64,
58 | "residual_channels": 64,
59 | "input_dim": 1,
60 | "output_dim": 1,
61 |
62 | "ewc": true,
63 | "ewc_strategy": "ewc",
64 | "ewc_lambda":0.0005,
65 |
66 | "replay": true,
67 | "replay_strategy": "random",
68 | "repaly_num_samples": 100
69 | }
--------------------------------------------------------------------------------
/conf/AIR/stkec.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 2016,
3 | "end_year": 2019,
4 | "dropout": 0.0,
5 | "lr": 0.01,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "influe_length": 168,
15 | "data_process": 0,
16 | "batch_influ_size":4,
17 | "beita": 1,
18 | "cluster": 24,
19 | "raw_data_path": "data/AIR/RawData/",
20 | "save_data_path": "data/AIR/FastData/",
21 | "graph_path": "data/AIR/graph/",
22 | "model_path": "log/AIR/",
23 | "influence_path":"Please enter 'outdir' in your path",
24 | "gcn":{
25 | "in_channel": 12,
26 | "out_channel": 12,
27 | "hidden_channel": 64
28 | },
29 |
30 | "tcn":{
31 | "in_channel": 1,
32 | "out_channel": 1,
33 | "kernel_size": 3,
34 | "dilation": 1
35 | },
36 |
37 | "method": "STKEC",
38 |
39 | "init": true,
40 | "train": 1,
41 | "auto_test": 0,
42 |
43 | "strategy": "incremental",
44 | "increase": true,
45 | "num_hops": 2,
46 |
47 | "detect": true,
48 | "detect_strategy": "feature",
49 |
50 | "detect_infl_strategy": true,
51 | "infl_topk": 10,
52 |
53 | "adp_adj": false,
54 | "skip_dim": 64,
55 | "end_dim": 64,
56 | "hidden_channels": 32,
57 | "dilation_channels": 64,
58 | "residual_channels": 64,
59 | "input_dim": 1,
60 | "output_dim": 1,
61 |
62 | "ewc": true,
63 | "ewc_strategy": "ewc",
64 | "ewc_lambda":0.0005,
65 |
66 | "replay": true,
67 | "replay_strategy": "random",
68 | "repaly_num_samples": 100
69 | }
--------------------------------------------------------------------------------
/conf/ENERGY-Wind/stkec.json:
--------------------------------------------------------------------------------
1 | {
2 | "begin_year": 0,
3 | "end_year": 3,
4 | "dropout": 0.0,
5 | "lr": 0.01,
6 | "batch_size": 128,
7 | "epoch": 100,
8 | "gpuid": 1,
9 | "loss": "mse",
10 | "activation": "relu",
11 | "scheduler": "epo",
12 | "y_len": 12,
13 | "x_len": 12,
14 | "influe_length": 168,
15 | "data_process": 0,
16 | "batch_influ_size":4,
17 | "beita": 1,
18 | "cluster": 24,
19 | "raw_data_path": "data/ENERGY-Wind/RawData/",
20 | "save_data_path": "data/ENERGY-Wind/FastData/",
21 | "graph_path": "data/ENERGY-Wind/graph/",
22 | "model_path": "log/ENERGY-Wind/",
23 | "influence_path":"Please enter 'outdir' in your path",
24 | "gcn":{
25 | "in_channel": 12,
26 | "out_channel": 12,
27 | "hidden_channel": 64
28 | },
29 |
30 | "tcn":{
31 | "in_channel": 1,
32 | "out_channel": 1,
33 | "kernel_size": 3,
34 | "dilation": 1
35 | },
36 |
37 |
38 |
39 | "init": true,
40 | "train": 1,
41 | "auto_test": 0,
42 |
43 | "strategy": "incremental",
44 | "increase": true,
45 | "num_hops": 2,
46 |
47 | "detect": true,
48 | "detect_strategy": "feature",
49 |
50 | "detect_infl_strategy": true,
51 | "infl_topk": 10,
52 |
53 | "adp_adj": false,
54 | "skip_dim": 64,
55 | "end_dim": 64,
56 | "hidden_channels": 32,
57 | "dilation_channels": 64,
58 | "residual_channels": 64,
59 | "input_dim": 1,
60 | "output_dim": 1,
61 |
62 | "ewc": false,
63 | "ewc_strategy": "ewc",
64 | "ewc_lambda":0.0005,
65 |
66 | "replay": false,
67 | "replay_strategy": "random",
68 | "repaly_num_samples": 100
69 | }
--------------------------------------------------------------------------------
/utils/initialize.py:
--------------------------------------------------------------------------------
1 | import sys, random, torch, logging
2 | import numpy as np
3 | import os.path as osp
4 | from utils import common_tools as ct
5 |
6 |
7 | def init(args):
8 | '''
9 | Step 1.1 : Initialize configuration parameters
10 | '''
11 | def _update(src, tmp):
12 | # Iterate over each key-value pair in the tmp dictionary, and if the key is not "gpuid", add it to the src dictionary
13 | for key in tmp:
14 |             if key != "gpuid":
15 | src[key] = tmp[key]
16 |
17 | conf_path = osp.join(args.conf) # Concatenate the configuration file path into a complete path
18 | info = ct.load_json_file(conf_path) # Loading a configuration file in JSON format
19 | _update(vars(args), info) # Update the configuration information to the args parameter
20 | vars(args)["path"] = osp.join(args.model_path, args.logname+"-"+str(args.seed)) # Create a model save path and store it in the args parameter
21 | ct.mkdirs(args.path) # Create the corresponding directory
22 | del info # Delete the configuration information dictionary
23 |
24 |
25 | def seed_anything(seed=42):
26 | '''
27 | Step 1.2: Initialize random seed
28 | '''
29 | random.seed(seed)
30 | np.random.seed(seed)
31 | torch.manual_seed(seed)
32 | torch.cuda.manual_seed(seed)
33 | torch.cuda.manual_seed_all(seed)
34 | torch.backends.cudnn.benchmark = False
35 | torch.backends.cudnn.deterministic = True
36 |
37 |
38 | def init_log(args):
39 | '''
40 | Step 1.3: Initialize the logging object
41 | '''
42 | log_dir, log_filename = args.path, args.logname
43 | logger = logging.getLogger(__name__)
44 | ct.mkdirs(log_dir)
45 | logger.setLevel(logging.INFO)
46 | fh = logging.FileHandler(osp.join(log_dir, log_filename+".log"))
47 | fh.setLevel(logging.INFO)
48 | ch = logging.StreamHandler(sys.stdout)
49 | ch.setLevel(logging.INFO)
50 | formatter = logging.Formatter("%(asctime)s - %(message)s")
51 | fh.setFormatter(formatter)
52 | ch.setFormatter(formatter)
53 | logger.addHandler(fh)
54 | logger.addHandler(ch)
55 | logger.info("logger name:%s", osp.join(log_dir, log_filename+".log"))
56 | vars(args)["logger"] = logger
57 |
58 |
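59 | # Path note (illustrative, values taken from the log/ directory above): with
60 | # model_path = "log/ENERGY-Wind/", logname = "pre_st" and seed = 24, init() sets
61 | # args.path to "log/ENERGY-Wind/pre_st-24" and init_log() writes pre_st.log inside it.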
--------------------------------------------------------------------------------
/utils/metric.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def mask_np(array, null_val):
4 | if np.isnan(null_val):
5 |         return (~np.isnan(array)).astype('float32')
6 | else:
7 | return np.not_equal(array, null_val).astype('float32')
8 |
9 |
10 | def masked_mape_np(y_true, y_pred, null_val=np.nan):
11 | with np.errstate(divide='ignore', invalid='ignore'):
12 | mask = mask_np(y_true, null_val)
13 | mask /= mask.mean()
14 | mape = np.abs((y_pred - y_true) / y_true)
15 | mape = np.nan_to_num(mask * mape)
16 | return np.mean(mape) * 100
17 |
18 |
19 | def masked_mse_np(y_true, y_pred, null_val=np.nan):
20 | mask = mask_np(y_true, null_val)
21 | mask /= mask.mean()
22 | mse = (y_true - y_pred) ** 2
23 | return np.mean(np.nan_to_num(mask * mse))
24 |
25 |
26 | def masked_mae_np(y_true, y_pred, null_val=np.nan):
27 | mask = mask_np(y_true, null_val)
28 | mask /= mask.mean()
29 | mae = np.abs(y_true - y_pred)
30 | return np.mean(np.nan_to_num(mask * mae))
31 |
32 |
33 | def cal_metric(ground_truth, prediction, args):
34 | args.logger.info("[*] year {}, testing".format(args.year))
35 | mae_list, rmse_list, mape_list = [], [], []
36 | for i in range(1, 13):
37 | mae = masked_mae_np(ground_truth[:, :, :i], prediction[:, :, :i], 0)
38 | rmse = masked_mse_np(ground_truth[:, :, :i], prediction[:, :, :i], 0) ** 0.5
39 | mape = masked_mape_np(ground_truth[:, :, :i], prediction[:, :, :i], 0)
40 | mae_list.append(mae)
41 | rmse_list.append(rmse)
42 | mape_list.append(mape)
43 | if i==3 or i==6 or i==12:
44 | args.logger.info("T:{:d}\tMAE\t{:.4f}\tRMSE\t{:.4f}\tMAPE\t{:.4f}".format(i,mae,rmse,mape))
45 | args.result[str(i)][" MAE"][args.year] = mae
46 | args.result[str(i)]["MAPE"][args.year] = mape
47 | args.result[str(i)]["RMSE"][args.year] = rmse
48 | args.result["Avg"][" MAE"][args.year] = np.mean(mae_list)
49 | args.result["Avg"]["RMSE"][args.year] = np.mean(rmse_list)
50 | args.result["Avg"]["MAPE"][args.year] = np.mean(mape_list)
51 | args.logger.info("T:Avg\tMAE\t{:.4f}\tRMSE\t{:.4f}\tMAPE\t{:.4f}".format(np.mean(mae_list), np.mean(rmse_list), np.mean(mape_list)))
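52 | 
53 | # Masking example (hypothetical values): for y_true = [0., 2., 4.] with null_val = 0,
54 | # mask_np gives [0., 1., 1.]; dividing by its mean (2/3) rescales it to [0., 1.5, 1.5],
55 | # so missing entries contribute nothing while the metric remains an unbiased mean
56 | # over the observed positions.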
--------------------------------------------------------------------------------
/utils/common_tools.py:
--------------------------------------------------------------------------------
1 | import os, re, json, torch
2 | import os.path as osp
3 | import numpy as np
4 | from Bio.Cluster import kcluster
5 |
6 |
7 | def mkdirs(path):
8 | if not os.path.exists(path):
9 | os.makedirs(path)
10 |
11 |
12 | def load_json_file(file_path):
13 | with open(file_path, "r") as f:
14 | s = f.read()
15 |         s = re.sub(r'\s', "", s)
16 | return json.loads(s)
17 |
18 |
19 | def load_best_model(args):
20 | if (args.load_first_year and args.year <= args.begin_year + 1) or args.train == 0: # Determine whether to load the first year's model
21 | load_path = args.first_year_model_path # Set the loading path to the first year model path
22 | loss = load_path.split("/")[-1].replace(".pkl", "") # Get the model file name and remove the extension
23 | else:
24 | loss = []
25 | for filename in os.listdir(osp.join(args.model_path, args.logname+"-"+str(args.seed), str(args.year-1))): # Traverse the files under the model path of the previous year and get all loss values
26 | loss.append(filename[0:-4])
27 | loss = sorted(loss)
28 | load_path = osp.join(args.model_path, args.logname+"-"+str(args.seed), str(args.year-1), loss[0]+".pkl") # Set the loading path to the model file corresponding to the minimum loss value
29 |
30 | args.logger.info("[*] load from {}".format(load_path)) # Recording Load Path
31 | state_dict = torch.load(load_path, map_location=args.device)["model_state_dict"] # Loading the model state dictionary
32 |
33 | model = args.methods[args.method](args) # Initialize the model
34 |
35 | if args.method == 'EAC':
36 | if args.year == args.begin_year:
37 | model.expand_adaptive_params(args.base_node_size)
38 | else:
39 | for idx, _ in enumerate(range(args.year - args.begin_year)):
40 | model.expand_adaptive_params(args.graph_size_list[idx])
41 |
42 | if args.method == 'Universal' and args.use_eac == True:
43 | if args.year == args.begin_year:
44 | model.expand_adaptive_params(args.base_node_size)
45 | else:
46 | for idx, _ in enumerate(range(args.year - args.begin_year)):
47 | model.expand_adaptive_params(args.graph_size_list[idx])
48 |
49 | model.load_state_dict(state_dict) # Load the state dictionary into the model
50 | model = model.to(args.device) # Move the model to the specified device
51 | return model, loss[0] # Returns the model and the minimum loss value
52 |
53 |
54 | def long_term_pattern(args, long_pattern):
55 |     attention, _, _ = kcluster(long_pattern, nclusters=args.cluster, dist='u') # cluster nodes by their long-term (daily-average) pattern: [number of nodes, pattern length] -> [number of nodes], labels in 0 to k-1
56 |     np_attention = np.zeros((len(attention), args.cluster)) # one-hot cluster assignment: [number of nodes, clusters]
57 |     for i in range(len(attention)):
58 |         np_attention[i][attention[i]] = 1.0
59 |     return np_attention.astype(np.float32)
60 |
61 |
62 | def get_max_columns(matrix):
63 | tensor = torch.tensor(matrix)
64 | max_columns, _ = torch.max(tensor, dim=1)
65 | return max_columns
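66 | 
67 | # Example (hypothetical values): get_max_columns([[1.0, 5.0], [3.0, 2.0]]) returns
68 | # tensor([5., 3.]), the maximum value of each row.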
--------------------------------------------------------------------------------
/src/model/gcn_conv.py:
--------------------------------------------------------------------------------
1 | import math
2 | import pdb
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.init as init
6 |
7 | class BatchGCNConv(nn.Module):
8 | """
9 | Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
10 | """
11 | def __init__(self, in_features, out_features, bias=True, gcn=True):
12 | super(BatchGCNConv, self).__init__()
13 | self.in_features = in_features
14 | self.out_features = out_features
15 | self.weight_neigh = nn.Linear(in_features, out_features, bias=bias)
16 | if not gcn:
17 | self.weight_self = nn.Linear(in_features, out_features, bias=False)
18 | else:
19 | self.register_parameter('weight_self', None)
20 |
21 | self.reset_parameters()
22 |
23 | def reset_parameters(self):
24 | self.weight_neigh.reset_parameters()
25 | if self.weight_self is not None:
26 | self.weight_self.reset_parameters()
27 |
28 |
29 |
30 | def forward(self, x, adj):
31 | if x.device != adj.device:
32 | adj = adj.to(x.device)
33 |
34 | # x: [bs, N, in_features], adj: [N, N]
35 | input_x = torch.matmul(adj, x) # [N, N] * [bs, N, in_features] = [bs, N, in_features]
36 | # [bs, N, in_features] * [in_features, out_features] = [bs, N, out_features]
37 | output = self.weight_neigh(input_x)
38 | if self.weight_self is not None:
39 | output += self.weight_self(x) # [bs, N, out_features]
40 | return output
41 |
42 |
43 |
44 |
45 | class ChebGraphConv(nn.Module):
46 | def __init__(self, c_in, c_out):
47 | super(ChebGraphConv, self).__init__()
48 | self.c_in = c_in
49 | self.c_out = c_out
50 | self.Ks = 3
51 | self.weight = nn.Parameter(torch.FloatTensor(self.Ks, c_in, c_out))
52 | self.bias = nn.Parameter(torch.FloatTensor(c_out))
53 | self.reset_parameters()
54 |
55 |
56 | def reset_parameters(self):
57 | init.kaiming_uniform_(self.weight, a=math.sqrt(5))
58 | fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
59 | bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
60 | init.uniform_(self.bias, -bound, bound)
61 |
62 |
63 | def forward(self, x, adj):
64 |
65 | x = x.unsqueeze(dim=1)
66 |
67 | if self.Ks - 1 < 0:
68 | raise ValueError(f'ERROR: the graph convolution kernel size Ks has to be a positive integer, but received {self.Ks}.')
69 | elif self.Ks - 1 == 0:
70 | x_0 = x
71 | x_list = [x_0]
72 | elif self.Ks - 1 == 1:
73 | x_0 = x
74 | x_1 = torch.einsum('hi,btij->bthj', adj, x)
75 | x_list = [x_0, x_1]
76 | elif self.Ks - 1 >= 2:
77 | x_0 = x
78 | x_1 = torch.einsum('hi,btij->bthj', adj, x)
79 | x_list = [x_0, x_1]
80 | for k in range(2, self.Ks):
81 | x_list.append(torch.einsum('hi,btij->bthj', 2 * adj, x_list[k - 1]) - x_list[k - 2])
82 |
83 | x = torch.stack(x_list, dim=2)
84 |
85 | cheb_graph_conv = torch.einsum('btkhi,kij->bthj', x, self.weight)
86 | cheb_graph_conv = torch.add(cheb_graph_conv, self.bias)
87 |
88 | x = cheb_graph_conv.squeeze(dim=1)
89 |
90 | return x
91 |
--------------------------------------------------------------------------------
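A small shape-check sketch for the layers above (not part of the repository). It assumes `src/` is on the Python path, mirroring how the repository's own modules import (`sys.path.append('src/')`); the batch size, node count, and feature sizes are arbitrary.

```python
import sys
sys.path.append('src/')

import torch

from model.gcn_conv import BatchGCNConv, ChebGraphConv

bs, N, in_features, out_features = 4, 10, 12, 64
x = torch.randn(bs, N, in_features)            # node features: [bs, N, in_features]
adj = torch.softmax(torch.randn(N, N), dim=1)  # dense, row-normalized adjacency: [N, N]

layer = BatchGCNConv(in_features, out_features, bias=True, gcn=False)
out = layer(x, adj)                            # [bs, N, out_features]
assert out.shape == (bs, N, out_features)

# ChebGraphConv takes the same [bs, N, c_in] input; the time axis is added and removed internally.
cheb = ChebGraphConv(c_in=in_features, c_out=out_features)
assert cheb(x, adj).shape == (bs, N, out_features)
```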
/src/model/ewc.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import torch.optim as optim
5 | from torch import autograd
6 |
7 | import numpy as np
8 | import logging
9 | import pdb
10 |
11 | from torch_geometric.data import Data
12 |
13 |
14 | class EWC(nn.Module):
15 |
16 | def __init__(self, model, adj, ewc_lambda = 0, ewc_type = 'ewc'):
17 | super(EWC, self).__init__()
18 | self.model = model
19 | self.ewc_lambda = ewc_lambda
20 | self.ewc_type = ewc_type
21 | self.adj = adj
22 |
23 | def _update_mean_params(self):
24 | for param_name, param in self.model.named_parameters():
25 | _buff_param_name = param_name.replace('.', '__')
26 | self.register_buffer(_buff_param_name + '_estimated_mean', param.data.clone())
27 |
28 | def _update_fisher_params(self, loader, lossfunc, device):
29 | _buff_param_names = [param[0].replace('.', '__') for param in self.model.named_parameters()]
30 | est_fisher_info = {name: 0.0 for name in _buff_param_names}
31 | for i, data in enumerate(loader):
32 | data = data.to(device, non_blocking=True)
33 | pred = self.model.forward(data, self.adj)
34 | log_likelihood = lossfunc(data.y, pred, reduction='mean')
35 | grad_log_likelihood = autograd.grad(log_likelihood, self.model.parameters())
36 | for name, grad in zip(_buff_param_names, grad_log_likelihood):
37 | est_fisher_info[name] += grad.data.clone() ** 2
38 | for name in _buff_param_names:
39 | self.register_buffer(name + '_estimated_fisher', est_fisher_info[name])
40 |
41 |
42 | def _update_fisher_params_for_stkec(self, loader, lossfunc, device):
43 | _buff_param_names = [param[0].replace('.', '__') for param in self.model.named_parameters()]
44 | est_fisher_info = {name: 0.0 for name in _buff_param_names}
45 | for i, data in enumerate(loader):
46 | data = data.to(device, non_blocking=True)
47 | pred, _ = self.model.forward(data, self.adj)
48 | log_likelihood = lossfunc(data.y, pred, reduction='mean')
49 | grad_log_likelihood = autograd.grad(log_likelihood, self.model.parameters())
50 | for name, grad in zip(_buff_param_names, grad_log_likelihood):
51 | est_fisher_info[name] += grad.data.clone() ** 2
52 | for name in _buff_param_names:
53 | self.register_buffer(name + '_estimated_fisher', est_fisher_info[name])
54 |
55 |
56 | def register_ewc_params(self, loader, lossfunc, device):
57 | self._update_fisher_params(loader, lossfunc, device)
58 | self._update_mean_params()
59 |
60 |
61 | def register_ewc_params_for_stkec(self, loader, lossfunc, device):
62 | self._update_fisher_params_for_stkec(loader, lossfunc, device)
63 | self._update_mean_params()
64 |
65 |
66 | def compute_consolidation_loss(self):
67 | losses = []
68 | for param_name, param in self.model.named_parameters():
69 | _buff_param_name = param_name.replace('.', '__')
70 | estimated_mean = getattr(self, '{}_estimated_mean'.format(_buff_param_name))
71 | estimated_fisher = getattr(self, '{}_estimated_fisher'.format(_buff_param_name))
72 | if estimated_fisher is None:
73 | losses.append(0)
74 | elif self.ewc_type == 'l2':
75 | losses.append((10e-6 * (param - estimated_mean) ** 2).sum())
76 | else:
77 | losses.append((estimated_fisher * (param - estimated_mean) ** 2).sum())
78 | return (self.ewc_lambda / 2) * sum(losses)
79 |
80 | def forward(self, data, adj):
81 | return self.model(data, adj)
--------------------------------------------------------------------------------
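An illustrative sketch (not part of the repository) of how the `EWC` wrapper above is typically driven: Fisher information is registered after finishing one task, and the consolidation penalty is added to the task loss on the next one. The tiny backbone, data, and optimizer below are placeholders for demonstration only, and `src/` is assumed to be on the Python path.

```python
import sys
sys.path.append('src/')

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader

from model.ewc import EWC

class TinyModel(nn.Module):
    """Placeholder backbone with the same (data, adj) forward signature."""
    def __init__(self, in_dim=12, out_dim=12):
        super().__init__()
        self.lin = nn.Linear(in_dim, out_dim)
    def forward(self, data, adj):
        return self.lin(data.x)

device = 'cpu'
adj = torch.eye(5)  # unused by TinyModel, kept only to satisfy the interface
dataset = [Data(x=torch.randn(5, 12), y=torch.randn(5, 12)) for _ in range(4)]
loader = DataLoader(dataset, batch_size=2)

ewc_model = EWC(TinyModel(), adj, ewc_lambda=1.0, ewc_type='ewc').to(device)
ewc_model.register_ewc_params(loader, F.mse_loss, device)   # call this after training on the previous task

optimizer = torch.optim.Adam(ewc_model.parameters(), lr=1e-3)
for data in loader:                                          # training step on the next task
    data = data.to(device)
    loss = F.mse_loss(data.y, ewc_model(data, adj)) + ewc_model.compute_consolidation_loss()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```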
/utils/data_convert.py:
--------------------------------------------------------------------------------
1 | import tqdm
2 | import numpy as np
3 |
4 |
5 | def z_score(data):
6 | """
7 | Standardize the data (z-score): subtract the mean and divide by the standard deviation
8 | so that the result has zero mean and unit variance.
9 | """
10 | return (data - np.mean(data)) / np.std(data)
11 |
12 | def generate_dataset(data, idx, x_len=12, y_len=12):
13 | """"
14 | Generates a dataset of input x and output y from the input data at the given index idx
15 | """
16 | res = data[idx] # Get data by index
17 | node_size = data.shape[1] # Get the number of nodes
18 | t = len(idx)-1
19 | idic = 0
20 | x_index, y_index = [], [] # Initialize the x and y index lists
21 |
22 | # Traverse the index to generate the index of x and y
23 | for i in tqdm.tqdm(range(t, 0, -1)):
24 | if i-x_len-y_len>=0:
25 | x_index.extend(list(range(i-x_len-y_len, i-y_len)))
26 | y_index.extend(list(range(i-y_len, i)))
27 |
28 | x_index = np.asarray(x_index) # Convert to numpy array
29 | y_index = np.asarray(y_index)
30 | x = res[x_index].reshape((-1, x_len, node_size)) # Reshape the data
31 | y = res[y_index].reshape((-1, y_len, node_size))
32 |
33 | return x, y
34 |
35 | def generate_samples(days, savepath, data, graph, train_rate=0.6, val_rate=0.2, test_rate=0.2, val_test_mix=False):
36 | """
37 | Generate training, validation and test datasets and save them as .npz files
38 | """
39 | edge_index = np.array(list(graph.edges)).T # Get the edge index of the graph and transpose it
40 | del graph
41 |
42 | if savepath.split('/')[1] =='PEMS':
43 | data = data[0:days*288, :] # Extract data based on days
44 |
45 |
46 |
47 | t, n = data.shape[0], data.shape[1] # Get the time step and number of nodes of the data
48 |
49 | # Split the training, validation, and test set indices according to the ratio
50 | train_idx = [i for i in range(int(t*train_rate))]
51 | #train_idx = [i for i in range(int(t*0.2))] # for few-shot setting
52 | val_idx = [i for i in range(int(t*train_rate), int(t*(train_rate+val_rate)))]
53 | test_idx = [i for i in range(int(t*(train_rate+val_rate)), t)]
54 |
55 | train_x, train_y = generate_dataset(data, train_idx)
56 | val_x, val_y = generate_dataset(data, val_idx)
57 | test_x, test_y = generate_dataset(data, test_idx)
58 |
59 | # If you need to mix validation and test sets
60 | if val_test_mix:
61 | val_test_x = np.concatenate((val_x, test_x), 0) # Combine validation and test sets x data
62 | val_test_y = np.concatenate((val_y, test_y), 0) # Combine validation and test set y data
63 | val_test_idx = np.arange(val_x.shape[0]+test_x.shape[0]) # Generate Index
64 | np.random.shuffle(val_test_idx) # Shuffle the index order
65 | val_x, val_y = val_test_x[val_test_idx[:int(t*val_rate)]], val_test_y[val_test_idx[:int(t*val_rate)]] # Re-partition validation and test set data
66 | test_x, test_y = val_test_x[val_test_idx[int(t*val_rate):]], val_test_y[val_test_idx[int(t*val_rate):]]
67 |
68 | # Normalize the data to z-scores
69 | """
70 | Important Note:
71 | It would be more reasonable to use only the mean and standard deviation of the training data to normalize the validation and test sets.
72 | However, for consistency reasons, we follow TrafficStream's approach, which is currently acceptable.
73 | """
74 | train_x = z_score(train_x)
75 | val_x = z_score(val_x)
76 | test_x = z_score(test_x)
77 |
78 | # Save data to file
79 | np.savez(savepath, train_x=train_x, train_y=train_y, val_x=val_x, val_y=val_y, test_x=test_x, test_y=test_y, edge_index=edge_index)
80 |
81 | # Build the returned data dictionary
82 | data = {"train_x":train_x, "train_y":train_y, "val_x":val_x, "val_y":val_y, "test_x":test_x, "test_y":test_y, "edge_index":edge_index}
83 | return data
--------------------------------------------------------------------------------
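A minimal end-to-end sketch of `generate_samples` above on synthetic data (not part of the repository). It assumes the repository root is the working directory; the `data/DEMO/` output path, graph, and array sizes are made up for illustration.

```python
import os

import networkx as nx
import numpy as np

from utils.data_convert import generate_samples

t, n = 2000, 8                                  # 2000 time steps, 8 nodes
data = np.random.rand(t, n).astype(np.float32)
graph = nx.erdos_renyi_graph(n, p=0.3, seed=0)

os.makedirs("data/DEMO", exist_ok=True)
samples = generate_samples(days=1, savepath="data/DEMO/0.npz", data=data, graph=graph)

print(samples["train_x"].shape)     # (num_windows, 12, 8), z-score normalized
print(samples["edge_index"].shape)  # (2, num_edges)
```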
/README.md:
--------------------------------------------------------------------------------
1 | # STRAP: Spatio-Temporal Pattern Retrieval for Out-of-Distribution Generalization
2 |
3 |
4 |
5 | [](https://opensource.org/licenses/Apache-2.0)
6 | [](https://www.python.org/)
7 | [](https://pytorch.org/)
8 | [](https://arxiv.org/abs/2505.19547/)
9 | [](https://github.com/HoweyZ/STRAP)
10 |
11 | [📄 Paper](https://arxiv.org/abs/2505.19547) | [📊 Datasets](https://drive.google.com/drive/folders/1OiMLuFBdc56CLekileRjH0xyhDWuoC6C)
12 |
13 |
14 |
15 | ---
16 |
17 | ## 📋 Table of Contents
18 |
19 | - [✨ Overview](#-overview)
20 | - [🏗️ Repository Structure](#️-repository-structure)
21 | - [🚀 Getting Started](#-getting-started)
22 | - [🙏 Acknowledgements](#-acknowledgements)
23 | - [📝 Citation](#-citation)
24 | - [🌟 Star History](#-star-history)
25 |
26 | ---
27 |
28 | ## ✨ Overview
29 |
30 | Spatio-Temporal Graph Neural Networks (STGNNs) have emerged as a powerful tool for modeling dynamic graph-structured data across diverse domains. However, they often fail to generalize in Spatio-Temporal Out-of-Distribution (STOOD) scenarios, where both temporal dynamics and spatial structures evolve beyond the training distribution. To address this problem, we propose STRAP, which enhances model generalization by integrating retrieval-augmented learning into the STGNN continual learning pipeline. Extensive experiments across multiple real-world streaming graph datasets show that STRAP consistently outperforms state-of-the-art STGNN baselines on STOOD tasks, demonstrating its robustness, adaptability, and strong generalization capability without task-specific fine-tuning.
31 |
32 | 
33 | STRAP Framework Architecture
34 |
35 |
36 |
37 | ---
38 |
39 | ## 🏗️ Repository Structure
40 | ```
41 | STRAP/
42 | │
43 | ├── 📄 README.md # Project documentation
44 | ├── 📄 LICENSE # Apache 2.0 License
45 | ├── 📄 environment.yaml # Conda environment configuration
46 | ├── 🚀 main.py # Main entry point for experiments
47 | ├── 🚀 stkec_main.py # STKEC experiments entry point
48 | ├── 📜 run.sh # Batch experiment execution script
49 | │
50 | ├── 📁 conf/ # ⚙️ Configuration files
51 | │ ├── AIR/ # Air quality dataset configs
52 | │ ├── ENERGY-Wind/ # Wind energy dataset configs
53 | │ └── PEMS/ # Traffic dataset configs
54 | │ ├── strap.json
55 | │ ├── ewc.json
56 | │ └── ...
57 | │
58 | ├── 📁 src/ # 💻 Source code
59 | │ ├── dataer/ # Data loading and preprocessing
60 | │ │ ├── ...
61 | │ │
62 | │ ├── model/ # Model implementations
63 | │ │ ├── ... # Model components
64 | │ │
65 | │ └── trainer/ # Training and evaluation
66 | │ ├── ...
67 | │
68 | ├── 📁 utils/ # 🛠️ Utility functions
69 | │ ├── ...
70 | │
71 | ├── 📁 font/ # Font files for visualization
72 | ├── 📁 log/ # 📊 Training logs and checkpoints
73 | └── 📁 data/ # 💾 Dataset storage (create this)
74 | ```
75 | ---
76 |
77 | ## 🚀 Getting Started
78 |
79 | ### 📋 Prerequisites
80 |
81 | Before you begin, ensure you have the following installed:
82 |
83 | - **Conda** or **Miniconda** ([Download](https://www.anaconda.com/products/distribution))
84 | - **NVIDIA GPU** with CUDA support (recommended)
85 | - **Python 3.8+**
86 |
87 | ### 💻 Usage
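### 📦 Environment Setup

One way to set up the environment is to create it from the provided `environment.yaml` (the environment name `stg` is defined in that file):

```bash
conda env create -f environment.yaml
conda activate stg
```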
88 |
89 | ```bash
90 | # Runs the ENERGY-Wind experiments; the other datasets follow the same procedure.
91 | bash run.sh
92 | ```
93 |
94 | ---
95 |
96 | ## 🙏 Acknowledgements
97 |
98 | We would like to express our gratitude to:
99 |
100 | - **EAC**: We thank the authors for their excellent work. Our implementation builds upon their codebase: [EAC Repository](https://github.com/Onedean/EAC)
101 |
102 | ---
103 |
104 | ## 📝 Citation
105 |
106 | If you find this work useful for your research, please consider citing our paper:
107 |
108 | ```bibtex
109 | @article{zhang2025strap,
110 | title={STRAP: Spatio-Temporal Pattern Retrieval for Out-of-Distribution Generalization},
111 | author={Zhang, Haoyu and Zhang, Wentao and Miao, Hao and Jiang, Xinke and Fang, Yuchen and Zhang, Yifan},
112 | journal={arXiv preprint arXiv:2505.19547},
113 | year={2025}
114 | }
115 | ```
116 | ---
117 |
118 | ## 🌟 Star History
119 |
120 | [](https://www.star-history.com/#HoweyZ/STRAP&type=date&legend=top-left)
121 |
122 | ---
123 |
124 |
125 |
126 |
127 |
128 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/pre_st-24/pre_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 22:28:24,695 - logger name:log/ENERGY-Wind/pre_st-24/pre_st.log
2 | 2025-04-27 22:28:24,696 - params : {'conf': 'conf/ENERGY-Wind/pre.json', 'seed': 24, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'pre_st', 'method': 'TrafficStream', 'load_first_year': 1, 'first_year_model_path': 'log/ENERGY-Wind/retrain_st-24/0/3.0127.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': False, 'train': 0, 'auto_test': 1, 'strategy': 'pretrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/pre_st-24', 'logger': }
3 | 2025-04-27 22:28:24,746 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 22:28:25,103 - [*] load from log/ENERGY-Wind/retrain_st-24/0/3.0127.pkl
5 | 2025-04-27 22:28:26,358 - [*] loss:32.8293
6 | 2025-04-27 22:28:26,368 - [*] year 0, testing
7 | 2025-04-27 22:28:26,438 - T:3 MAE 5.5025 RMSE 5.6020 MAPE 14.8563
8 | 2025-04-27 22:28:26,571 - T:6 MAE 5.5199 RMSE 5.6570 MAPE 14.9031
9 | 2025-04-27 22:28:26,981 - T:12 MAE 5.5156 RMSE 5.7379 MAPE 14.8488
10 | 2025-04-27 22:28:26,982 - T:Avg MAE 5.5145 RMSE 5.6609 MAPE 14.8820
11 | 2025-04-27 22:28:27,008 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
12 | 2025-04-27 22:28:27,010 - [*] load from log/ENERGY-Wind/retrain_st-24/0/3.0127.pkl
13 | 2025-04-27 22:28:27,643 - [*] loss:20.3058
14 | 2025-04-27 22:28:27,653 - [*] year 1, testing
15 | 2025-04-27 22:28:27,713 - T:3 MAE 3.9775 RMSE 4.2551 MAPE 11.4653
16 | 2025-04-27 22:28:27,808 - T:6 MAE 3.9936 RMSE 4.3257 MAPE 11.5093
17 | 2025-04-27 22:28:28,131 - T:12 MAE 4.0045 RMSE 4.4481 MAPE 11.4900
18 | 2025-04-27 22:28:28,131 - T:Avg MAE 3.9917 RMSE 4.3375 MAPE 11.4959
19 | 2025-04-27 22:28:28,131 - [*] No increasing nodes at year 1, store model of the last year.
20 | 2025-04-27 22:28:28,158 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
21 | 2025-04-27 22:28:28,159 - [*] load from log/ENERGY-Wind/retrain_st-24/0/3.0127.pkl
22 | 2025-04-27 22:28:28,831 - [*] loss:99.4910
23 | 2025-04-27 22:28:28,845 - [*] year 2, testing
24 | 2025-04-27 22:28:28,909 - T:3 MAE 9.7813 RMSE 9.9255 MAPE 42.9258
25 | 2025-04-27 22:28:29,013 - T:6 MAE 9.7665 RMSE 9.9391 MAPE 42.8918
26 | 2025-04-27 22:28:29,406 - T:12 MAE 9.7859 RMSE 10.0133 MAPE 43.1276
27 | 2025-04-27 22:28:29,406 - T:Avg MAE 9.7737 RMSE 9.9523 MAPE 42.9397
28 | 2025-04-27 22:28:29,406 - [*] No increasing nodes at year 2, store model of the last year.
29 | 2025-04-27 22:28:29,439 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
30 | 2025-04-27 22:28:29,442 - [*] load from log/ENERGY-Wind/retrain_st-24/0/3.0127.pkl
31 | 2025-04-27 22:28:30,236 - [*] loss:546.1401
32 | 2025-04-27 22:28:30,248 - [*] year 3, testing
33 | 2025-04-27 22:28:30,327 - T:3 MAE 23.2776 RMSE 23.3329 MAPE 603.4615
34 | 2025-04-27 22:28:30,444 - T:6 MAE 23.2518 RMSE 23.3292 MAPE 603.9399
35 | 2025-04-27 22:28:30,855 - T:12 MAE 23.2635 RMSE 23.3878 MAPE 608.1330
36 | 2025-04-27 22:28:30,856 - T:Avg MAE 23.2596 RMSE 23.3419 MAPE 604.4192
37 | 2025-04-27 22:28:30,856 - [*] No increasing nodes at year 3, store model of the last year.
38 | 2025-04-27 22:28:30,856 -
39 |
40 |
41 | 2025-04-27 22:28:30,856 - 3 MAE 5.50 3.98 9.78 23.28 10.63
42 | 2025-04-27 22:28:30,856 - 3 RMSE 5.60 4.26 9.93 23.33 10.78
43 | 2025-04-27 22:28:30,856 - 3 MAPE 14.86 11.47 42.93 603.46 168.18
44 | 2025-04-27 22:28:30,856 - 6 MAE 5.52 3.99 9.77 23.25 10.63
45 | 2025-04-27 22:28:30,856 - 6 RMSE 5.66 4.33 9.94 23.33 10.81
46 | 2025-04-27 22:28:30,856 - 6 MAPE 14.90 11.51 42.89 603.94 168.31
47 | 2025-04-27 22:28:30,856 - 12 MAE 5.52 4.00 9.79 23.26 10.64
48 | 2025-04-27 22:28:30,856 - 12 RMSE 5.74 4.45 10.01 23.39 10.90
49 | 2025-04-27 22:28:30,857 - 12 MAPE 14.85 11.49 43.13 608.13 169.40
50 | 2025-04-27 22:28:30,857 - Avg MAE 5.51 3.99 9.77 23.26 10.63
51 | 2025-04-27 22:28:30,857 - Avg RMSE 5.66 4.34 9.95 23.34 10.82
52 | 2025-04-27 22:28:30,857 - Avg MAPE 14.88 11.50 42.94 604.42 168.43
53 | 2025-04-27 22:28:30,857 - total time: 0.0000
54 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/pre_st-100/pre_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 22:28:45,256 - logger name:log/ENERGY-Wind/pre_st-100/pre_st.log
2 | 2025-04-27 22:28:45,256 - params : {'conf': 'conf/ENERGY-Wind/pre.json', 'seed': 100, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'pre_st', 'method': 'TrafficStream', 'load_first_year': 1, 'first_year_model_path': 'log/ENERGY-Wind/retrain_st-100/0/2.0238.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': False, 'train': 0, 'auto_test': 1, 'strategy': 'pretrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/pre_st-100', 'logger': }
3 | 2025-04-27 22:28:45,267 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 22:28:45,589 - [*] load from log/ENERGY-Wind/retrain_st-100/0/2.0238.pkl
5 | 2025-04-27 22:28:46,371 - [*] loss:19.0199
6 | 2025-04-27 22:28:46,380 - [*] year 0, testing
7 | 2025-04-27 22:28:46,443 - T:3 MAE 5.1983 RMSE 5.4105 MAPE 13.6366
8 | 2025-04-27 22:28:46,571 - T:6 MAE 3.9582 RMSE 4.4382 MAPE 10.4497
9 | 2025-04-27 22:28:46,974 - T:12 MAE 3.8678 RMSE 4.4074 MAPE 10.1850
10 | 2025-04-27 22:28:46,974 - T:Avg MAE 4.3847 RMSE 4.7815 MAPE 11.5351
11 | 2025-04-27 22:28:46,980 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
12 | 2025-04-27 22:28:46,982 - [*] load from log/ENERGY-Wind/retrain_st-100/0/2.0238.pkl
13 | 2025-04-27 22:28:47,590 - [*] loss:11.6444
14 | 2025-04-27 22:28:47,599 - [*] year 1, testing
15 | 2025-04-27 22:28:47,660 - T:3 MAE 3.8388 RMSE 4.0670 MAPE 10.6253
16 | 2025-04-27 22:28:47,753 - T:6 MAE 2.8658 RMSE 3.3425 MAPE 7.9932
17 | 2025-04-27 22:28:48,067 - T:12 MAE 2.8216 RMSE 3.4136 MAPE 7.8382
18 | 2025-04-27 22:28:48,067 - T:Avg MAE 3.1977 RMSE 3.6202 MAPE 8.8817
19 | 2025-04-27 22:28:48,067 - [*] No increasing nodes at year 1, store model of the last year.
20 | 2025-04-27 22:28:48,075 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
21 | 2025-04-27 22:28:48,077 - [*] load from log/ENERGY-Wind/retrain_st-100/0/2.0238.pkl
22 | 2025-04-27 22:28:48,689 - [*] loss:138.2838
23 | 2025-04-27 22:28:48,704 - [*] year 2, testing
24 | 2025-04-27 22:28:48,773 - T:3 MAE 10.0925 RMSE 10.1872 MAPE 45.5651
25 | 2025-04-27 22:28:48,880 - T:6 MAE 11.4124 RMSE 11.6208 MAPE 51.3026
26 | 2025-04-27 22:28:49,283 - T:12 MAE 11.5345 RMSE 11.7715 MAPE 51.9936
27 | 2025-04-27 22:28:49,283 - T:Avg MAE 10.9598 RMSE 11.1310 MAPE 49.3869
28 | 2025-04-27 22:28:49,283 - [*] No increasing nodes at year 2, store model of the last year.
29 | 2025-04-27 22:28:49,293 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
30 | 2025-04-27 22:28:49,295 - [*] load from log/ENERGY-Wind/retrain_st-100/0/2.0238.pkl
31 | 2025-04-27 22:28:49,964 - [*] loss:647.8937
32 | 2025-04-27 22:28:49,975 - [*] year 3, testing
33 | 2025-04-27 22:28:50,048 - T:3 MAE 23.8506 RMSE 23.9180 MAPE 655.7446
34 | 2025-04-27 22:28:50,163 - T:6 MAE 25.1943 RMSE 25.3184 MAPE 683.7297
35 | 2025-04-27 22:28:50,614 - T:12 MAE 25.3023 RMSE 25.4545 MAPE 689.5411
36 | 2025-04-27 22:28:50,614 - T:Avg MAE 24.7268 RMSE 24.8366 MAPE 675.2180
37 | 2025-04-27 22:28:50,614 - [*] No increasing nodes at year 3, store model of the last year.
38 | 2025-04-27 22:28:50,615 -
39 |
40 |
41 | 2025-04-27 22:28:50,615 - 3 MAE 5.20 3.84 10.09 23.85 10.75
42 | 2025-04-27 22:28:50,615 - 3 RMSE 5.41 4.07 10.19 23.92 10.90
43 | 2025-04-27 22:28:50,615 - 3 MAPE 13.64 10.63 45.57 655.74 181.39
44 | 2025-04-27 22:28:50,615 - 6 MAE 3.96 2.87 11.41 25.19 10.86
45 | 2025-04-27 22:28:50,615 - 6 RMSE 4.44 3.34 11.62 25.32 11.18
46 | 2025-04-27 22:28:50,615 - 6 MAPE 10.45 7.99 51.30 683.73 188.37
47 | 2025-04-27 22:28:50,615 - 12 MAE 3.87 2.82 11.53 25.30 10.88
48 | 2025-04-27 22:28:50,615 - 12 RMSE 4.41 3.41 11.77 25.45 11.26
49 | 2025-04-27 22:28:50,615 - 12 MAPE 10.19 7.84 51.99 689.54 189.89
50 | 2025-04-27 22:28:50,615 - Avg MAE 4.38 3.20 10.96 24.73 10.82
51 | 2025-04-27 22:28:50,615 - Avg RMSE 4.78 3.62 11.13 24.84 11.09
52 | 2025-04-27 22:28:50,615 - Avg MAPE 11.54 8.88 49.39 675.22 186.26
53 | 2025-04-27 22:28:50,615 - total time: 0.0000
54 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/pre_st-622/pre_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 22:29:04,215 - logger name:log/ENERGY-Wind/pre_st-622/pre_st.log
2 | 2025-04-27 22:29:04,215 - params : {'conf': 'conf/ENERGY-Wind/pre.json', 'seed': 622, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'pre_st', 'method': 'TrafficStream', 'load_first_year': 1, 'first_year_model_path': 'log/ENERGY-Wind/retrain_st-622/0/3.0058.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': False, 'train': 0, 'auto_test': 1, 'strategy': 'pretrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/pre_st-622', 'logger': }
3 | 2025-04-27 22:29:04,226 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 22:29:04,528 - [*] load from log/ENERGY-Wind/retrain_st-622/0/3.0058.pkl
5 | 2025-04-27 22:29:05,271 - [*] loss:33.6749
6 | 2025-04-27 22:29:05,280 - [*] year 0, testing
7 | 2025-04-27 22:29:05,343 - T:3 MAE 5.4772 RMSE 5.6311 MAPE 14.7622
8 | 2025-04-27 22:29:05,461 - T:6 MAE 5.5267 RMSE 5.7102 MAPE 14.8765
9 | 2025-04-27 22:29:05,849 - T:12 MAE 5.5298 RMSE 5.7927 MAPE 14.8589
10 | 2025-04-27 22:29:05,849 - T:Avg MAE 5.5166 RMSE 5.7104 MAPE 14.8505
11 | 2025-04-27 22:29:05,856 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
12 | 2025-04-27 22:29:05,858 - [*] load from log/ENERGY-Wind/retrain_st-622/0/3.0058.pkl
13 | 2025-04-27 22:29:06,438 - [*] loss:21.0801
14 | 2025-04-27 22:29:06,446 - [*] year 1, testing
15 | 2025-04-27 22:29:06,509 - T:3 MAE 4.0630 RMSE 4.3367 MAPE 11.6252
16 | 2025-04-27 22:29:06,603 - T:6 MAE 4.1120 RMSE 4.4166 MAPE 11.7425
17 | 2025-04-27 22:29:06,926 - T:12 MAE 4.1325 RMSE 4.5411 MAPE 11.7583
18 | 2025-04-27 22:29:06,926 - T:Avg MAE 4.1061 RMSE 4.4282 MAPE 11.7238
19 | 2025-04-27 22:29:06,926 - [*] No increasing nodes at year 1, store model of the last year.
20 | 2025-04-27 22:29:06,933 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
21 | 2025-04-27 22:29:06,935 - [*] load from log/ENERGY-Wind/retrain_st-622/0/3.0058.pkl
22 | 2025-04-27 22:29:07,524 - [*] loss:100.7212
23 | 2025-04-27 22:29:07,533 - [*] year 2, testing
24 | 2025-04-27 22:29:07,600 - T:3 MAE 9.8589 RMSE 10.0250 MAPE 43.5974
25 | 2025-04-27 22:29:07,701 - T:6 MAE 9.8134 RMSE 9.9998 MAPE 43.4460
26 | 2025-04-27 22:29:08,075 - T:12 MAE 9.8226 RMSE 10.0662 MAPE 43.5747
27 | 2025-04-27 22:29:08,075 - T:Avg MAE 9.8251 RMSE 10.0200 MAPE 43.4963
28 | 2025-04-27 22:29:08,076 - [*] No increasing nodes at year 2, store model of the last year.
29 | 2025-04-27 22:29:08,083 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
30 | 2025-04-27 22:29:08,085 - [*] load from log/ENERGY-Wind/retrain_st-622/0/3.0058.pkl
31 | 2025-04-27 22:29:08,697 - [*] loss:558.8682
32 | 2025-04-27 22:29:08,708 - [*] year 3, testing
33 | 2025-04-27 22:29:08,778 - T:3 MAE 23.5787 RMSE 23.6539 MAPE 633.1286
34 | 2025-04-27 22:29:08,893 - T:6 MAE 23.5274 RMSE 23.6222 MAPE 633.1327
35 | 2025-04-27 22:29:09,265 - T:12 MAE 23.5236 RMSE 23.6650 MAPE 634.6260
36 | 2025-04-27 22:29:09,265 - T:Avg MAE 23.5379 RMSE 23.6381 MAPE 633.1798
37 | 2025-04-27 22:29:09,266 - [*] No increasing nodes at year 3, store model of the last year.
38 | 2025-04-27 22:29:09,266 -
39 |
40 |
41 | 2025-04-27 22:29:09,266 - 3 MAE 5.48 4.06 9.86 23.58 10.74
42 | 2025-04-27 22:29:09,266 - 3 RMSE 5.63 4.34 10.03 23.65 10.91
43 | 2025-04-27 22:29:09,266 - 3 MAPE 14.76 11.63 43.60 633.13 175.78
44 | 2025-04-27 22:29:09,266 - 6 MAE 5.53 4.11 9.81 23.53 10.74
45 | 2025-04-27 22:29:09,266 - 6 RMSE 5.71 4.42 10.00 23.62 10.94
46 | 2025-04-27 22:29:09,266 - 6 MAPE 14.88 11.74 43.45 633.13 175.80
47 | 2025-04-27 22:29:09,266 - 12 MAE 5.53 4.13 9.82 23.52 10.75
48 | 2025-04-27 22:29:09,266 - 12 RMSE 5.79 4.54 10.07 23.67 11.02
49 | 2025-04-27 22:29:09,266 - 12 MAPE 14.86 11.76 43.57 634.63 176.20
50 | 2025-04-27 22:29:09,266 - Avg MAE 5.52 4.11 9.83 23.54 10.75
51 | 2025-04-27 22:29:09,266 - Avg RMSE 5.71 4.43 10.02 23.64 10.95
52 | 2025-04-27 22:29:09,266 - Avg MAPE 14.85 11.72 43.50 633.18 175.81
53 | 2025-04-27 22:29:09,266 - total time: 0.0000
54 |
--------------------------------------------------------------------------------
/src/model/detect_default.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('src/')
3 | import numpy as np
4 | from scipy.stats import entropy as kldiv
5 | from datetime import datetime
6 | from torch_geometric.utils import to_dense_batch
7 | from dataer.SpatioTemporalDataset import continue_learning_Dataset
8 | from torch_geometric.loader import DataLoader
9 | import torch
10 | from scipy.spatial import distance
11 | import os.path as osp
12 |
13 |
14 | def get_feature(data, graph, args, model, adj):
15 | node_size = data.shape[1] # Determine the number of nodes
16 | data = np.reshape(data[-288*7-1:-1,:], (-1, args.x_len, node_size)) # Reshape the last week’s data into a 3D array with shape (number of samples, x_len, node_size) Note: [288*7, node_size] -> [288*7/12, 12, node_size]
17 | dataloader = DataLoader(continue_learning_Dataset(data), batch_size=data.shape[0], shuffle=False, pin_memory=True, num_workers=32) # Create a DataLoader object to iterate over the reshaped data. The batch_size is set to the number of samples, so one iteration is completed.
18 | for data in dataloader:
19 | data = data.to(args.device, non_blocking=True)
20 | feature, _ = to_dense_batch(model.feature(data, adj), batch=data.batch) # Use the model to extract features from the data and convert it into a dense batch format, with the shape of `feature` being [batch_size, num_nodes, feature_dim]
21 | node_size = feature.size()[1] # Update node_size to match the size of the extracted features
22 | feature = feature.permute(1,0,2) # Transposed feature dimensions are [num_nodes, batch_size, feature_dim]
23 | return feature.cpu().detach().numpy()
24 |
25 |
26 | def get_adj(year, args):
27 | adj = np.load(osp.join(args.graph_path, str(year)+"_adj.npz"))["x"] # Load an adjacency matrix from a .npz file for a specified year
28 | adj = adj / (np.sum(adj, 1, keepdims=True) + 1e-6) # Normalize the adjacency matrix by dividing each row by its sum (plus a small value to avoid division by zero)
29 | return torch.from_numpy(adj).to(torch.float).to(args.device)
30 |
31 |
32 | def score_func(pre_data, cur_data, args):
33 | node_size = pre_data.shape[1] # Determine the number of nodes
34 | score = []
35 | for node in range(node_size):
36 | max_val = max(max(pre_data[:,node]), max(cur_data[:,node])) # Find the maximum and minimum values of the node in the last week period
37 | min_val = min(min(pre_data[:,node]), min(cur_data[:,node]))
38 | pre_prob, _ = np.histogram(pre_data[:,node], bins=10, range=(min_val, max_val)) # Create a histogram of the data for the node in two time periods, with 10 bins in total, and normalize the histogram to get the probability distribution
39 | pre_prob = pre_prob *1.0 / sum(pre_prob)
40 | cur_prob, _ = np.histogram(cur_data[:,node], bins=10, range=(min_val, max_val))
41 | cur_prob = cur_prob * 1.0 /sum(cur_prob)
42 | score.append(kldiv(pre_prob, cur_prob)) # Compute the KL divergence between the two distributions and add it to the list of scores
43 | return np.argpartition(np.asarray(score), -args.topk)[-args.topk:] # Returns the indices of the top-k nodes with the highest KL divergence scores
44 |
45 |
46 | def influence_node_selection(model, args, pre_data, cur_data, pre_graph, cur_graph):
47 | if args.detect_strategy == 'original': # Check the detection strategy specified in the parameters
48 | pre_data = pre_data[-288*7-1:-1,:] # Select the last week (7 days) of data for both datasets
49 | cur_data = cur_data[-288*7-1:-1,:]
50 | node_size = pre_data.shape[1] # Determine the number of nodes in the data
51 | score = []
52 | for node in range(node_size): # Iterate over each node to calculate its KL divergence score
53 | max_val = max(max(pre_data[:,node]), max(cur_data[:,node])) # Find the maximum and minimum values of a node in two time periods
54 | min_val = min(min(pre_data[:,node]), min(cur_data[:,node]))
55 | pre_prob, _ = np.histogram(pre_data[:,node], bins=10, range=(min_val, max_val)) # Create a histogram of the data for the node in two time periods, with 10 bins in total, and normalize the histogram to get the probability distribution
56 | pre_prob = pre_prob *1.0 / sum(pre_prob)
57 | cur_prob, _ = np.histogram(cur_data[:,node], bins=10, range=(min_val, max_val))
58 | cur_prob = cur_prob * 1.0 /sum(cur_prob)
59 | score.append(kldiv(pre_prob, cur_prob)) # Compute the KL divergence between the two distributions and add it to the list of scores
60 | return score
61 | elif args.detect_strategy == 'feature':
62 | model.eval() # Set the model to evaluation mode
63 | pre_adj = get_adj(args.year-1, args) # Get the adjacency matrix of the previous year and the current year
64 | cur_adj = get_adj(args.year, args)
65 |
66 | pre_data = get_feature(pre_data, pre_graph, args, model, pre_adj) # Use the model to extract features from previous and current data, the feature dimension is [num_nodes, batch_size, feature_dim]
67 | cur_data = get_feature(cur_data, cur_graph, args, model, cur_adj)
68 | score = []
69 | for i in range(pre_data.shape[0]): # Traverse the nodes in the feature data
70 | score_ = 0.0
71 | for j in range(pre_data.shape[2]): # Traverse each feature dimension
72 | # if max(pre_data[i,:,j]) - min(pre_data[i,:,j]) == 0 and max(cur_data[i,:,j]) - min(cur_data[i,:,j]) == 0: continue
73 | pre_data[i,:,j] = (pre_data[i,:,j] - min(pre_data[i,:,j]))/(max(pre_data[i,:,j]) - min(pre_data[i,:,j])) # Normalize the eigenvalues to the range [0, 1]
74 | cur_data[i,:,j] = (cur_data[i,:,j] - min(cur_data[i,:,j]))/(max(cur_data[i,:,j]) - min(cur_data[i,:,j]))
75 |
76 | pre_prob, _ = np.histogram(pre_data[i,:,j], bins=10, range=(0, 1)) # Create a histogram of the distribution of the feature over two time periods, with 10 bins, and normalize the histogram to get a probability distribution
77 | pre_prob = pre_prob *1.0 / sum(pre_prob)
78 | cur_prob, _ = np.histogram(cur_data[i,:,j], bins=10, range=(0, 1))
79 | cur_prob = cur_prob * 1.0 /sum(cur_prob)
80 | score_ += distance.jensenshannon(pre_prob, cur_prob) # Calculate the Jensen-Shannon distance between the two distributions and add to the score
81 | score.append(score_) # Add the total score of the node to the score list
82 | return score # Return the per-node Jensen-Shannon divergence scores
83 | else: args.logger.info("Invalid node selection strategy!")
84 |
85 |
--------------------------------------------------------------------------------
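A small sketch of the distribution-shift scoring in `score_func` above on synthetic data (not part of the repository). It assumes `src/` is on the Python path (the module also appends it itself) and uses a `SimpleNamespace` stand-in for `args`; the node count and the rescaling of the first five nodes are made up for illustration.

```python
import sys
sys.path.append('src/')

from types import SimpleNamespace

import numpy as np

from model.detect_default import score_func

args = SimpleNamespace(topk=5)
pre_data = np.random.rand(288 * 7, 50)  # one week of data from the previous period, 50 nodes
cur_data = np.random.rand(288 * 7, 50)  # one week of data from the current period
cur_data[:, :5] *= 3.0                  # rescale the first five nodes so their value distribution shifts

changed_nodes = score_func(pre_data, cur_data, args)
print(sorted(changed_nodes))            # indices of the topk most-shifted nodes, expected [0, 1, 2, 3, 4]
```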
/environment.yaml:
--------------------------------------------------------------------------------
1 | name: stg
2 | channels:
3 | - pytorch
4 | - nvidia
5 | - conda-forge
6 | - defaults
7 | dependencies:
8 | - _libgcc_mutex=0.1=main
9 | - _openmp_mutex=5.1=1_gnu
10 | - arrow-cpp=17.0.0=h865e1df_1
11 | - asttokens=2.0.5=pyhd3eb1b0_0
12 | - aws-c-auth=0.6.19=h5eee18b_0
13 | - aws-c-cal=0.5.20=hdbd6064_0
14 | - aws-c-common=0.8.5=h5eee18b_0
15 | - aws-c-compression=0.2.16=h5eee18b_0
16 | - aws-c-event-stream=0.2.15=h6a678d5_0
17 | - aws-c-http=0.6.25=h5eee18b_0
18 | - aws-c-io=0.13.10=h5eee18b_0
19 | - aws-c-mqtt=0.7.13=h5eee18b_0
20 | - aws-c-s3=0.1.51=hdbd6064_0
21 | - aws-c-sdkutils=0.1.6=h5eee18b_0
22 | - aws-checksums=0.1.13=h5eee18b_0
23 | - aws-crt-cpp=0.18.16=h6a678d5_0
24 | - aws-sdk-cpp=1.11.212=hecad206_0
25 | - blas=1.0=mkl
26 | - boost-cpp=1.82.0=hdb19cb5_2
27 | - brotli-python=1.0.9=py311h6a678d5_7
28 | - bzip2=1.0.8=h5eee18b_6
29 | - c-ares=1.19.1=h5eee18b_0
30 | - ca-certificates=2024.12.31=h06a4308_0
31 | - cachetools=5.3.3=pyhd8ed1ab_0
32 | - certifi=2024.12.14=py311h06a4308_0
33 | - charset-normalizer=2.0.4=pyhd3eb1b0_0
34 | - comm=0.2.1=py311h06a4308_0
35 | - cuda-cudart=12.1.105=0
36 | - cuda-cupti=12.1.105=0
37 | - cuda-libraries=12.1.0=0
38 | - cuda-nvrtc=12.1.105=0
39 | - cuda-nvtx=12.1.105=0
40 | - cuda-opencl=12.4.99=0
41 | - cuda-runtime=12.1.0=0
42 | - debugpy=1.6.7=py311h6a678d5_0
43 | - decorator=5.1.1=pyhd3eb1b0_0
44 | - executing=0.8.3=pyhd3eb1b0_0
45 | - ffmpeg=4.3=hf484d3e_0
46 | - filelock=3.13.1=py311h06a4308_0
47 | - freetype=2.12.1=h4a9f257_0
48 | - gflags=2.2.2=h6a678d5_1
49 | - glog=0.5.0=h6a678d5_1
50 | - gmp=6.2.1=h295c915_3
51 | - gmpy2=2.1.2=py311hc9b5ff0_0
52 | - gnutls=3.6.15=he1e5248_0
53 | - icu=73.1=h6a678d5_0
54 | - idna=3.4=py311h06a4308_0
55 | - intel-openmp=2023.1.0=hdb19cb5_46306
56 | - ipykernel=6.28.0=py311h06a4308_0
57 | - ipython=8.20.0=py311h06a4308_0
58 | - jedi=0.18.1=py311h06a4308_1
59 | - jinja2=3.1.3=py311h06a4308_0
60 | - joblib=1.4.0=py311h06a4308_0
61 | - jpeg=9e=h5eee18b_1
62 | - jupyter_client=8.6.0=py311h06a4308_0
63 | - jupyter_core=5.5.0=py311h06a4308_0
64 | - krb5=1.20.1=h143b758_1
65 | - lame=3.100=h7b6447c_0
66 | - lcms2=2.12=h3be6417_0
67 | - ld_impl_linux-64=2.38=h1181459_1
68 | - lerc=3.0=h295c915_0
69 | - libabseil=20240116.2=cxx17_h6a678d5_0
70 | - libboost=1.82.0=h109eef0_2
71 | - libbrotlicommon=1.0.9=h5eee18b_9
72 | - libbrotlidec=1.0.9=h5eee18b_9
73 | - libbrotlienc=1.0.9=h5eee18b_9
74 | - libcublas=12.1.0.26=0
75 | - libcufft=11.0.2.4=0
76 | - libcufile=1.9.0.20=0
77 | - libcurand=10.3.5.119=0
78 | - libcurl=8.11.1=hc9e6f67_0
79 | - libcusolver=11.4.4.55=0
80 | - libcusparse=12.0.2.55=0
81 | - libdeflate=1.17=h5eee18b_1
82 | - libedit=3.1.20230828=h5eee18b_0
83 | - libev=4.33=h7f8727e_1
84 | - libevent=2.1.12=hdbd6064_1
85 | - libffi=3.4.4=h6a678d5_1
86 | - libgcc-ng=11.2.0=h1234567_1
87 | - libgfortran-ng=11.2.0=h00389a5_1
88 | - libgfortran5=11.2.0=h1234567_1
89 | - libgomp=11.2.0=h1234567_1
90 | - libgrpc=1.62.2=h2d74bed_0
91 | - libiconv=1.16=h7f8727e_2
92 | - libidn2=2.3.4=h5eee18b_0
93 | - libjpeg-turbo=2.0.0=h9bf148f_0
94 | - libnghttp2=1.57.0=h2d74bed_0
95 | - libnpp=12.0.2.50=0
96 | - libnvjitlink=12.1.105=0
97 | - libnvjpeg=12.1.1.14=0
98 | - libpng=1.6.39=h5eee18b_0
99 | - libprotobuf=4.25.3=he621ea3_0
100 | - libsodium=1.0.18=h7b6447c_0
101 | - libssh2=1.11.1=h251f7ec_0
102 | - libstdcxx-ng=11.2.0=h1234567_1
103 | - libtasn1=4.19.0=h5eee18b_0
104 | - libthrift=0.15.0=h1795dd8_2
105 | - libtiff=4.5.1=h6a678d5_0
106 | - libunistring=0.9.10=h27cfd23_0
107 | - libuuid=1.41.5=h5eee18b_0
108 | - libwebp-base=1.3.2=h5eee18b_0
109 | - llvm-openmp=14.0.6=h9e868ea_0
110 | - lz4-c=1.9.4=h6a678d5_0
111 | - markupsafe=2.1.3=py311h5eee18b_0
112 | - matplotlib-inline=0.1.6=py311h06a4308_0
113 | - mkl=2023.1.0=h213fc3f_46344
114 | - mkl-service=2.4.0=py311h5eee18b_1
115 | - mkl_fft=1.3.8=py311h5eee18b_0
116 | - mkl_random=1.2.4=py311hdb19cb5_0
117 | - mpc=1.1.0=h10f8cd9_1
118 | - mpfr=4.0.2=hb69a4c5_1
119 | - mpmath=1.3.0=py311h06a4308_0
120 | - ncurses=6.4=h6a678d5_0
121 | - nest-asyncio=1.6.0=py311h06a4308_0
122 | - nettle=3.7.3=hbbd107a_1
123 | - numpy=1.26.4=py311h08b1b3b_0
124 | - numpy-base=1.26.4=py311hf175353_0
125 | - nvidia-ml-py=12.535.133=pyhd8ed1ab_0
126 | - nvitop=1.3.2=py311h38be061_0
127 | - openh264=2.1.1=h4ff587b_0
128 | - openjpeg=2.4.0=h3ad879b_0
129 | - openssl=3.0.15=h5eee18b_0
130 | - orc=2.0.1=h2d29ad5_0
131 | - parso=0.8.3=pyhd3eb1b0_0
132 | - pexpect=4.8.0=pyhd3eb1b0_3
133 | - pillow=10.2.0=py311h5eee18b_0
134 | - pip=24.0=py311h06a4308_0
135 | - platformdirs=3.10.0=py311h06a4308_0
136 | - prompt-toolkit=3.0.43=py311h06a4308_0
137 | - prompt_toolkit=3.0.43=hd3eb1b0_0
138 | - psutil=5.9.0=py311h5eee18b_0
139 | - ptyprocess=0.7.0=pyhd3eb1b0_2
140 | - pure_eval=0.2.2=pyhd3eb1b0_0
141 | - pyarrow=17.0.0=py311ha02d727_0
142 | - pybind11-abi=4=hd3eb1b0_1
143 | - pygments=2.15.1=py311h06a4308_1
144 | - pysocks=1.7.1=py311h06a4308_0
145 | - python=3.11.9=h955ad1f_0
146 | - python-dateutil=2.9.0post0=py311h06a4308_2
147 | - python_abi=3.11=2_cp311
148 | - pytorch=2.2.1=py3.11_cuda12.1_cudnn8.9.2_0
149 | - pytorch-cuda=12.1=ha16c6d3_5
150 | - pytorch-mutex=1.0=cuda
151 | - pyyaml=6.0.1=py311h5eee18b_0
152 | - pyzmq=25.1.2=py311h6a678d5_0
153 | - re2=2022.04.01=h295c915_0
154 | - readline=8.2=h5eee18b_0
155 | - requests=2.31.0=py311h06a4308_1
156 | - s2n=1.3.27=hdbd6064_0
157 | - scikit-learn=1.4.2=py311ha02d727_1
158 | - six=1.16.0=pyhd3eb1b0_1
159 | - snappy=1.2.1=h6a678d5_0
160 | - sqlite=3.45.3=h5eee18b_0
161 | - stack_data=0.2.0=pyhd3eb1b0_0
162 | - sympy=1.12=py311h06a4308_0
163 | - tbb=2021.8.0=hdb19cb5_0
164 | - termcolor=2.4.0=pyhd8ed1ab_0
165 | - threadpoolctl=2.2.0=pyh0d69192_0
166 | - tk=8.6.14=h39e8969_0
167 | - torchaudio=2.2.1=py311_cu121
168 | - torchtriton=2.2.0=py311
169 | - torchvision=0.17.1=py311_cu121
170 | - tornado=6.3.3=py311h5eee18b_0
171 | - traitlets=5.7.1=py311h06a4308_0
172 | - typing_extensions=4.9.0=py311h06a4308_1
173 | - urllib3=2.1.0=py311h06a4308_1
174 | - utf8proc=2.6.1=h5eee18b_1
175 | - wcwidth=0.2.5=pyhd3eb1b0_0
176 | - wheel=0.43.0=py311h06a4308_0
177 | - xz=5.4.6=h5eee18b_1
178 | - yaml=0.2.5=h7b6447c_0
179 | - zeromq=4.3.5=h6a678d5_0
180 | - zlib=1.2.13=h5eee18b_1
181 | - zstd=1.5.5=hc292b87_0
182 | - pip:
183 | - absl-py==2.1.0
184 | - aiohttp==3.9.5
185 | - aiosignal==1.3.1
186 | - attrs==23.2.0
187 | - biopython==1.84
188 | - blosc2==2.6.2
189 | - cartopy==0.23.0
190 | - contourpy==1.2.1
191 | - cycler==0.12.1
192 | - easy-torch==1.3.2
193 | - easydict==1.10
194 | - einops==0.8.0
195 | - fonttools==4.53.0
196 | - frozenlist==1.4.1
197 | - fsspec==2024.5.0
198 | - grpcio==1.64.0
199 | - kiwisolver==1.4.5
200 | - markdown==3.6
201 | - matplotlib==3.9.0
202 | - msgpack==1.0.8
203 | - multidict==6.0.5
204 | - ndindex==1.8
205 | - networkx==3.3
206 | - numexpr==2.10.0
207 | - packaging==23.1
208 | - pandas==2.2.2
209 | - positional-encodings==6.0.1
210 | - prefetch-generator==1.0.3
211 | - protobuf==5.27.0
212 | - py-cpuinfo==9.0.0
213 | - pyparsing==3.1.2
214 | - pyproj==3.6.1
215 | - pyshp==2.3.1
216 | - pytz==2024.1
217 | - scipy==1.13.1
218 | - seaborn==0.13.2
219 | - setproctitle==1.3.3
220 | - setuptools==59.5.0
221 | - shapely==2.0.6
222 | - tables==3.9.2
223 | - tensorboard==2.16.2
224 | - tensorboard-data-server==0.7.2
225 | - timm==0.6.7
226 | - torch-geometric==2.5.3
227 | - tqdm==4.66.4
228 | - tzdata==2024.1
229 | - werkzeug==3.0.3
230 | - yarl==1.9.4
231 |
232 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/retrain_st-100/retrain_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 21:28:30,084 - logger name:log/ENERGY-Wind/retrain_st-100/retrain_st.log
2 | 2025-04-27 21:28:30,085 - params : {'conf': 'conf/ENERGY-Wind/retrain.json', 'seed': 100, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'retrain_st', 'method': 'TrafficStream', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': False, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/retrain_st-100', 'logger': }
3 | 2025-04-27 21:28:30,099 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 21:28:30,577 - [*] Year 0 Dataset load!
5 | 2025-04-27 21:28:30,580 - [*] Year 0 Training start
6 | 2025-04-27 21:28:30,959 - node number torch.Size([13184, 12])
7 | 2025-04-27 21:28:32,347 - epoch:0, training loss:225.4257 validation loss:2.0238
8 | 2025-04-27 21:28:33,895 - epoch:1, training loss:4.4564 validation loss:3.2258
9 | 2025-04-27 21:28:35,387 - epoch:2, training loss:2.6371 validation loss:3.2146
10 | 2025-04-27 21:28:36,920 - epoch:3, training loss:2.3228 validation loss:3.4737
11 | 2025-04-27 21:28:38,447 - epoch:4, training loss:2.2574 validation loss:2.9943
12 | 2025-04-27 21:28:39,948 - epoch:5, training loss:2.2159 validation loss:2.8308
13 | 2025-04-27 21:28:41,486 - epoch:6, training loss:2.1844 validation loss:3.2155
14 | 2025-04-27 21:28:42,263 - [*] loss:19.0199
15 | 2025-04-27 21:28:42,269 - [*] year 0, testing
16 | 2025-04-27 21:28:42,330 - T:3 MAE 5.1983 RMSE 5.4105 MAPE 13.6366
17 | 2025-04-27 21:28:42,450 - T:6 MAE 3.9582 RMSE 4.4382 MAPE 10.4497
18 | 2025-04-27 21:28:42,853 - T:12 MAE 3.8678 RMSE 4.4074 MAPE 10.1850
19 | 2025-04-27 21:28:42,854 - T:Avg MAE 4.3847 RMSE 4.7815 MAPE 11.5351
20 | 2025-04-27 21:28:42,854 - Finished optimization, total time:6.47 s, best model:log/ENERGY-Wind/retrain_st-100/0/2.0238.pkl
21 | 2025-04-27 21:28:42,865 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
22 | 2025-04-27 21:28:43,051 - [*] Year 1 Dataset load!
23 | 2025-04-27 21:28:43,053 - [*] Year 1 Training start
24 | 2025-04-27 21:28:43,658 - node number torch.Size([14464, 12])
25 | 2025-04-27 21:28:44,816 - epoch:0, training loss:355.6856 validation loss:2.5475
26 | 2025-04-27 21:28:46,539 - epoch:1, training loss:7.3722 validation loss:1.4116
27 | 2025-04-27 21:28:48,307 - epoch:2, training loss:3.6216 validation loss:1.3033
28 | 2025-04-27 21:28:50,033 - epoch:3, training loss:2.8801 validation loss:1.1751
29 | 2025-04-27 21:28:51,837 - epoch:4, training loss:2.5470 validation loss:1.0964
30 | 2025-04-27 21:28:53,574 - epoch:5, training loss:2.3183 validation loss:1.1243
31 | 2025-04-27 21:28:55,292 - epoch:6, training loss:2.1758 validation loss:1.1107
32 | 2025-04-27 21:28:56,946 - epoch:7, training loss:2.0729 validation loss:1.0291
33 | 2025-04-27 21:28:58,632 - epoch:8, training loss:2.0323 validation loss:1.3247
34 | 2025-04-27 21:29:00,427 - epoch:9, training loss:2.0584 validation loss:1.0928
35 | 2025-04-27 21:29:02,072 - epoch:10, training loss:2.1172 validation loss:1.0292
36 | 2025-04-27 21:29:03,721 - epoch:11, training loss:1.9144 validation loss:1.2508
37 | 2025-04-27 21:29:05,593 - epoch:12, training loss:1.9077 validation loss:1.0596
38 | 2025-04-27 21:29:07,282 - epoch:13, training loss:1.8992 validation loss:1.0960
39 | 2025-04-27 21:29:08,025 - [*] loss:6.1480
40 | 2025-04-27 21:29:08,037 - [*] year 1, testing
41 | 2025-04-27 21:29:08,094 - T:3 MAE 2.0285 RMSE 2.1961 MAPE 5.5796
42 | 2025-04-27 21:29:08,190 - T:6 MAE 2.0788 RMSE 2.2852 MAPE 5.7483
43 | 2025-04-27 21:29:08,515 - T:12 MAE 2.1866 RMSE 2.4943 MAPE 6.0965
44 | 2025-04-27 21:29:08,515 - T:Avg MAE 2.0875 RMSE 2.3064 MAPE 5.7790
45 | 2025-04-27 21:29:08,515 - Finished optimization, total time:14.10 s, best model:log/ENERGY-Wind/retrain_st-100/1/1.0291.pkl
46 | 2025-04-27 21:29:08,524 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
47 | 2025-04-27 21:29:08,722 - [*] Year 2 Dataset load!
48 | 2025-04-27 21:29:08,723 - [*] Year 2 Training start
49 | 2025-04-27 21:29:09,319 - node number torch.Size([15616, 12])
50 | 2025-04-27 21:29:10,445 - epoch:0, training loss:140.8938 validation loss:8.4717
51 | 2025-04-27 21:29:12,170 - epoch:1, training loss:3.8823 validation loss:8.1794
52 | 2025-04-27 21:29:13,816 - epoch:2, training loss:2.3954 validation loss:7.9038
53 | 2025-04-27 21:29:15,548 - epoch:3, training loss:2.2051 validation loss:8.0695
54 | 2025-04-27 21:29:17,176 - epoch:4, training loss:2.1054 validation loss:8.1912
55 | 2025-04-27 21:29:18,876 - epoch:5, training loss:2.0754 validation loss:8.1588
56 | 2025-04-27 21:29:20,582 - epoch:6, training loss:2.0188 validation loss:8.2261
57 | 2025-04-27 21:29:22,458 - epoch:7, training loss:1.9779 validation loss:8.3471
58 | 2025-04-27 21:29:24,114 - epoch:8, training loss:1.9675 validation loss:8.2937
59 | 2025-04-27 21:29:24,858 - [*] loss:62.3939
60 | 2025-04-27 21:29:24,871 - [*] year 2, testing
61 | 2025-04-27 21:29:24,936 - T:3 MAE 7.7504 RMSE 7.8261 MAPE 34.5207
62 | 2025-04-27 21:29:25,043 - T:6 MAE 7.7561 RMSE 7.8536 MAPE 34.6232
63 | 2025-04-27 21:29:25,422 - T:12 MAE 7.7502 RMSE 7.9088 MAPE 34.7084
64 | 2025-04-27 21:29:25,422 - T:Avg MAE 7.7538 RMSE 7.8599 MAPE 34.6126
65 | 2025-04-27 21:29:25,422 - Finished optimization, total time:8.88 s, best model:log/ENERGY-Wind/retrain_st-100/2/7.9038.pkl
66 | 2025-04-27 21:29:25,432 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
67 | 2025-04-27 21:29:25,653 - [*] Year 3 Dataset load!
68 | 2025-04-27 21:29:25,655 - [*] Year 3 Training start
69 | 2025-04-27 21:29:26,317 - node number torch.Size([17152, 12])
70 | 2025-04-27 21:29:27,464 - epoch:0, training loss:54.0943 validation loss:6.3762
71 | 2025-04-27 21:29:29,210 - epoch:1, training loss:3.4168 validation loss:6.6348
72 | 2025-04-27 21:29:30,954 - epoch:2, training loss:2.7114 validation loss:6.7959
73 | 2025-04-27 21:29:32,621 - epoch:3, training loss:2.3331 validation loss:6.3674
74 | 2025-04-27 21:29:34,366 - epoch:4, training loss:2.2396 validation loss:6.8152
75 | 2025-04-27 21:29:36,093 - epoch:5, training loss:2.2216 validation loss:6.6965
76 | 2025-04-27 21:29:37,787 - epoch:6, training loss:2.1084 validation loss:6.7878
77 | 2025-04-27 21:29:39,516 - epoch:7, training loss:2.0852 validation loss:6.5687
78 | 2025-04-27 21:29:41,231 - epoch:8, training loss:2.0293 validation loss:6.5747
79 | 2025-04-27 21:29:42,931 - epoch:9, training loss:2.0250 validation loss:6.4814
80 | 2025-04-27 21:29:43,693 - [*] loss:53.5346
81 | 2025-04-27 21:29:43,699 - [*] year 3, testing
82 | 2025-04-27 21:29:43,762 - T:3 MAE 6.9991 RMSE 7.2323 MAPE 156.1163
83 | 2025-04-27 21:29:43,872 - T:6 MAE 6.9682 RMSE 7.2722 MAPE 157.5299
84 | 2025-04-27 21:29:44,250 - T:12 MAE 6.9170 RMSE 7.3521 MAPE 161.1680
85 | 2025-04-27 21:29:44,251 - T:Avg MAE 6.9650 RMSE 7.2791 MAPE 157.9249
86 | 2025-04-27 21:29:44,251 - Finished optimization, total time:10.03 s, best model:log/ENERGY-Wind/retrain_st-100/3/6.3674.pkl
87 | 2025-04-27 21:29:44,253 -
88 |
89 |
90 | 2025-04-27 21:29:44,253 - 3 MAE 5.20 2.03 7.75 7.00 5.49
91 | 2025-04-27 21:29:44,253 - 3 RMSE 5.41 2.20 7.83 7.23 5.67
92 | 2025-04-27 21:29:44,253 - 3 MAPE 13.64 5.58 34.52 156.12 52.46
93 | 2025-04-27 21:29:44,253 - 6 MAE 3.96 2.08 7.76 6.97 5.19
94 | 2025-04-27 21:29:44,253 - 6 RMSE 4.44 2.29 7.85 7.27 5.46
95 | 2025-04-27 21:29:44,253 - 6 MAPE 10.45 5.75 34.62 157.53 52.09
96 | 2025-04-27 21:29:44,253 - 12 MAE 3.87 2.19 7.75 6.92 5.18
97 | 2025-04-27 21:29:44,253 - 12 RMSE 4.41 2.49 7.91 7.35 5.54
98 | 2025-04-27 21:29:44,253 - 12 MAPE 10.19 6.10 34.71 161.17 53.04
99 | 2025-04-27 21:29:44,253 - Avg MAE 4.38 2.09 7.75 6.97 5.30
100 | 2025-04-27 21:29:44,254 - Avg RMSE 4.78 2.31 7.86 7.28 5.56
101 | 2025-04-27 21:29:44,254 - Avg MAPE 11.54 5.78 34.61 157.92 52.46
102 | 2025-04-27 21:29:44,254 - year 0 total_time 6.4746 average_time 0.9250 epoch 7
103 | 2025-04-27 21:29:44,254 - year 1 total_time 14.1014 average_time 1.0073 epoch 14
104 | 2025-04-27 21:29:44,254 - year 2 total_time 8.8753 average_time 0.9862 epoch 9
105 | 2025-04-27 21:29:44,254 - year 3 total_time 10.0269 average_time 1.0027 epoch 10
106 | 2025-04-27 21:29:44,254 - total time: 39.4781
107 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/eac_st-24/eac_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 18:00:29,031 - logger name:log/ENERGY-Wind/eac_st-24/eac_st.log
2 | 2025-04-27 18:00:29,032 - params : {'conf': 'conf/ENERGY-Wind/eac.json', 'seed': 24, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'eac_st', 'method': 'EAC', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'rank': 6, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/eac_st-24', 'logger': }
3 | 2025-04-27 18:00:29,045 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 18:00:29,531 - [*] Year 0 Dataset load!
5 | 2025-04-27 18:00:29,533 - RAP initialized with backbone stgnn
6 | 2025-04-27 18:00:29,534 - [*] Year 0 Training start
7 | 2025-04-27 18:00:29,946 - node number torch.Size([13184, 12])
8 | 2025-04-27 18:00:31,325 - epoch:0, training loss:184.6102 validation loss:2.3702
9 | 2025-04-27 18:00:32,845 - epoch:1, training loss:4.1069 validation loss:3.0105
10 | 2025-04-27 18:00:34,328 - epoch:2, training loss:2.3998 validation loss:3.1437
11 | 2025-04-27 18:00:35,785 - epoch:3, training loss:2.1881 validation loss:3.1606
12 | 2025-04-27 18:00:37,261 - epoch:4, training loss:2.1551 validation loss:3.2416
13 | 2025-04-27 18:00:38,729 - epoch:5, training loss:2.0965 validation loss:3.3198
14 | 2025-04-27 18:00:40,266 - epoch:6, training loss:2.0335 validation loss:3.2225
15 | 2025-04-27 18:00:40,920 - [*] loss:24.3819
16 | 2025-04-27 18:00:40,929 - [*] year 0, testing
17 | 2025-04-27 18:00:40,992 - T:3 MAE 4.5458 RMSE 4.8979 MAPE 11.8466
18 | 2025-04-27 18:00:41,114 - T:6 MAE 4.0301 RMSE 4.4814 MAPE 10.5427
19 | 2025-04-27 18:00:41,499 - T:12 MAE 4.4176 RMSE 4.9831 MAPE 11.5426
20 | 2025-04-27 18:00:41,499 - T:Avg MAE 4.3031 RMSE 4.7534 MAPE 11.2399
21 | 2025-04-27 18:00:41,500 - Finished optimization, total time:6.53 s, best model:log/ENERGY-Wind/eac_st-24/0/2.3702.pkl
22 | 2025-04-27 18:00:41,508 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
23 | 2025-04-27 18:00:41,691 - [*] Year 1 Dataset load!
24 | 2025-04-27 18:00:41,693 - [*] load from log/ENERGY-Wind/eac_st-24/0/2.3702.pkl
25 | 2025-04-27 18:00:41,698 - RAP initialized with backbone stgnn
26 | 2025-04-27 18:00:41,702 - [*] Year 1 Training start
27 | 2025-04-27 18:00:42,177 - node number torch.Size([14464, 12])
28 | 2025-04-27 18:00:43,303 - epoch:0, training loss:8.6945 validation loss:1.8806
29 | 2025-04-27 18:00:44,839 - epoch:1, training loss:5.9875 validation loss:1.8737
30 | 2025-04-27 18:00:46,393 - epoch:2, training loss:5.7959 validation loss:1.9022
31 | 2025-04-27 18:00:47,946 - epoch:3, training loss:5.6919 validation loss:1.9054
32 | 2025-04-27 18:00:49,645 - epoch:4, training loss:5.5564 validation loss:1.8606
33 | 2025-04-27 18:00:51,260 - epoch:5, training loss:5.4323 validation loss:1.7816
34 | 2025-04-27 18:00:52,809 - epoch:6, training loss:5.3950 validation loss:1.7583
35 | 2025-04-27 18:00:54,330 - epoch:7, training loss:5.3902 validation loss:1.7402
36 | 2025-04-27 18:00:55,937 - epoch:8, training loss:5.3810 validation loss:1.7875
37 | 2025-04-27 18:00:57,522 - epoch:9, training loss:5.2892 validation loss:1.8461
38 | 2025-04-27 18:00:59,010 - epoch:10, training loss:5.1077 validation loss:1.8472
39 | 2025-04-27 18:01:00,571 - epoch:11, training loss:5.4639 validation loss:1.7927
40 | 2025-04-27 18:01:02,147 - epoch:12, training loss:5.5078 validation loss:1.7599
41 | 2025-04-27 18:01:03,663 - epoch:13, training loss:5.1592 validation loss:1.9337
42 | 2025-04-27 18:01:04,322 - [*] loss:9.1935
43 | 2025-04-27 18:01:04,330 - [*] year 1, testing
44 | 2025-04-27 18:01:04,389 - T:3 MAE 1.9759 RMSE 2.3566 MAPE 5.6613
45 | 2025-04-27 18:01:04,477 - T:6 MAE 2.4801 RMSE 2.9473 MAPE 7.0500
46 | 2025-04-27 18:01:04,742 - T:12 MAE 2.4420 RMSE 3.0029 MAPE 6.9369
47 | 2025-04-27 18:01:04,742 - T:Avg MAE 2.3074 RMSE 2.7548 MAPE 6.5646
48 | 2025-04-27 18:01:04,745 - Finished optimization, total time:12.78 s, best model:log/ENERGY-Wind/eac_st-24/1/1.7402.pkl
49 | 2025-04-27 18:01:04,754 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
50 | 2025-04-27 18:01:04,948 - [*] Year 2 Dataset load!
51 | 2025-04-27 18:01:04,950 - [*] load from log/ENERGY-Wind/eac_st-24/1/1.7402.pkl
52 | 2025-04-27 18:01:04,956 - RAP initialized with backbone stgnn
53 | 2025-04-27 18:01:04,957 - [*] Year 2 Training start
54 | 2025-04-27 18:01:05,480 - node number torch.Size([15616, 12])
55 | 2025-04-27 18:01:06,571 - epoch:0, training loss:10.9981 validation loss:7.8587
56 | 2025-04-27 18:01:08,116 - epoch:1, training loss:4.9616 validation loss:8.2564
57 | 2025-04-27 18:01:09,681 - epoch:2, training loss:4.9070 validation loss:8.0345
58 | 2025-04-27 18:01:11,206 - epoch:3, training loss:4.8675 validation loss:8.3320
59 | 2025-04-27 18:01:12,807 - epoch:4, training loss:4.9497 validation loss:8.5113
60 | 2025-04-27 18:01:14,503 - epoch:5, training loss:4.8259 validation loss:8.1815
61 | 2025-04-27 18:01:16,117 - epoch:6, training loss:4.8514 validation loss:8.1082
62 | 2025-04-27 18:01:16,762 - [*] loss:64.1548
63 | 2025-04-27 18:01:16,770 - [*] year 2, testing
64 | 2025-04-27 18:01:16,832 - T:3 MAE 7.6606 RMSE 7.8329 MAPE 34.3631
65 | 2025-04-27 18:01:16,932 - T:6 MAE 8.0780 RMSE 8.2875 MAPE 36.2115
66 | 2025-04-27 18:01:17,234 - T:12 MAE 7.6828 RMSE 8.0096 MAPE 34.6065
67 | 2025-04-27 18:01:17,234 - T:Avg MAE 7.8153 RMSE 8.0434 MAPE 35.0790
68 | 2025-04-27 18:01:17,238 - Finished optimization, total time:6.55 s, best model:log/ENERGY-Wind/eac_st-24/2/7.8587.pkl
69 | 2025-04-27 18:01:17,248 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
70 | 2025-04-27 18:01:17,459 - [*] Year 3 Dataset load!
71 | 2025-04-27 18:01:17,461 - [*] load from log/ENERGY-Wind/eac_st-24/2/7.8587.pkl
72 | 2025-04-27 18:01:17,467 - RAP initialized with backbone stgnn
73 | 2025-04-27 18:01:17,468 - [*] Year 3 Training start
74 | 2025-04-27 18:01:18,038 - node number torch.Size([17152, 12])
75 | 2025-04-27 18:01:19,121 - epoch:0, training loss:22.2815 validation loss:5.9804
76 | 2025-04-27 18:01:20,662 - epoch:1, training loss:4.1247 validation loss:6.6516
77 | 2025-04-27 18:01:22,212 - epoch:2, training loss:3.8347 validation loss:6.5244
78 | 2025-04-27 18:01:23,757 - epoch:3, training loss:3.8194 validation loss:6.3916
79 | 2025-04-27 18:01:25,296 - epoch:4, training loss:3.7989 validation loss:6.7655
80 | 2025-04-27 18:01:26,851 - epoch:5, training loss:3.7967 validation loss:6.8366
81 | 2025-04-27 18:01:28,404 - epoch:6, training loss:3.7753 validation loss:6.5997
82 | 2025-04-27 18:01:29,067 - [*] loss:49.2990
83 | 2025-04-27 18:01:29,081 - [*] year 3, testing
84 | 2025-04-27 18:01:29,147 - T:3 MAE 6.5525 RMSE 6.9010 MAPE 149.4760
85 | 2025-04-27 18:01:29,258 - T:6 MAE 6.7271 RMSE 7.1512 MAPE 153.7438
86 | 2025-04-27 18:01:29,626 - T:12 MAE 6.4868 RMSE 7.0562 MAPE 153.7288
87 | 2025-04-27 18:01:29,626 - T:Avg MAE 6.5891 RMSE 7.0247 MAPE 151.6354
88 | 2025-04-27 18:01:29,626 - Finished optimization, total time:6.42 s, best model:log/ENERGY-Wind/eac_st-24/3/5.9804.pkl
89 | 2025-04-27 18:01:29,628 -
90 |
91 |
92 | 2025-04-27 18:01:29,628 - 3 MAE 4.55 1.98 7.66 6.55 5.18
93 | 2025-04-27 18:01:29,628 - 3 RMSE 4.90 2.36 7.83 6.90 5.50
94 | 2025-04-27 18:01:29,628 - 3 MAPE 11.85 5.66 34.36 149.48 50.34
95 | 2025-04-27 18:01:29,628 - 6 MAE 4.03 2.48 8.08 6.73 5.33
96 | 2025-04-27 18:01:29,628 - 6 RMSE 4.48 2.95 8.29 7.15 5.72
97 | 2025-04-27 18:01:29,628 - 6 MAPE 10.54 7.05 36.21 153.74 51.89
98 | 2025-04-27 18:01:29,628 - 12 MAE 4.42 2.44 7.68 6.49 5.26
99 | 2025-04-27 18:01:29,628 - 12 RMSE 4.98 3.00 8.01 7.06 5.76
100 | 2025-04-27 18:01:29,629 - 12 MAPE 11.54 6.94 34.61 153.73 51.70
101 | 2025-04-27 18:01:29,629 - Avg MAE 4.30 2.31 7.82 6.59 5.25
102 | 2025-04-27 18:01:29,629 - Avg RMSE 4.75 2.75 8.04 7.02 5.64
103 | 2025-04-27 18:01:29,629 - Avg MAPE 11.24 6.56 35.08 151.64 51.13
104 | 2025-04-27 18:01:29,629 - year 0 total_time 6.5312 average_time 0.9331 epoch 7
105 | 2025-04-27 18:01:29,629 - year 1 total_time 12.7849 average_time 0.9132 epoch 14
106 | 2025-04-27 18:01:29,629 - year 2 total_time 6.5509 average_time 0.9359 epoch 7
107 | 2025-04-27 18:01:29,629 - year 3 total_time 6.4198 average_time 0.9171 epoch 7
108 | 2025-04-27 18:01:29,629 - total time: 32.2868
109 |
--------------------------------------------------------------------------------
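The summary block that closes each log has no column header; read against the per-year test lines above it, each row appears to be: horizon, metric, one value per year (0 through 3), and finally the mean over the four years. A quick check in Python against the "3 MAE" row of the eac_st-24 log above, using the T:3 MAE figures reported at each year's testing step (values copied from that log):

    per_year_t3_mae = [4.5458, 1.9759, 7.6606, 6.5525]  # years 0-3, from the eac_st-24 log
    print(round(sum(per_year_t3_mae) / len(per_year_t3_mae), 2))  # 5.18, the last column of the "3 MAE" row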
/src/trainer/stkec_trainer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | import os.path as osp
5 | import networkx as nx
6 | import torch.nn.functional as func
7 | from torch import optim
8 | from datetime import datetime
9 | from torch_geometric.utils import to_dense_batch
10 |
11 | from src.model.ewc import EWC
12 | from torch_geometric.loader import DataLoader
13 | from dataer.SpatioTemporalDataset import SpatioTemporalDataset
14 | from utils.metric import cal_metric, masked_mae_np
15 | from utils.common_tools import mkdirs, load_best_model, get_max_columns
16 |
17 |
18 | def train(inputs, args):
19 | path = osp.join(args.path, str(args.year)) # Define the current year model save path
20 | mkdirs(path)
21 |
22 | # Setting the loss function
23 | if args.loss == "mse":
24 | lossfunc = func.mse_loss
25 | elif args.loss == "huber":
26 | lossfunc = func.smooth_l1_loss
27 |
28 | cluster_lossfunc = nn.CrossEntropyLoss()
29 |
30 | # Dataset definition
31 | if args.strategy == 'incremental' and args.year > args.begin_year:
32 | # Data loaders for the incremental strategy (subgraph nodes only)
33 | train_loader = DataLoader(SpatioTemporalDataset("", "", x=inputs["train_x"][:, :, args.subgraph.numpy()], y=inputs["train_y"][:, :, args.subgraph.numpy()], \
34 | edge_index="", mode="subgraph"), batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=32)
35 | val_loader = DataLoader(SpatioTemporalDataset("", "", x=inputs["val_x"][:, :, args.subgraph.numpy()], y=inputs["val_y"][:, :, args.subgraph.numpy()], \
36 | edge_index="", mode="subgraph"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
37 | # Construct the adjacency matrix of the subgraph
38 | graph = nx.Graph()
39 | graph.add_nodes_from(range(args.subgraph.size(0)))
40 | graph.add_edges_from(args.subgraph_edge_index.numpy().T)
41 | adj = nx.to_numpy_array(graph) # Convert to adjacency matrix
42 | adj = adj / (np.sum(adj, 1, keepdims=True) + 1e-6) # Normalized adjacency matrix
43 | vars(args)["sub_adj"] = torch.from_numpy(adj).to(torch.float).to(args.device)
44 | else:
45 | # Common data loader
46 | train_loader = DataLoader(SpatioTemporalDataset(inputs, "train"), batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=32)
47 | val_loader = DataLoader(SpatioTemporalDataset(inputs, "val"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
48 | vars(args)["sub_adj"] = vars(args)["adj"] # Use the adjacency matrix of the entire graph
49 |
50 | # Test data loader
51 | test_loader = DataLoader(SpatioTemporalDataset(inputs, "test"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
52 |
53 | vars(args)["past_adj"] = args.sub_adj
54 |
55 | args.logger.info("[*] Year " + str(args.year) + " Dataset load!") # Record dataset loading log
56 |
57 | # Model definition
58 | if args.init == True and args.year > args.begin_year:
59 | gnn_model, _ = load_best_model(args) # If it is not the first year, load the optimal model
60 | if args.ewc: # If the EWC strategy is enabled, wrap the loaded model with the EWC wrapper
61 | args.logger.info("[*] EWC! lambda {:.6f}".format(args.ewc_lambda)) # Record EWC related parameters
62 | model = EWC(gnn_model, args.adj, args.ewc_lambda, args.ewc_strategy) # Initialize the EWC model
63 | ewc_loader = DataLoader(SpatioTemporalDataset(inputs, "train"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
64 | model.register_ewc_params_for_stkec(ewc_loader, lossfunc, args.device) # Register EWC parameters
65 | else:
66 | model = gnn_model # Otherwise, continue training from the loaded best model
67 | else:
68 | gnn_model = args.methods[args.method](args).to(args.device) # If it is the first year, use the base model
69 | model = gnn_model
70 |
71 | # Model Optimizer
72 | optimizer = optim.AdamW(model.parameters(), lr=args.lr)
73 |
74 | args.logger.info("[*] Year " + str(args.year) + " Training start")
75 | lowest_validation_loss = 1e7
76 | counter = 0
77 | patience = 5
78 | model.train()
79 | use_time = []
80 |
81 | for epoch in range(args.epoch):
82 |
83 | start_time = datetime.now()
84 |
85 | # Training the model
86 | cn = 0
87 | training_loss = 0.0
88 | loss_cluster = 0.0
89 | for batch_idx, data in enumerate(train_loader):
90 | if epoch == 0 and batch_idx == 0:
91 | args.logger.info("node number {}".format(data.x.shape))
92 | data = data.to(args.device, non_blocking=True)
93 | optimizer.zero_grad()
94 |
95 | pred, attention = model(data, args.sub_adj)
96 |
97 | batch_att = pred.shape[0] // args.sub_adj.shape[0]
98 | if args.year == args.begin_year:
99 | attention_label = torch.from_numpy(args.attention.repeat(batch_att, axis=0)).to(args.device)
100 | loss_cluster = cluster_lossfunc(attention.data.cpu(), get_max_columns(attention_label).data.cpu().long())
101 |
102 |
103 | if args.strategy == "incremental" and args.year > args.begin_year:
104 | pred, _ = to_dense_batch(pred, batch=data.batch) # to_dense_batch reshapes the flattened node-level predictions into a dense [batch, num_nodes, features] tensor
105 | data.y, _ = to_dense_batch(data.y, batch=data.batch)
106 | pred = pred[:, args.mapping, :] # Select the changed nodes via the mapping to get their predictions and ground truth
107 | data.y = data.y[:, args.mapping, :]
108 |
109 | loss = lossfunc(data.y, pred, reduction="mean") + loss_cluster * 0.1 # Calculating Losses
110 |
111 | if args.ewc and args.year > args.begin_year:
112 | loss += model.compute_consolidation_loss() # Calculate and add ewc loss
113 |
114 | training_loss += float(loss)
115 | cn += 1
116 |
117 | loss.backward()
118 | optimizer.step()
119 |
120 | if epoch == 0:
121 | total_time = (datetime.now() - start_time).total_seconds()
122 | else:
123 | total_time += (datetime.now() - start_time).total_seconds()
124 | use_time.append((datetime.now() - start_time).total_seconds())
125 | training_loss = training_loss / cn
126 |
127 | # Validate the model
128 | validation_loss = 0.0
129 | cn = 0
130 | with torch.no_grad():
131 | for batch_idx, data in enumerate(val_loader):
132 | data = data.to(args.device, non_blocking=True)
133 | pred, attention = model(data, args.sub_adj)
134 | if args.strategy == "incremental" and args.year > args.begin_year:
135 | pred, _ = to_dense_batch(pred, batch=data.batch)
136 | data.y, _ = to_dense_batch(data.y, batch=data.batch)
137 | pred = pred[:, args.mapping, :]
138 | data.y = data.y[:, args.mapping, :]
139 |
140 | loss = masked_mae_np(data.y.cpu().data.numpy(), pred.cpu().data.numpy(), 0)
141 | validation_loss += float(loss)
142 | cn += 1
143 | validation_loss = float(validation_loss/cn)
144 |
145 |
146 | args.logger.info(f"epoch:{epoch}, training loss:{training_loss:.4f} validation loss:{validation_loss:.4f}")
147 |
148 | # Early Stopping Strategy
149 | if validation_loss <= lowest_validation_loss:
150 | counter = 0
151 | lowest_validation_loss = round(validation_loss, 4)
152 | torch.save({'model_state_dict': gnn_model.state_dict()}, osp.join(path, str(round(validation_loss,4))+".pkl"))
153 | else:
154 | counter += 1
155 | if counter > patience:
156 | break
157 |
158 | best_model_path = osp.join(path, str(lowest_validation_loss)+".pkl") # The model with the lowest validation loss is selected as the optimal model
159 | best_model = args.methods[args.method](args)
160 | best_model.load_state_dict(torch.load(best_model_path, args.device)["model_state_dict"])
161 | best_model = best_model.to(args.device)
162 |
163 | # Test the Model
164 | test_model(best_model, args, test_loader, True)
165 | args.result[args.year] = {"total_time": total_time, "average_time": sum(use_time)/len(use_time), "epoch_num": epoch+1}
166 | args.logger.info("Finished optimization, total time:{:.2f} s, best model:{}".format(total_time, best_model_path))
167 |
168 |
169 | def test_model(model, args, testset, pin_memory):
170 | model.eval()
171 | pred_ = []
172 | truth_ = []
173 | loss = 0.0
174 | with torch.no_grad():
175 | cn = 0
176 | for data in testset:
177 | data = data.to(args.device, non_blocking=pin_memory)
178 | pred, attention = model(data, args.adj)
179 | loss += func.mse_loss(data.y, pred, reduction="mean")
180 | pred, _ = to_dense_batch(pred, batch=data.batch)
181 | data.y, _ = to_dense_batch(data.y, batch=data.batch)
182 | pred_.append(pred.cpu().data.numpy())
183 | truth_.append(data.y.cpu().data.numpy())
184 | cn += 1
185 | loss = loss / cn
186 | args.logger.info("[*] loss:{:.4f}".format(loss))
187 | pred_ = np.concatenate(pred_, 0)
188 | truth_ = np.concatenate(truth_, 0)
189 | cal_metric(truth_, pred_, args)
--------------------------------------------------------------------------------
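A minimal, self-contained sketch of the row-normalized subgraph adjacency that train() builds above for the incremental strategy; the toy edge_index and node count are assumed purely for illustration, and the device transfer is omitted:

    import numpy as np
    import networkx as nx
    import torch

    num_nodes = 4
    edge_index = np.array([[0, 1, 2], [1, 2, 3]])  # hypothetical COO edge list

    graph = nx.Graph()
    graph.add_nodes_from(range(num_nodes))
    graph.add_edges_from(edge_index.T)                    # each column of edge_index becomes an undirected edge
    adj = nx.to_numpy_array(graph)                        # dense 0/1 adjacency matrix
    adj = adj / (np.sum(adj, 1, keepdims=True) + 1e-6)    # row-normalize; the epsilon guards isolated nodes
    sub_adj = torch.from_numpy(adj).to(torch.float)       # analogous to the tensor stored as args.sub_adj
    print(sub_adj)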
/log/ENERGY-Wind/stlora_st-622/stlora_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 17:12:41,414 - logger name:log/ENERGY-Wind/stlora_st-622/stlora_st.log
2 | 2025-04-27 17:12:41,415 - params : {'conf': 'conf/ENERGY-Wind/stlora.json', 'seed': 622, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'stlora_st', 'method': 'STLoRA', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/stlora_st-622', 'logger': }
3 | 2025-04-27 17:12:41,432 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 17:12:41,925 - [*] Year 0 Dataset load!
5 | 2025-04-27 17:12:41,928 - RAP initialized with backbone stgnn
6 | 2025-04-27 17:12:41,929 - [*] Year 0 Training start
7 | 2025-04-27 17:12:42,315 - node number torch.Size([13184, 12])
8 | 2025-04-27 17:12:43,607 - epoch:0, training loss:159.0935 validation loss:4.0373
9 | 2025-04-27 17:12:45,098 - epoch:1, training loss:4.5645 validation loss:3.0058
10 | 2025-04-27 17:12:46,595 - epoch:2, training loss:2.8883 validation loss:3.1767
11 | 2025-04-27 17:12:48,057 - epoch:3, training loss:2.5437 validation loss:3.4735
12 | 2025-04-27 17:12:49,634 - epoch:4, training loss:2.3660 validation loss:3.3305
13 | 2025-04-27 17:12:51,144 - epoch:5, training loss:2.2076 validation loss:3.1783
14 | 2025-04-27 17:12:52,657 - epoch:6, training loss:2.1160 validation loss:3.3866
15 | 2025-04-27 17:12:54,185 - epoch:7, training loss:2.0848 validation loss:3.6188
16 | 2025-04-27 17:12:54,990 - [*] loss:33.6749
17 | 2025-04-27 17:12:54,998 - [*] year 0, testing
18 | 2025-04-27 17:12:55,061 - T:3 MAE 5.4772 RMSE 5.6311 MAPE 14.7622
19 | 2025-04-27 17:12:55,189 - T:6 MAE 5.5267 RMSE 5.7102 MAPE 14.8765
20 | 2025-04-27 17:12:55,593 - T:12 MAE 5.5298 RMSE 5.7927 MAPE 14.8589
21 | 2025-04-27 17:12:55,594 - T:Avg MAE 5.5166 RMSE 5.7104 MAPE 14.8505
22 | 2025-04-27 17:12:55,594 - Finished optimization, total time:7.41 s, best model:log/ENERGY-Wind/stlora_st-622/0/3.0058.pkl
23 | 2025-04-27 17:12:55,602 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
24 | 2025-04-27 17:12:55,771 - [*] Year 1 Dataset load!
25 | 2025-04-27 17:12:55,773 - [*] load from log/ENERGY-Wind/stlora_st-622/0/3.0058.pkl
26 | 2025-04-27 17:12:55,778 - RAP initialized with backbone stgnn
27 | 2025-04-27 17:12:55,779 - [*] Year 1 Training start
28 | 2025-04-27 17:12:56,343 - node number torch.Size([14464, 12])
29 | 2025-04-27 17:12:57,459 - epoch:0, training loss:21.9692 validation loss:1.9010
30 | 2025-04-27 17:12:59,119 - epoch:1, training loss:2.8808 validation loss:1.1262
31 | 2025-04-27 17:13:00,729 - epoch:2, training loss:2.3170 validation loss:1.1208
32 | 2025-04-27 17:13:02,368 - epoch:3, training loss:2.0935 validation loss:1.1340
33 | 2025-04-27 17:13:04,038 - epoch:4, training loss:2.0285 validation loss:0.9937
34 | 2025-04-27 17:13:05,663 - epoch:5, training loss:2.0013 validation loss:1.0164
35 | 2025-04-27 17:13:07,330 - epoch:6, training loss:1.9507 validation loss:1.1910
36 | 2025-04-27 17:13:09,061 - epoch:7, training loss:1.9189 validation loss:0.9609
37 | 2025-04-27 17:13:10,731 - epoch:8, training loss:1.9130 validation loss:1.1238
38 | 2025-04-27 17:13:12,365 - epoch:9, training loss:1.9158 validation loss:1.2690
39 | 2025-04-27 17:13:14,030 - epoch:10, training loss:1.8708 validation loss:1.1772
40 | 2025-04-27 17:13:15,672 - epoch:11, training loss:1.8595 validation loss:1.0127
41 | 2025-04-27 17:13:17,304 - epoch:12, training loss:1.8525 validation loss:1.0892
42 | 2025-04-27 17:13:18,948 - epoch:13, training loss:1.9453 validation loss:1.0746
43 | 2025-04-27 17:13:19,607 - [*] loss:5.9015
44 | 2025-04-27 17:13:19,619 - [*] year 1, testing
45 | 2025-04-27 17:13:19,695 - T:3 MAE 1.9775 RMSE 2.1173 MAPE 5.5211
46 | 2025-04-27 17:13:19,794 - T:6 MAE 2.0360 RMSE 2.2188 MAPE 5.6855
47 | 2025-04-27 17:13:20,086 - T:12 MAE 2.1488 RMSE 2.4347 MAPE 6.0400
48 | 2025-04-27 17:13:20,086 - T:Avg MAE 2.0437 RMSE 2.2406 MAPE 5.7149
49 | 2025-04-27 17:13:20,088 - Finished optimization, total time:13.50 s, best model:log/ENERGY-Wind/stlora_st-622/1/0.9609.pkl
50 | 2025-04-27 17:13:20,097 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
51 | 2025-04-27 17:13:20,278 - [*] Year 2 Dataset load!
52 | 2025-04-27 17:13:20,280 - [*] load from log/ENERGY-Wind/stlora_st-622/1/0.9609.pkl
53 | 2025-04-27 17:13:20,284 - RAP initialized with backbone stgnn
54 | 2025-04-27 17:13:20,286 - [*] Year 2 Training start
55 | 2025-04-27 17:13:20,876 - node number torch.Size([15616, 12])
56 | 2025-04-27 17:13:22,150 - epoch:0, training loss:12.0245 validation loss:8.1088
57 | 2025-04-27 17:13:23,829 - epoch:1, training loss:2.4584 validation loss:7.9579
58 | 2025-04-27 17:13:25,513 - epoch:2, training loss:2.0802 validation loss:7.7236
59 | 2025-04-27 17:13:27,170 - epoch:3, training loss:2.0267 validation loss:7.6915
60 | 2025-04-27 17:13:28,868 - epoch:4, training loss:2.0682 validation loss:8.5721
61 | 2025-04-27 17:13:30,511 - epoch:5, training loss:1.9799 validation loss:8.0759
62 | 2025-04-27 17:13:32,276 - epoch:6, training loss:1.9361 validation loss:8.3791
63 | 2025-04-27 17:13:33,897 - epoch:7, training loss:1.9065 validation loss:8.2439
64 | 2025-04-27 17:13:35,525 - epoch:8, training loss:1.9802 validation loss:8.3355
65 | 2025-04-27 17:13:37,190 - epoch:9, training loss:1.8963 validation loss:7.9385
66 | 2025-04-27 17:13:37,882 - [*] loss:59.3369
67 | 2025-04-27 17:13:37,891 - [*] year 2, testing
68 | 2025-04-27 17:13:37,956 - T:3 MAE 7.5540 RMSE 7.6566 MAPE 33.3095
69 | 2025-04-27 17:13:38,053 - T:6 MAE 7.5543 RMSE 7.6836 MAPE 33.3458
70 | 2025-04-27 17:13:38,377 - T:12 MAE 7.5483 RMSE 7.7355 MAPE 33.4738
71 | 2025-04-27 17:13:38,377 - T:Avg MAE 7.5565 RMSE 7.6921 MAPE 33.3786
72 | 2025-04-27 17:13:38,379 - Finished optimization, total time:9.83 s, best model:log/ENERGY-Wind/stlora_st-622/2/7.6915.pkl
73 | 2025-04-27 17:13:38,388 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
74 | 2025-04-27 17:13:38,583 - [*] Year 3 Dataset load!
75 | 2025-04-27 17:13:38,585 - [*] load from log/ENERGY-Wind/stlora_st-622/2/7.6915.pkl
76 | 2025-04-27 17:13:38,590 - RAP initialized with backbone stgnn
77 | 2025-04-27 17:13:38,591 - [*] Year 3 Training start
78 | 2025-04-27 17:13:39,235 - node number torch.Size([17152, 12])
79 | 2025-04-27 17:13:40,399 - epoch:0, training loss:19.7316 validation loss:6.1117
80 | 2025-04-27 17:13:42,485 - epoch:1, training loss:2.6828 validation loss:6.6503
81 | 2025-04-27 17:13:44,526 - epoch:2, training loss:2.3135 validation loss:6.9487
82 | 2025-04-27 17:13:46,448 - epoch:3, training loss:2.1560 validation loss:6.7094
83 | 2025-04-27 17:13:48,294 - epoch:4, training loss:2.0987 validation loss:6.6572
84 | 2025-04-27 17:13:50,271 - epoch:5, training loss:2.0827 validation loss:6.8779
85 | 2025-04-27 17:13:52,146 - epoch:6, training loss:2.0461 validation loss:7.0988
86 | 2025-04-27 17:13:52,925 - [*] loss:49.7021
87 | 2025-04-27 17:13:52,933 - [*] year 3, testing
88 | 2025-04-27 17:13:53,035 - T:3 MAE 6.7252 RMSE 6.9290 MAPE 164.9929
89 | 2025-04-27 17:13:53,207 - T:6 MAE 6.6856 RMSE 6.9646 MAPE 166.2911
90 | 2025-04-27 17:13:53,596 - T:12 MAE 6.6480 RMSE 7.0589 MAPE 171.9975
91 | 2025-04-27 17:13:53,597 - T:Avg MAE 6.6918 RMSE 6.9759 MAPE 167.5772
92 | 2025-04-27 17:13:53,602 - Finished optimization, total time:7.69 s, best model:log/ENERGY-Wind/stlora_st-622/3/6.1117.pkl
93 | 2025-04-27 17:13:53,605 -
94 |
95 |
96 | 2025-04-27 17:13:53,605 - 3 MAE 5.48 1.98 7.55 6.73 5.43
97 | 2025-04-27 17:13:53,605 - 3 RMSE 5.63 2.12 7.66 6.93 5.58
98 | 2025-04-27 17:13:53,605 - 3 MAPE 14.76 5.52 33.31 164.99 54.65
99 | 2025-04-27 17:13:53,605 - 6 MAE 5.53 2.04 7.55 6.69 5.45
100 | 2025-04-27 17:13:53,605 - 6 RMSE 5.71 2.22 7.68 6.96 5.64
101 | 2025-04-27 17:13:53,605 - 6 MAPE 14.88 5.69 33.35 166.29 55.05
102 | 2025-04-27 17:13:53,606 - 12 MAE 5.53 2.15 7.55 6.65 5.47
103 | 2025-04-27 17:13:53,606 - 12 RMSE 5.79 2.43 7.74 7.06 5.76
104 | 2025-04-27 17:13:53,606 - 12 MAPE 14.86 6.04 33.47 172.00 56.59
105 | 2025-04-27 17:13:53,606 - Avg MAE 5.52 2.04 7.56 6.69 5.45
106 | 2025-04-27 17:13:53,606 - Avg RMSE 5.71 2.24 7.69 6.98 5.65
107 | 2025-04-27 17:13:53,606 - Avg MAPE 14.85 5.71 33.38 167.58 55.38
108 | 2025-04-27 17:13:53,606 - year 0 total_time 7.4118 average_time 0.9265 epoch 8
109 | 2025-04-27 17:13:53,606 - year 1 total_time 13.5038 average_time 0.9646 epoch 14
110 | 2025-04-27 17:13:53,606 - year 2 total_time 9.8269 average_time 0.9827 epoch 10
111 | 2025-04-27 17:13:53,606 - year 3 total_time 7.6886 average_time 1.0984 epoch 7
112 | 2025-04-27 17:13:53,606 - total time: 38.4311
113 |
--------------------------------------------------------------------------------
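In the timing summary at the end of each log, total_time is in seconds (it matches the "Finished optimization, total time" lines) and average_time is the per-epoch mean, i.e. total_time divided by the number of epochs run that year, as computed in train() in stkec_trainer.py above. For example, for year 0 of the stlora_st-622 run:

    print(round(7.4118 / 8, 4))  # 0.9265 s per epoch, matching "year 0 total_time 7.4118 average_time 0.9265 epoch 8"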
/log/ENERGY-Wind/pecpm_st-622/pecpm_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 20:33:50,533 - logger name:log/ENERGY-Wind/pecpm_st-622/pecpm_st.log
2 | 2025-04-27 20:33:50,533 - params : {'conf': 'conf/ENERGY-Wind/pecpm.json', 'seed': 622, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'pecpm_st', 'method': 'PECPM', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'year': 0, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'attention_weight': {'0': 5, '1': 5, '2': 5, '3': 5}, 'last_clusterc': None, 'cluster_num': 3, 'pattern_matching': True, 'max_patterns': 1000, 'path': 'log/ENERGY-Wind/pecpm_st-622', 'logger': }
3 | 2025-04-27 20:33:50,545 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 20:33:51,042 - [*] Year 0 Dataset load!
5 | 2025-04-27 20:33:51,044 - PECPM initialized with backbone stgnn
6 | 2025-04-27 20:33:51,045 - [*] Year 0 Training start
7 | 2025-04-27 20:33:51,359 - node number torch.Size([13184, 12])
8 | 2025-04-27 20:33:56,986 - epoch:0, training loss:158.2955 validation loss:3.6963
9 | 2025-04-27 20:34:02,144 - epoch:1, training loss:4.4835 validation loss:3.0789
10 | 2025-04-27 20:34:07,310 - epoch:2, training loss:2.8671 validation loss:3.1588
11 | 2025-04-27 20:34:12,509 - epoch:3, training loss:2.5241 validation loss:3.4815
12 | 2025-04-27 20:34:17,815 - epoch:4, training loss:2.3508 validation loss:3.3080
13 | 2025-04-27 20:34:23,031 - epoch:5, training loss:2.1893 validation loss:3.1589
14 | 2025-04-27 20:34:28,227 - epoch:6, training loss:2.1088 validation loss:3.4053
15 | 2025-04-27 20:34:33,507 - epoch:7, training loss:2.0899 validation loss:3.7009
16 | 2025-04-27 20:34:35,162 - [*] loss:34.5829
17 | 2025-04-27 20:34:35,171 - [*] year 0, testing
18 | 2025-04-27 20:34:35,226 - T:3 MAE 5.5977 RMSE 5.7440 MAPE 15.0544
19 | 2025-04-27 20:34:35,309 - T:6 MAE 5.6130 RMSE 5.7890 MAPE 15.0813
20 | 2025-04-27 20:34:35,711 - T:12 MAE 5.6173 RMSE 5.8707 MAPE 15.0682
21 | 2025-04-27 20:34:35,711 - T:Avg MAE 5.6096 RMSE 5.7954 MAPE 15.0729
22 | 2025-04-27 20:34:35,711 - Finished optimization, total time:30.16 s, best model:log/ENERGY-Wind/pecpm_st-622/0/3.0789.pkl
23 | 2025-04-27 20:34:35,719 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
24 | 2025-04-27 20:34:35,905 - [*] Year 1 Dataset load!
25 | 2025-04-27 20:34:35,906 - [*] load from log/ENERGY-Wind/pecpm_st-622/0/3.0789.pkl
26 | 2025-04-27 20:34:35,912 - PECPM initialized with backbone stgnn
27 | 2025-04-27 20:34:35,913 - [*] Year 1 Training start
28 | 2025-04-27 20:34:36,576 - node number torch.Size([14464, 12])
29 | 2025-04-27 20:34:42,557 - epoch:0, training loss:21.7020 validation loss:1.7199
30 | 2025-04-27 20:34:48,072 - epoch:1, training loss:2.9055 validation loss:1.1833
31 | 2025-04-27 20:34:53,637 - epoch:2, training loss:2.2751 validation loss:1.1086
32 | 2025-04-27 20:34:59,235 - epoch:3, training loss:2.0947 validation loss:1.1492
33 | 2025-04-27 20:35:04,669 - epoch:4, training loss:2.0475 validation loss:0.9925
34 | 2025-04-27 20:35:10,163 - epoch:5, training loss:2.0488 validation loss:1.0450
35 | 2025-04-27 20:35:15,710 - epoch:6, training loss:1.9869 validation loss:1.1371
36 | 2025-04-27 20:35:21,215 - epoch:7, training loss:1.9485 validation loss:0.9529
37 | 2025-04-27 20:35:26,995 - epoch:8, training loss:1.9013 validation loss:1.0311
38 | 2025-04-27 20:35:32,464 - epoch:9, training loss:1.9230 validation loss:1.3122
39 | 2025-04-27 20:35:38,117 - epoch:10, training loss:1.8795 validation loss:1.1964
40 | 2025-04-27 20:35:43,799 - epoch:11, training loss:1.8761 validation loss:0.9629
41 | 2025-04-27 20:35:49,499 - epoch:12, training loss:1.8486 validation loss:1.0189
42 | 2025-04-27 20:35:55,246 - epoch:13, training loss:1.9970 validation loss:1.2088
43 | 2025-04-27 20:35:56,921 - [*] loss:5.7508
44 | 2025-04-27 20:35:56,926 - [*] year 1, testing
45 | 2025-04-27 20:35:56,997 - T:3 MAE 1.9602 RMSE 2.1080 MAPE 5.5155
46 | 2025-04-27 20:35:57,095 - T:6 MAE 2.0086 RMSE 2.1955 MAPE 5.6502
47 | 2025-04-27 20:35:57,378 - T:12 MAE 2.1107 RMSE 2.3980 MAPE 5.9722
48 | 2025-04-27 20:35:57,378 - T:Avg MAE 2.0166 RMSE 2.2182 MAPE 5.6801
49 | 2025-04-27 20:35:57,383 - Finished optimization, total time:55.81 s, best model:log/ENERGY-Wind/pecpm_st-622/1/0.9529.pkl
50 | 2025-04-27 20:35:57,396 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
51 | 2025-04-27 20:35:57,597 - [*] Year 2 Dataset load!
52 | 2025-04-27 20:35:57,598 - [*] load from log/ENERGY-Wind/pecpm_st-622/1/0.9529.pkl
53 | 2025-04-27 20:35:57,605 - PECPM initialized with backbone stgnn
54 | 2025-04-27 20:35:57,606 - [*] Year 2 Training start
55 | 2025-04-27 20:35:58,305 - node number torch.Size([15616, 12])
56 | 2025-04-27 20:36:04,677 - epoch:0, training loss:11.5861 validation loss:8.0503
57 | 2025-04-27 20:36:10,446 - epoch:1, training loss:2.4837 validation loss:7.9047
58 | 2025-04-27 20:36:16,338 - epoch:2, training loss:2.0861 validation loss:7.7101
59 | 2025-04-27 20:36:22,404 - epoch:3, training loss:2.0430 validation loss:7.6727
60 | 2025-04-27 20:36:28,365 - epoch:4, training loss:2.0771 validation loss:8.4515
61 | 2025-04-27 20:36:34,418 - epoch:5, training loss:1.9648 validation loss:8.0583
62 | 2025-04-27 20:36:40,309 - epoch:6, training loss:1.9341 validation loss:8.3316
63 | 2025-04-27 20:36:46,103 - epoch:7, training loss:1.9075 validation loss:8.2400
64 | 2025-04-27 20:36:52,105 - epoch:8, training loss:1.9940 validation loss:8.3490
65 | 2025-04-27 20:36:57,884 - epoch:9, training loss:1.8895 validation loss:7.9429
66 | 2025-04-27 20:36:59,627 - [*] loss:58.9817
67 | 2025-04-27 20:36:59,630 - [*] year 2, testing
68 | 2025-04-27 20:36:59,696 - T:3 MAE 7.5186 RMSE 7.6255 MAPE 33.1333
69 | 2025-04-27 20:36:59,799 - T:6 MAE 7.5234 RMSE 7.6569 MAPE 33.1928
70 | 2025-04-27 20:37:00,112 - T:12 MAE 7.5214 RMSE 7.7132 MAPE 33.3415
71 | 2025-04-27 20:37:00,113 - T:Avg MAE 7.5247 RMSE 7.6647 MAPE 33.2211
72 | 2025-04-27 20:37:00,113 - Finished optimization, total time:42.71 s, best model:log/ENERGY-Wind/pecpm_st-622/2/7.6727.pkl
73 | 2025-04-27 20:37:00,123 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
74 | 2025-04-27 20:37:00,340 - [*] Year 3 Dataset load!
75 | 2025-04-27 20:37:00,341 - [*] load from log/ENERGY-Wind/pecpm_st-622/2/7.6727.pkl
76 | 2025-04-27 20:37:00,346 - PECPM initialized with backbone stgnn
77 | 2025-04-27 20:37:00,348 - [*] Year 3 Training start
78 | 2025-04-27 20:37:00,981 - node number torch.Size([17152, 12])
79 | 2025-04-27 20:37:07,803 - epoch:0, training loss:19.8027 validation loss:6.1207
80 | 2025-04-27 20:37:14,134 - epoch:1, training loss:2.7032 validation loss:6.6243
81 | 2025-04-27 20:37:20,408 - epoch:2, training loss:2.3421 validation loss:6.9325
82 | 2025-04-27 20:37:26,611 - epoch:3, training loss:2.1769 validation loss:6.6871
83 | 2025-04-27 20:37:32,948 - epoch:4, training loss:2.1071 validation loss:6.6705
84 | 2025-04-27 20:37:39,159 - epoch:5, training loss:2.0905 validation loss:6.7977
85 | 2025-04-27 20:37:45,448 - epoch:6, training loss:2.0473 validation loss:7.0273
86 | 2025-04-27 20:37:47,312 - [*] loss:49.9683
87 | 2025-04-27 20:37:47,316 - [*] year 3, testing
88 | 2025-04-27 20:37:47,390 - T:3 MAE 6.7429 RMSE 6.9439 MAPE 170.3491
89 | 2025-04-27 20:37:47,506 - T:6 MAE 6.7066 RMSE 6.9822 MAPE 171.4491
90 | 2025-04-27 20:37:47,858 - T:12 MAE 6.6675 RMSE 7.0717 MAPE 177.5190
91 | 2025-04-27 20:37:47,859 - T:Avg MAE 6.7121 RMSE 6.9925 MAPE 172.9588
92 | 2025-04-27 20:37:47,859 - Finished optimization, total time:32.17 s, best model:log/ENERGY-Wind/pecpm_st-622/3/6.1207.pkl
93 | 2025-04-27 20:37:47,860 -
94 |
95 |
96 | 2025-04-27 20:37:47,861 - 3 MAE 5.60 1.96 7.52 6.74 5.45
97 | 2025-04-27 20:37:47,861 - 3 RMSE 5.74 2.11 7.63 6.94 5.61
98 | 2025-04-27 20:37:47,861 - 3 MAPE 15.05 5.52 33.13 170.35 56.01
99 | 2025-04-27 20:37:47,861 - 6 MAE 5.61 2.01 7.52 6.71 5.46
100 | 2025-04-27 20:37:47,861 - 6 RMSE 5.79 2.20 7.66 6.98 5.66
101 | 2025-04-27 20:37:47,861 - 6 MAPE 15.08 5.65 33.19 171.45 56.34
102 | 2025-04-27 20:37:47,861 - 12 MAE 5.62 2.11 7.52 6.67 5.48
103 | 2025-04-27 20:37:47,861 - 12 RMSE 5.87 2.40 7.71 7.07 5.76
104 | 2025-04-27 20:37:47,861 - 12 MAPE 15.07 5.97 33.34 177.52 57.98
105 | 2025-04-27 20:37:47,861 - Avg MAE 5.61 2.02 7.52 6.71 5.47
106 | 2025-04-27 20:37:47,861 - Avg RMSE 5.80 2.22 7.66 6.99 5.67
107 | 2025-04-27 20:37:47,861 - Avg MAPE 15.07 5.68 33.22 172.96 56.73
108 | 2025-04-27 20:37:47,861 - year 0 total_time 30.1627 average_time 3.7704 epoch 8
109 | 2025-04-27 20:37:47,861 - year 1 total_time 55.8072 average_time 3.9863 epoch 14
110 | 2025-04-27 20:37:47,862 - year 2 total_time 42.7130 average_time 4.2713 epoch 10
111 | 2025-04-27 20:37:47,862 - year 3 total_time 32.1683 average_time 4.5955 epoch 7
112 | 2025-04-27 20:37:47,862 - total time: 160.8511
113 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/retrain_st-622/retrain_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 21:26:52,707 - logger name:log/ENERGY-Wind/retrain_st-622/retrain_st.log
2 | 2025-04-27 21:26:52,708 - params : {'conf': 'conf/ENERGY-Wind/retrain.json', 'seed': 622, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'retrain_st', 'method': 'TrafficStream', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': False, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/retrain_st-622', 'logger': }
3 | 2025-04-27 21:26:52,726 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 21:26:53,212 - [*] Year 0 Dataset load!
5 | 2025-04-27 21:26:53,215 - [*] Year 0 Training start
6 | 2025-04-27 21:26:53,640 - node number torch.Size([13184, 12])
7 | 2025-04-27 21:26:54,888 - epoch:0, training loss:159.0935 validation loss:4.0373
8 | 2025-04-27 21:26:56,424 - epoch:1, training loss:4.5645 validation loss:3.0058
9 | 2025-04-27 21:26:57,968 - epoch:2, training loss:2.8883 validation loss:3.1767
10 | 2025-04-27 21:26:59,447 - epoch:3, training loss:2.5437 validation loss:3.4735
11 | 2025-04-27 21:27:00,936 - epoch:4, training loss:2.3660 validation loss:3.3305
12 | 2025-04-27 21:27:02,415 - epoch:5, training loss:2.2076 validation loss:3.1783
13 | 2025-04-27 21:27:03,891 - epoch:6, training loss:2.1160 validation loss:3.3866
14 | 2025-04-27 21:27:05,350 - epoch:7, training loss:2.0848 validation loss:3.6188
15 | 2025-04-27 21:27:06,141 - [*] loss:33.6749
16 | 2025-04-27 21:27:06,149 - [*] year 0, testing
17 | 2025-04-27 21:27:06,210 - T:3 MAE 5.4772 RMSE 5.6311 MAPE 14.7622
18 | 2025-04-27 21:27:06,343 - T:6 MAE 5.5267 RMSE 5.7102 MAPE 14.8765
19 | 2025-04-27 21:27:06,753 - T:12 MAE 5.5298 RMSE 5.7927 MAPE 14.8589
20 | 2025-04-27 21:27:06,753 - T:Avg MAE 5.5166 RMSE 5.7104 MAPE 14.8505
21 | 2025-04-27 21:27:06,754 - Finished optimization, total time:7.14 s, best model:log/ENERGY-Wind/retrain_st-622/0/3.0058.pkl
22 | 2025-04-27 21:27:06,763 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
23 | 2025-04-27 21:27:06,945 - [*] Year 1 Dataset load!
24 | 2025-04-27 21:27:06,947 - [*] Year 1 Training start
25 | 2025-04-27 21:27:07,520 - node number torch.Size([14464, 12])
26 | 2025-04-27 21:27:08,644 - epoch:0, training loss:264.2195 validation loss:2.4337
27 | 2025-04-27 21:27:10,258 - epoch:1, training loss:5.8446 validation loss:1.3602
28 | 2025-04-27 21:27:11,863 - epoch:2, training loss:3.2207 validation loss:1.2487
29 | 2025-04-27 21:27:13,540 - epoch:3, training loss:2.6536 validation loss:1.0787
30 | 2025-04-27 21:27:15,291 - epoch:4, training loss:2.3786 validation loss:1.1895
31 | 2025-04-27 21:27:17,028 - epoch:5, training loss:2.2449 validation loss:1.0264
32 | 2025-04-27 21:27:18,654 - epoch:6, training loss:2.1494 validation loss:1.1864
33 | 2025-04-27 21:27:20,345 - epoch:7, training loss:2.0866 validation loss:1.2229
34 | 2025-04-27 21:27:22,054 - epoch:8, training loss:2.1262 validation loss:1.2515
35 | 2025-04-27 21:27:23,716 - epoch:9, training loss:2.1110 validation loss:0.9774
36 | 2025-04-27 21:27:25,353 - epoch:10, training loss:2.0223 validation loss:1.1283
37 | 2025-04-27 21:27:26,963 - epoch:11, training loss:1.9737 validation loss:1.1317
38 | 2025-04-27 21:27:28,658 - epoch:12, training loss:1.9710 validation loss:1.0623
39 | 2025-04-27 21:27:30,249 - epoch:13, training loss:2.0686 validation loss:1.4668
40 | 2025-04-27 21:27:31,920 - epoch:14, training loss:1.9920 validation loss:1.2709
41 | 2025-04-27 21:27:33,601 - epoch:15, training loss:2.0015 validation loss:1.3161
42 | 2025-04-27 21:27:34,307 - [*] loss:5.7540
43 | 2025-04-27 21:27:34,319 - [*] year 1, testing
44 | 2025-04-27 21:27:34,372 - T:3 MAE 2.0109 RMSE 2.1545 MAPE 5.6417
45 | 2025-04-27 21:27:34,460 - T:6 MAE 2.0204 RMSE 2.2095 MAPE 5.6837
46 | 2025-04-27 21:27:34,739 - T:12 MAE 2.0964 RMSE 2.3918 MAPE 5.9449
47 | 2025-04-27 21:27:34,740 - T:Avg MAE 2.0343 RMSE 2.2373 MAPE 5.7306
48 | 2025-04-27 21:27:34,743 - Finished optimization, total time:15.47 s, best model:log/ENERGY-Wind/retrain_st-622/1/0.9774.pkl
49 | 2025-04-27 21:27:34,754 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
50 | 2025-04-27 21:27:34,981 - [*] Year 2 Dataset load!
51 | 2025-04-27 21:27:34,984 - [*] Year 2 Training start
52 | 2025-04-27 21:27:35,605 - node number torch.Size([15616, 12])
53 | 2025-04-27 21:27:36,730 - epoch:0, training loss:156.3090 validation loss:8.3101
54 | 2025-04-27 21:27:38,411 - epoch:1, training loss:5.4119 validation loss:7.9268
55 | 2025-04-27 21:27:40,039 - epoch:2, training loss:3.4005 validation loss:8.1441
56 | 2025-04-27 21:27:41,737 - epoch:3, training loss:2.6515 validation loss:8.4292
57 | 2025-04-27 21:27:43,359 - epoch:4, training loss:2.2922 validation loss:8.2424
58 | 2025-04-27 21:27:44,964 - epoch:5, training loss:2.2554 validation loss:8.2865
59 | 2025-04-27 21:27:46,658 - epoch:6, training loss:2.1388 validation loss:8.2085
60 | 2025-04-27 21:27:48,311 - epoch:7, training loss:2.0982 validation loss:8.2554
61 | 2025-04-27 21:27:49,046 - [*] loss:63.8329
62 | 2025-04-27 21:27:49,055 - [*] year 2, testing
63 | 2025-04-27 21:27:49,123 - T:3 MAE 7.7992 RMSE 7.9309 MAPE 35.0510
64 | 2025-04-27 21:27:49,221 - T:6 MAE 7.7712 RMSE 7.9442 MAPE 34.9469
65 | 2025-04-27 21:27:49,530 - T:12 MAE 7.7512 RMSE 8.0108 MAPE 35.0154
66 | 2025-04-27 21:27:49,530 - T:Avg MAE 7.7767 RMSE 7.9572 MAPE 35.0071
67 | 2025-04-27 21:27:49,534 - Finished optimization, total time:7.76 s, best model:log/ENERGY-Wind/retrain_st-622/2/7.9268.pkl
68 | 2025-04-27 21:27:49,544 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
69 | 2025-04-27 21:27:49,763 - [*] Year 3 Dataset load!
70 | 2025-04-27 21:27:49,764 - [*] Year 3 Training start
71 | 2025-04-27 21:27:50,410 - node number torch.Size([17152, 12])
72 | 2025-04-27 21:27:51,488 - epoch:0, training loss:59.4165 validation loss:6.6416
73 | 2025-04-27 21:27:53,195 - epoch:1, training loss:3.7983 validation loss:6.9837
74 | 2025-04-27 21:27:54,916 - epoch:2, training loss:2.5384 validation loss:6.7572
75 | 2025-04-27 21:27:56,568 - epoch:3, training loss:2.2695 validation loss:6.8698
76 | 2025-04-27 21:27:58,259 - epoch:4, training loss:2.2099 validation loss:6.8610
77 | 2025-04-27 21:27:59,859 - epoch:5, training loss:2.1842 validation loss:6.6634
78 | 2025-04-27 21:28:01,518 - epoch:6, training loss:2.1425 validation loss:6.4080
79 | 2025-04-27 21:28:03,238 - epoch:7, training loss:2.0693 validation loss:6.7183
80 | 2025-04-27 21:28:04,845 - epoch:8, training loss:2.0269 validation loss:6.3957
81 | 2025-04-27 21:28:06,480 - epoch:9, training loss:2.0133 validation loss:6.5134
82 | 2025-04-27 21:28:08,135 - epoch:10, training loss:1.9700 validation loss:6.8352
83 | 2025-04-27 21:28:09,826 - epoch:11, training loss:1.9714 validation loss:6.4163
84 | 2025-04-27 21:28:11,483 - epoch:12, training loss:1.9877 validation loss:6.9803
85 | 2025-04-27 21:28:13,139 - epoch:13, training loss:1.9819 validation loss:6.5956
86 | 2025-04-27 21:28:14,763 - epoch:14, training loss:1.9877 validation loss:6.6744
87 | 2025-04-27 21:28:15,476 - [*] loss:53.8206
88 | 2025-04-27 21:28:15,484 - [*] year 3, testing
89 | 2025-04-27 21:28:15,544 - T:3 MAE 6.9747 RMSE 7.1699 MAPE 153.2256
90 | 2025-04-27 21:28:15,652 - T:6 MAE 6.9820 RMSE 7.2363 MAPE 156.2359
91 | 2025-04-27 21:28:15,993 - T:12 MAE 6.9838 RMSE 7.3748 MAPE 161.1288
92 | 2025-04-27 21:28:15,994 - T:Avg MAE 6.9793 RMSE 7.2488 MAPE 156.3929
93 | 2025-04-27 21:28:15,997 - Finished optimization, total time:14.43 s, best model:log/ENERGY-Wind/retrain_st-622/3/6.3957.pkl
94 | 2025-04-27 21:28:15,999 -
95 |
96 |
97 | 2025-04-27 21:28:15,999 - 3 MAE 5.48 2.01 7.80 6.97 5.57
98 | 2025-04-27 21:28:15,999 - 3 RMSE 5.63 2.15 7.93 7.17 5.72
99 | 2025-04-27 21:28:15,999 - 3 MAPE 14.76 5.64 35.05 153.23 52.17
100 | 2025-04-27 21:28:15,999 - 6 MAE 5.53 2.02 7.77 6.98 5.58
101 | 2025-04-27 21:28:15,999 - 6 RMSE 5.71 2.21 7.94 7.24 5.78
102 | 2025-04-27 21:28:16,000 - 6 MAPE 14.88 5.68 34.95 156.24 52.94
103 | 2025-04-27 21:28:16,000 - 12 MAE 5.53 2.10 7.75 6.98 5.59
104 | 2025-04-27 21:28:16,000 - 12 RMSE 5.79 2.39 8.01 7.37 5.89
105 | 2025-04-27 21:28:16,000 - 12 MAPE 14.86 5.94 35.02 161.13 54.24
106 | 2025-04-27 21:28:16,000 - Avg MAE 5.52 2.03 7.78 6.98 5.58
107 | 2025-04-27 21:28:16,000 - Avg RMSE 5.71 2.24 7.96 7.25 5.79
108 | 2025-04-27 21:28:16,000 - Avg MAPE 14.85 5.73 35.01 156.39 53.00
109 | 2025-04-27 21:28:16,000 - year 0 total_time 7.1436 average_time 0.8930 epoch 8
110 | 2025-04-27 21:28:16,000 - year 1 total_time 15.4681 average_time 0.9668 epoch 16
111 | 2025-04-27 21:28:16,000 - year 2 total_time 7.7621 average_time 0.9703 epoch 8
112 | 2025-04-27 21:28:16,000 - year 3 total_time 14.4333 average_time 0.9622 epoch 15
113 | 2025-04-27 21:28:16,000 - total time: 44.8070
114 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/eac_st-100/eac_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 18:03:28,171 - logger name:log/ENERGY-Wind/eac_st-100/eac_st.log
2 | 2025-04-27 18:03:28,171 - params : {'conf': 'conf/ENERGY-Wind/eac.json', 'seed': 100, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'eac_st', 'method': 'EAC', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'rank': 6, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/eac_st-100', 'logger': }
3 | 2025-04-27 18:03:28,184 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 18:03:28,653 - [*] Year 0 Dataset load!
5 | 2025-04-27 18:03:28,655 - RAP initialized with backbone stgnn
6 | 2025-04-27 18:03:28,656 - [*] Year 0 Training start
7 | 2025-04-27 18:03:28,969 - node number torch.Size([13184, 12])
8 | 2025-04-27 18:03:30,315 - epoch:0, training loss:222.8429 validation loss:2.4422
9 | 2025-04-27 18:03:31,801 - epoch:1, training loss:4.9374 validation loss:3.1767
10 | 2025-04-27 18:03:33,279 - epoch:2, training loss:2.6725 validation loss:2.9531
11 | 2025-04-27 18:03:34,756 - epoch:3, training loss:2.2999 validation loss:3.1356
12 | 2025-04-27 18:03:36,242 - epoch:4, training loss:2.1922 validation loss:3.2285
13 | 2025-04-27 18:03:37,694 - epoch:5, training loss:2.1336 validation loss:3.3310
14 | 2025-04-27 18:03:39,243 - epoch:6, training loss:2.0907 validation loss:3.0541
15 | 2025-04-27 18:03:39,935 - [*] loss:22.0784
16 | 2025-04-27 18:03:39,944 - [*] year 0, testing
17 | 2025-04-27 18:03:40,007 - T:3 MAE 5.4118 RMSE 5.6803 MAPE 14.5196
18 | 2025-04-27 18:03:40,136 - T:6 MAE 4.1883 RMSE 4.7554 MAPE 11.3423
19 | 2025-04-27 18:03:40,538 - T:12 MAE 4.0688 RMSE 4.7456 MAPE 10.9827
20 | 2025-04-27 18:03:40,538 - T:Avg MAE 4.5784 RMSE 5.0675 MAPE 12.3282
21 | 2025-04-27 18:03:40,539 - Finished optimization, total time:6.50 s, best model:log/ENERGY-Wind/eac_st-100/0/2.4422.pkl
22 | 2025-04-27 18:03:40,547 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
23 | 2025-04-27 18:03:40,733 - [*] Year 1 Dataset load!
24 | 2025-04-27 18:03:40,734 - [*] load from log/ENERGY-Wind/eac_st-100/0/2.4422.pkl
25 | 2025-04-27 18:03:40,740 - RAP initialized with backbone stgnn
26 | 2025-04-27 18:03:40,743 - [*] Year 1 Training start
27 | 2025-04-27 18:03:41,271 - node number torch.Size([14464, 12])
28 | 2025-04-27 18:03:42,359 - epoch:0, training loss:10.3076 validation loss:1.9602
29 | 2025-04-27 18:03:43,930 - epoch:1, training loss:5.6791 validation loss:1.7603
30 | 2025-04-27 18:03:45,488 - epoch:2, training loss:4.5384 validation loss:1.5238
31 | 2025-04-27 18:03:47,049 - epoch:3, training loss:4.1658 validation loss:1.8741
32 | 2025-04-27 18:03:48,562 - epoch:4, training loss:3.8223 validation loss:2.1363
33 | 2025-04-27 18:03:50,090 - epoch:5, training loss:4.4880 validation loss:1.4417
34 | 2025-04-27 18:03:51,644 - epoch:6, training loss:3.6211 validation loss:1.4156
35 | 2025-04-27 18:03:53,159 - epoch:7, training loss:3.6635 validation loss:1.3719
36 | 2025-04-27 18:03:54,682 - epoch:8, training loss:3.7230 validation loss:1.3718
37 | 2025-04-27 18:03:56,267 - epoch:9, training loss:3.5648 validation loss:1.6432
38 | 2025-04-27 18:03:57,782 - epoch:10, training loss:3.6830 validation loss:1.5055
39 | 2025-04-27 18:03:59,334 - epoch:11, training loss:3.5953 validation loss:1.5039
40 | 2025-04-27 18:04:00,955 - epoch:12, training loss:3.5799 validation loss:1.4528
41 | 2025-04-27 18:04:02,557 - epoch:13, training loss:3.6055 validation loss:1.6703
42 | 2025-04-27 18:04:04,138 - epoch:14, training loss:3.8412 validation loss:1.3677
43 | 2025-04-27 18:04:05,724 - epoch:15, training loss:3.5754 validation loss:1.3711
44 | 2025-04-27 18:04:07,244 - epoch:16, training loss:3.6762 validation loss:1.3983
45 | 2025-04-27 18:04:08,759 - epoch:17, training loss:3.8060 validation loss:1.3771
46 | 2025-04-27 18:04:10,342 - epoch:18, training loss:3.5273 validation loss:1.3836
47 | 2025-04-27 18:04:11,920 - epoch:19, training loss:3.6508 validation loss:1.3737
48 | 2025-04-27 18:04:13,421 - epoch:20, training loss:3.6370 validation loss:1.5539
49 | 2025-04-27 18:04:14,044 - [*] loss:7.1330
50 | 2025-04-27 18:04:14,055 - [*] year 1, testing
51 | 2025-04-27 18:04:14,112 - T:3 MAE 1.4206 RMSE 1.7459 MAPE 4.1365
52 | 2025-04-27 18:04:14,202 - T:6 MAE 1.8062 RMSE 2.2065 MAPE 5.1933
53 | 2025-04-27 18:04:14,507 - T:12 MAE 2.1483 RMSE 2.6434 MAPE 6.1623
54 | 2025-04-27 18:04:14,507 - T:Avg MAE 1.7823 RMSE 2.1856 MAPE 5.1408
55 | 2025-04-27 18:04:14,507 - Finished optimization, total time:19.11 s, best model:log/ENERGY-Wind/eac_st-100/1/1.3677.pkl
56 | 2025-04-27 18:04:14,517 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
57 | 2025-04-27 18:04:14,708 - [*] Year 2 Dataset load!
58 | 2025-04-27 18:04:14,709 - [*] load from log/ENERGY-Wind/eac_st-100/1/1.3677.pkl
59 | 2025-04-27 18:04:14,715 - RAP initialized with backbone stgnn
60 | 2025-04-27 18:04:14,716 - [*] Year 2 Training start
61 | 2025-04-27 18:04:15,267 - node number torch.Size([15616, 12])
62 | 2025-04-27 18:04:16,332 - epoch:0, training loss:9.1297 validation loss:7.6689
63 | 2025-04-27 18:04:17,948 - epoch:1, training loss:3.9999 validation loss:8.2933
64 | 2025-04-27 18:04:19,512 - epoch:2, training loss:3.7824 validation loss:8.2893
65 | 2025-04-27 18:04:21,113 - epoch:3, training loss:3.6951 validation loss:7.7767
66 | 2025-04-27 18:04:22,688 - epoch:4, training loss:3.6603 validation loss:8.2091
67 | 2025-04-27 18:04:24,324 - epoch:5, training loss:3.6277 validation loss:7.5475
68 | 2025-04-27 18:04:26,099 - epoch:6, training loss:3.6174 validation loss:8.2100
69 | 2025-04-27 18:04:27,889 - epoch:7, training loss:3.6582 validation loss:8.3758
70 | 2025-04-27 18:04:29,707 - epoch:8, training loss:3.6670 validation loss:8.0609
71 | 2025-04-27 18:04:31,524 - epoch:9, training loss:3.6718 validation loss:8.2196
72 | 2025-04-27 18:04:33,311 - epoch:10, training loss:3.6269 validation loss:7.8336
73 | 2025-04-27 18:04:35,075 - epoch:11, training loss:3.5991 validation loss:7.7173
74 | 2025-04-27 18:04:35,918 - [*] loss:57.8817
75 | 2025-04-27 18:04:35,927 - [*] year 2, testing
76 | 2025-04-27 18:04:36,001 - T:3 MAE 6.9356 RMSE 7.0386 MAPE 31.3314
77 | 2025-04-27 18:04:36,127 - T:6 MAE 7.2359 RMSE 7.3869 MAPE 32.5949
78 | 2025-04-27 18:04:36,505 - T:12 MAE 7.3717 RMSE 7.6098 MAPE 33.3309
79 | 2025-04-27 18:04:36,506 - T:Avg MAE 7.1830 RMSE 7.3388 MAPE 32.4345
80 | 2025-04-27 18:04:36,506 - Finished optimization, total time:11.75 s, best model:log/ENERGY-Wind/eac_st-100/2/7.5475.pkl
81 | 2025-04-27 18:04:36,515 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
82 | 2025-04-27 18:04:36,713 - [*] Year 3 Dataset load!
83 | 2025-04-27 18:04:36,714 - [*] load from log/ENERGY-Wind/eac_st-100/2/7.5475.pkl
84 | 2025-04-27 18:04:36,719 - RAP initialized with backbone stgnn
85 | 2025-04-27 18:04:36,720 - [*] Year 3 Training start
86 | 2025-04-27 18:04:37,308 - node number torch.Size([17152, 12])
87 | 2025-04-27 18:04:38,423 - epoch:0, training loss:20.9249 validation loss:6.1370
88 | 2025-04-27 18:04:39,997 - epoch:1, training loss:4.1899 validation loss:6.7083
89 | 2025-04-27 18:04:41,607 - epoch:2, training loss:3.5395 validation loss:6.7294
90 | 2025-04-27 18:04:43,183 - epoch:3, training loss:3.4417 validation loss:6.5688
91 | 2025-04-27 18:04:44,800 - epoch:4, training loss:3.4299 validation loss:6.7110
92 | 2025-04-27 18:04:46,363 - epoch:5, training loss:3.3553 validation loss:6.7399
93 | 2025-04-27 18:04:47,973 - epoch:6, training loss:3.3200 validation loss:6.9329
94 | 2025-04-27 18:04:48,647 - [*] loss:51.5029
95 | 2025-04-27 18:04:48,661 - [*] year 3, testing
96 | 2025-04-27 18:04:48,730 - T:3 MAE 6.0504 RMSE 6.3779 MAPE 167.6940
97 | 2025-04-27 18:04:48,844 - T:6 MAE 6.5392 RMSE 6.9225 MAPE 178.0625
98 | 2025-04-27 18:04:49,229 - T:12 MAE 6.6973 RMSE 7.1966 MAPE 186.5757
99 | 2025-04-27 18:04:49,229 - T:Avg MAE 6.4181 RMSE 6.8141 MAPE 177.7098
100 | 2025-04-27 18:04:49,229 - Finished optimization, total time:6.54 s, best model:log/ENERGY-Wind/eac_st-100/3/6.137.pkl
101 | 2025-04-27 18:04:49,230 -
102 |
103 |
104 | 2025-04-27 18:04:49,230 - 3 MAE 5.41 1.42 6.94 6.05 4.95
105 | 2025-04-27 18:04:49,230 - 3 RMSE 5.68 1.75 7.04 6.38 5.21
106 | 2025-04-27 18:04:49,231 - 3 MAPE 14.52 4.14 31.33 167.69 54.42
107 | 2025-04-27 18:04:49,231 - 6 MAE 4.19 1.81 7.24 6.54 4.94
108 | 2025-04-27 18:04:49,231 - 6 RMSE 4.76 2.21 7.39 6.92 5.32
109 | 2025-04-27 18:04:49,231 - 6 MAPE 11.34 5.19 32.59 178.06 56.80
110 | 2025-04-27 18:04:49,231 - 12 MAE 4.07 2.15 7.37 6.70 5.07
111 | 2025-04-27 18:04:49,231 - 12 RMSE 4.75 2.64 7.61 7.20 5.55
112 | 2025-04-27 18:04:49,231 - 12 MAPE 10.98 6.16 33.33 186.58 59.26
113 | 2025-04-27 18:04:49,231 - Avg MAE 4.58 1.78 7.18 6.42 4.99
114 | 2025-04-27 18:04:49,231 - Avg RMSE 5.07 2.19 7.34 6.81 5.35
115 | 2025-04-27 18:04:49,231 - Avg MAPE 12.33 5.14 32.43 177.71 56.90
116 | 2025-04-27 18:04:49,231 - year 0 total_time 6.4960 average_time 0.9280 epoch 7
117 | 2025-04-27 18:04:49,231 - year 1 total_time 19.1077 average_time 0.9099 epoch 21
118 | 2025-04-27 18:04:49,231 - year 2 total_time 11.7549 average_time 0.9796 epoch 12
119 | 2025-04-27 18:04:49,231 - year 3 total_time 6.5383 average_time 0.9341 epoch 7
120 | 2025-04-27 18:04:49,231 - total time: 43.8969
121 |
--------------------------------------------------------------------------------
/src/trainer/default_trainer.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | import numpy as np
4 | import os.path as osp
5 | import networkx as nx
6 | import torch.nn.functional as func
7 | from torch import optim
8 | from datetime import datetime
9 | from torch_geometric.utils import to_dense_batch
10 |
11 | from src.model.ewc import EWC
12 | from torch_geometric.loader import DataLoader
13 | from dataer.SpatioTemporalDataset import SpatioTemporalDataset
14 | from utils.metric import cal_metric, masked_mae_np
15 | from utils.common_tools import mkdirs, load_best_model
16 |
17 |
18 | def train(inputs, args):
19 | path = osp.join(args.path, str(args.year)) # Define the current year model save path
20 | mkdirs(path)
21 |
22 | # Setting the loss function
23 | if args.loss == "mse":
24 | lossfunc = func.mse_loss
25 | elif args.loss == "huber":
26 | lossfunc = func.smooth_l1_loss
27 |
28 | # Dataset definition
29 | if args.strategy == 'incremental' and args.year > args.begin_year:
30 | # Data loaders for the incremental strategy (subgraph nodes only)
31 | train_loader = DataLoader(SpatioTemporalDataset("", "", x=inputs["train_x"][:, :, args.subgraph.numpy()], y=inputs["train_y"][:, :, args.subgraph.numpy()], \
32 | edge_index="", mode="subgraph"), batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=32)
33 | val_loader = DataLoader(SpatioTemporalDataset("", "", x=inputs["val_x"][:, :, args.subgraph.numpy()], y=inputs["val_y"][:, :, args.subgraph.numpy()], \
34 | edge_index="", mode="subgraph"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
35 | # Construct the adjacency matrix of the subgraph
36 | graph = nx.Graph()
37 | graph.add_nodes_from(range(args.subgraph.size(0)))
38 | graph.add_edges_from(args.subgraph_edge_index.numpy().T)
39 | adj = nx.to_numpy_array(graph) # Convert to adjacency matrix
40 | adj = adj / (np.sum(adj, 1, keepdims=True) + 1e-6) # Normalized adjacency matrix
41 | vars(args)["sub_adj"] = torch.from_numpy(adj).to(torch.float).to(args.device)
42 | else:
43 | # Common Data Loader
44 | train_loader = DataLoader(SpatioTemporalDataset(inputs, "train"), batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=32)
45 | val_loader = DataLoader(SpatioTemporalDataset(inputs, "val"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
46 | vars(args)["sub_adj"] = vars(args)["adj"] # Use the adjacency matrix of the entire graph
47 |
48 | # Test Data Loader
49 | test_loader = DataLoader(SpatioTemporalDataset(inputs, "test"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
50 |
51 | args.logger.info("[*] Year " + str(args.year) + " Dataset load!")
52 |
53 | # Model definition
54 | if args.init == True and args.year > args.begin_year:
55 | gnn_model, _ = load_best_model(args) # If it is not the first year, load the optimal model
56 | if args.ewc: # If the EWC strategy is enabled, wrap the loaded model with the EWC wrapper
57 | args.logger.info("[*] EWC! lambda {:.6f}".format(args.ewc_lambda)) # Record EWC related parameters
58 | model = EWC(gnn_model, args.adj, args.ewc_lambda, args.ewc_strategy) # Initialize the EWC model
59 | ewc_loader = DataLoader(SpatioTemporalDataset(inputs, "train"), batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=32)
60 | model.register_ewc_params(ewc_loader, lossfunc, args.device) # Register EWC parameters
61 | else:
62 | model = gnn_model # Otherwise, use the best model loaded
63 |
64 | if args.method == 'EAC':
65 | for name, param in model.named_parameters():
66 | if "gcn1" in name or "tcn1" in name or "gcn2" in name or "fc" in name:
67 | param.requires_grad = False
68 |
69 | if args.method == 'EAC':
70 | model.expand_adaptive_params(args.graph_size)
71 |
72 | if args.method == 'Universal' and args.use_eac == True:
73 | for name, param in model.named_parameters():
74 | if "gcn1" in name or "tcn1" in name or "gcn2" in name or "fc" in name:
75 | param.requires_grad = False
76 |
77 | if args.method == 'Universal' and args.use_eac == True:
78 | model.expand_adaptive_params(args.graph_size)
79 |
80 | else:
81 | gnn_model = args.methods[args.method](args).to(args.device) # First year: build the base model from scratch
82 | model = gnn_model
83 | if args.method == 'EAC':
84 | model.expand_adaptive_params(args.graph_size)
85 |
86 | if args.method == 'Universal' and args.use_eac == True:
87 | model.expand_adaptive_params(args.graph_size)
88 |
89 | #if args.logname != 'trafficstream':
90 | # model.count_parameters()
91 | # for name, param in model.named_parameters():
92 | # print(f"Parameter: {name} | Requires Grad: {param.requires_grad}")
93 |
94 |
95 | # Model Optimizer
96 | # optimizer = optim.AdamW(model.parameters(), lr=args.lr)
97 | optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) # only optimize parameters that require gradients
98 |
99 |
100 | args.logger.info("[*] Year " + str(args.year) + " Training start")
101 | lowest_validation_loss = 1e7
102 | counter = 0
103 | patience = 5
104 | model.train()
105 | use_time = []
106 |
107 | for epoch in range(args.epoch):
108 |
109 | start_time = datetime.now()
110 |
111 | # Training the model
112 | cn = 0
113 | training_loss = 0.0
114 | for batch_idx, data in enumerate(train_loader):
115 | if epoch == 0 and batch_idx == 0:
116 | args.logger.info("node number {}".format(data.x.shape))
117 | data = data.to(args.device, non_blocking=True)
118 | optimizer.zero_grad()
119 | pred = model(data, args.sub_adj)
120 |
121 | if args.strategy == "incremental" and args.year > args.begin_year:
122 | pred, _ = to_dense_batch(pred, batch=data.batch) # to_dense_batch regroups the packed node-level outputs into a dense [batch, nodes, horizon] tensor
123 | data.y, _ = to_dense_batch(data.y, batch=data.batch)
124 | pred = pred[:, args.mapping, :] # Keep only the nodes selected by the mapping (predictions and targets of the changed region)
125 | data.y = data.y[:, args.mapping, :]
126 |
127 | loss = lossfunc(data.y, pred, reduction="mean")
128 |
129 | if args.ewc and args.year > args.begin_year:
130 | loss += model.compute_consolidation_loss() # Add the EWC consolidation penalty when continuing from a previous year
131 |
132 | training_loss += float(loss)
133 | cn += 1
134 |
135 | loss.backward()
136 | optimizer.step()
137 |
138 |
139 | if epoch == 0:
140 | total_time = (datetime.now() - start_time).total_seconds()
141 | else:
142 | total_time += (datetime.now() - start_time).total_seconds()
143 | use_time.append((datetime.now() - start_time).total_seconds())
144 | training_loss = training_loss / cn
145 |
146 | # Validate the model
147 | validation_loss = 0.0
148 | cn = 0
149 | with torch.no_grad():
150 | for batch_idx, data in enumerate(val_loader):
151 | data = data.to(args.device, non_blocking=True)
152 | pred = model(data, args.sub_adj)
153 | if args.strategy == "incremental" and args.year > args.begin_year:
154 | pred, _ = to_dense_batch(pred, batch=data.batch)
155 | data.y, _ = to_dense_batch(data.y, batch=data.batch)
156 | pred = pred[:, args.mapping, :]
157 | data.y = data.y[:, args.mapping, :]
158 |
159 | loss = masked_mae_np(data.y.cpu().data.numpy(), pred.cpu().data.numpy(), 0)
160 | validation_loss += float(loss)
161 | cn += 1
162 | validation_loss = float(validation_loss/cn)
163 |
164 |
165 | args.logger.info(f"epoch:{epoch}, training loss:{training_loss:.4f} validation loss:{validation_loss:.4f}")
166 |
167 | # Early Stopping Strategy
168 | if validation_loss <= lowest_validation_loss:
169 | counter = 0
170 | lowest_validation_loss = round(validation_loss, 4)
171 | if args.ewc:
172 | torch.save({'model_state_dict': gnn_model.state_dict()}, osp.join(path, str(round(validation_loss,4))+".pkl"))
173 | else:
174 | torch.save({'model_state_dict': model.state_dict()}, osp.join(path, str(round(validation_loss,4))+".pkl"))
175 | else:
176 | counter += 1
177 | if counter > patience:
178 | break
179 |
180 | best_model_path = osp.join(path, str(lowest_validation_loss)+".pkl")
181 |
182 | if args.method == 'TrafficStream':
183 | best_model = args.methods[args.method](args)
184 |
185 | else:
186 | best_model = model
187 |
188 | best_model.load_state_dict(torch.load(best_model_path, args.device)["model_state_dict"])
189 | best_model = best_model.to(args.device)
190 |
191 | # Test the Model
192 | test_model(best_model, args, test_loader, True)
193 | args.result[args.year] = {"total_time": total_time, "average_time": sum(use_time)/len(use_time), "epoch_num": epoch+1}
194 | args.logger.info("Finished optimization, total time:{:.2f} s, best model:{}".format(total_time, best_model_path))
195 |
196 |
197 | def test_model(model, args, testset, pin_memory):
198 | model.eval()
199 | pred_ = []
200 | truth_ = []
201 | loss = 0.0
202 | with torch.no_grad():
203 | cn = 0
204 | for data in testset:
205 | data = data.to(args.device, non_blocking=pin_memory)
206 | pred = model(data, args.adj)
207 | loss += func.mse_loss(data.y, pred, reduction="mean")
208 | pred, _ = to_dense_batch(pred, batch=data.batch)
209 | data.y, _ = to_dense_batch(data.y, batch=data.batch)
210 | pred_.append(pred.cpu().data.numpy())
211 | truth_.append(data.y.cpu().data.numpy())
212 | cn += 1
213 | loss = loss / cn
214 | args.logger.info("[*] loss:{:.4f}".format(loss))
215 | pred_ = np.concatenate(pred_, 0)
216 | truth_ = np.concatenate(truth_, 0)
217 | cal_metric(truth_, pred_, args)
--------------------------------------------------------------------------------
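Note on the incremental branch of train() above: the subgraph adjacency handed to the model is row-normalized, and node-level predictions are regrouped with to_dense_batch before the changed nodes are sliced out via args.mapping. A minimal, self-contained sketch of both steps follows (the graph, shapes, and mapping indices are made up for illustration and are not part of the repository; torch_geometric is assumed to be installed):

import numpy as np
import networkx as nx
import torch
from torch_geometric.utils import to_dense_batch

# Row-normalized adjacency of a toy 4-node subgraph (same recipe as in train()).
graph = nx.Graph()
graph.add_nodes_from(range(4))
graph.add_edges_from([(0, 1), (1, 2), (2, 3)])
adj = nx.to_numpy_array(graph)
adj = adj / (np.sum(adj, 1, keepdims=True) + 1e-6)                    # each non-isolated row sums to ~1
sub_adj = torch.from_numpy(adj).to(torch.float)

# Regroup packed node predictions into [batch, nodes, horizon] and slice the changed nodes.
num_graphs, nodes_per_graph, horizon = 2, 4, 12
pred = torch.randn(num_graphs * nodes_per_graph, horizon)             # packed, as returned by the model
batch = torch.arange(num_graphs).repeat_interleave(nodes_per_graph)   # graph id of every node
dense_pred, _ = to_dense_batch(pred, batch=batch)                     # shape [2, 4, 12]
mapping = torch.tensor([1, 3])                                        # hypothetical indices of changed nodes
print(dense_pred[:, mapping, :].shape)                                # torch.Size([2, 2, 12])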
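The EWC wrapper is used above as a black box (register_ewc_params, compute_consolidation_loss). As a generic sketch of the kind of penalty such a wrapper typically adds, assuming hypothetical ref_params and fisher dictionaries captured after the previous year, and not the repository's own implementation:

import torch

def ewc_penalty(model, ref_params, fisher, lam):
    # Generic EWC-style consolidation term: lam/2 * sum_i F_i * (theta_i - theta_i_ref)^2.
    # ref_params and fisher are hypothetical dicts of tensors keyed by parameter name.
    penalty = torch.zeros(())
    for name, param in model.named_parameters():
        if name in ref_params:
            penalty = penalty + (fisher[name] * (param - ref_params[name]) ** 2).sum()
    return 0.5 * lam * penalty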
/log/ENERGY-Wind/stadapter_st-622/stadapter_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 19:15:59,696 - logger name:log/ENERGY-Wind/stadapter_st-622/stadapter_st.log
2 | 2025-04-27 19:15:59,696 - params : {'conf': 'conf/ENERGY-Wind/stadapter.json', 'seed': 622, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'stadapter_st', 'method': 'ST-Adapter', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/stadapter_st-622', 'logger': }
3 | 2025-04-27 19:15:59,709 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 19:16:00,174 - [*] Year 0 Dataset load!
5 | 2025-04-27 19:16:00,177 - ST-Adapter initialized with backbone stgnn
6 | 2025-04-27 19:16:00,177 - Adapter bottleneck ratio: 1/8
7 | 2025-04-27 19:16:00,178 - [*] Year 0 Training start
8 | 2025-04-27 19:16:00,613 - node number torch.Size([13184, 12])
9 | 2025-04-27 19:16:02,014 - epoch:0, training loss:163.5270 validation loss:2.9920
10 | 2025-04-27 19:16:03,705 - epoch:1, training loss:3.8866 validation loss:3.4032
11 | 2025-04-27 19:16:05,296 - epoch:2, training loss:2.6626 validation loss:3.8403
12 | 2025-04-27 19:16:06,898 - epoch:3, training loss:2.4891 validation loss:3.2830
13 | 2025-04-27 19:16:08,519 - epoch:4, training loss:2.2397 validation loss:3.2965
14 | 2025-04-27 19:16:10,084 - epoch:5, training loss:2.1429 validation loss:3.8897
15 | 2025-04-27 19:16:11,778 - epoch:6, training loss:2.1406 validation loss:3.0168
16 | 2025-04-27 19:16:12,451 - [*] loss:33.8234
17 | 2025-04-27 19:16:12,458 - [*] year 0, testing
18 | 2025-04-27 19:16:12,537 - T:3 MAE 6.0471 RMSE 6.3408 MAPE 15.7948
19 | 2025-04-27 19:16:12,675 - T:6 MAE 5.7889 RMSE 6.1513 MAPE 15.1150
20 | 2025-04-27 19:16:13,086 - T:12 MAE 5.3346 RMSE 5.8285 MAPE 13.9409
21 | 2025-04-27 19:16:13,086 - T:Avg MAE 5.7222 RMSE 6.0987 MAPE 14.9508
22 | 2025-04-27 19:16:13,087 - Finished optimization, total time:7.40 s, best model:log/ENERGY-Wind/stadapter_st-622/0/2.992.pkl
23 | 2025-04-27 19:16:13,096 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
24 | 2025-04-27 19:16:13,266 - [*] Year 1 Dataset load!
25 | 2025-04-27 19:16:13,267 - [*] load from log/ENERGY-Wind/stadapter_st-622/0/2.992.pkl
26 | 2025-04-27 19:16:13,277 - ST-Adapter initialized with backbone stgnn
27 | 2025-04-27 19:16:13,277 - Adapter bottleneck ratio: 1/8
28 | 2025-04-27 19:16:13,280 - [*] Year 1 Training start
29 | 2025-04-27 19:16:13,812 - node number torch.Size([14464, 12])
30 | 2025-04-27 19:16:15,062 - epoch:0, training loss:55.9271 validation loss:1.9040
31 | 2025-04-27 19:16:16,834 - epoch:1, training loss:5.1014 validation loss:1.5674
32 | 2025-04-27 19:16:18,771 - epoch:2, training loss:2.8625 validation loss:1.0391
33 | 2025-04-27 19:16:20,447 - epoch:3, training loss:2.1907 validation loss:1.0440
34 | 2025-04-27 19:16:22,170 - epoch:4, training loss:2.0671 validation loss:0.9755
35 | 2025-04-27 19:16:23,821 - epoch:5, training loss:1.9833 validation loss:0.9456
36 | 2025-04-27 19:16:25,617 - epoch:6, training loss:1.9514 validation loss:1.0803
37 | 2025-04-27 19:16:27,271 - epoch:7, training loss:1.9102 validation loss:1.0094
38 | 2025-04-27 19:16:28,974 - epoch:8, training loss:1.8839 validation loss:0.9542
39 | 2025-04-27 19:16:30,751 - epoch:9, training loss:1.8566 validation loss:0.9335
40 | 2025-04-27 19:16:32,529 - epoch:10, training loss:1.8558 validation loss:1.0239
41 | 2025-04-27 19:16:34,337 - epoch:11, training loss:1.8441 validation loss:0.9318
42 | 2025-04-27 19:16:35,893 - epoch:12, training loss:1.8214 validation loss:0.9907
43 | 2025-04-27 19:16:37,495 - epoch:13, training loss:1.8112 validation loss:0.9550
44 | 2025-04-27 19:16:39,182 - epoch:14, training loss:1.8264 validation loss:1.1724
45 | 2025-04-27 19:16:41,047 - epoch:15, training loss:1.8463 validation loss:0.8956
46 | 2025-04-27 19:16:42,768 - epoch:16, training loss:1.7783 validation loss:0.9518
47 | 2025-04-27 19:16:44,559 - epoch:17, training loss:1.7998 validation loss:0.9234
48 | 2025-04-27 19:16:46,273 - epoch:18, training loss:1.8157 validation loss:1.3280
49 | 2025-04-27 19:16:48,041 - epoch:19, training loss:1.7744 validation loss:0.9430
50 | 2025-04-27 19:16:49,670 - epoch:20, training loss:1.7609 validation loss:0.9130
51 | 2025-04-27 19:16:51,442 - epoch:21, training loss:1.7701 validation loss:0.9548
52 | 2025-04-27 19:16:52,142 - [*] loss:5.2104
53 | 2025-04-27 19:16:52,150 - [*] year 1, testing
54 | 2025-04-27 19:16:52,210 - T:3 MAE 1.9411 RMSE 2.0794 MAPE 5.4447
55 | 2025-04-27 19:16:52,300 - T:6 MAE 1.9555 RMSE 2.1326 MAPE 5.4912
56 | 2025-04-27 19:16:52,598 - T:12 MAE 2.0053 RMSE 2.2872 MAPE 5.6722
57 | 2025-04-27 19:16:52,598 - T:Avg MAE 1.9613 RMSE 2.1543 MAPE 5.5141
58 | 2025-04-27 19:16:52,598 - Finished optimization, total time:23.26 s, best model:log/ENERGY-Wind/stadapter_st-622/1/0.8956.pkl
59 | 2025-04-27 19:16:52,607 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
60 | 2025-04-27 19:16:52,784 - [*] Year 2 Dataset load!
61 | 2025-04-27 19:16:52,785 - [*] load from log/ENERGY-Wind/stadapter_st-622/1/0.8956.pkl
62 | 2025-04-27 19:16:52,794 - ST-Adapter initialized with backbone stgnn
63 | 2025-04-27 19:16:52,794 - Adapter bottleneck ratio: 1/8
64 | 2025-04-27 19:16:52,797 - [*] Year 2 Training start
65 | 2025-04-27 19:16:53,479 - node number torch.Size([15616, 12])
66 | 2025-04-27 19:16:54,698 - epoch:0, training loss:13.6859 validation loss:7.3132
67 | 2025-04-27 19:16:56,457 - epoch:1, training loss:2.0609 validation loss:7.7740
68 | 2025-04-27 19:16:58,161 - epoch:2, training loss:1.9265 validation loss:8.1685
69 | 2025-04-27 19:16:59,885 - epoch:3, training loss:1.8639 validation loss:7.8645
70 | 2025-04-27 19:17:01,617 - epoch:4, training loss:1.8463 validation loss:8.1964
71 | 2025-04-27 19:17:03,358 - epoch:5, training loss:1.8230 validation loss:8.0463
72 | 2025-04-27 19:17:05,130 - epoch:6, training loss:1.7987 validation loss:8.0527
73 | 2025-04-27 19:17:05,803 - [*] loss:54.0498
74 | 2025-04-27 19:17:05,816 - [*] year 2, testing
75 | 2025-04-27 19:17:05,898 - T:3 MAE 7.2178 RMSE 7.3188 MAPE 31.9493
76 | 2025-04-27 19:17:06,007 - T:6 MAE 7.2082 RMSE 7.3345 MAPE 31.9402
77 | 2025-04-27 19:17:06,399 - T:12 MAE 7.2003 RMSE 7.3875 MAPE 32.0206
78 | 2025-04-27 19:17:06,400 - T:Avg MAE 7.2089 RMSE 7.3423 MAPE 31.9580
79 | 2025-04-27 19:17:06,400 - Finished optimization, total time:7.60 s, best model:log/ENERGY-Wind/stadapter_st-622/2/7.3132.pkl
80 | 2025-04-27 19:17:06,410 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
81 | 2025-04-27 19:17:06,608 - [*] Year 3 Dataset load!
82 | 2025-04-27 19:17:06,610 - [*] load from log/ENERGY-Wind/stadapter_st-622/2/7.3132.pkl
83 | 2025-04-27 19:17:06,619 - ST-Adapter initialized with backbone stgnn
84 | 2025-04-27 19:17:06,620 - Adapter bottleneck ratio: 1/8
85 | 2025-04-27 19:17:06,622 - [*] Year 3 Training start
86 | 2025-04-27 19:17:07,182 - node number torch.Size([17152, 12])
87 | 2025-04-27 19:17:08,525 - epoch:0, training loss:20.0102 validation loss:6.2503
88 | 2025-04-27 19:17:10,303 - epoch:1, training loss:2.2684 validation loss:6.6426
89 | 2025-04-27 19:17:12,096 - epoch:2, training loss:2.0479 validation loss:6.6834
90 | 2025-04-27 19:17:13,977 - epoch:3, training loss:1.9954 validation loss:6.6903
91 | 2025-04-27 19:17:15,643 - epoch:4, training loss:1.9555 validation loss:6.4829
92 | 2025-04-27 19:17:17,441 - epoch:5, training loss:1.9244 validation loss:6.4450
93 | 2025-04-27 19:17:19,226 - epoch:6, training loss:1.9204 validation loss:6.7449
94 | 2025-04-27 19:17:19,934 - [*] loss:53.7557
95 | 2025-04-27 19:17:19,944 - [*] year 3, testing
96 | 2025-04-27 19:17:20,016 - T:3 MAE 6.7124 RMSE 7.0168 MAPE 137.9494
97 | 2025-04-27 19:17:20,127 - T:6 MAE 6.8682 RMSE 7.2696 MAPE 141.7371
98 | 2025-04-27 19:17:20,496 - T:12 MAE 6.8433 RMSE 7.3889 MAPE 147.1859
99 | 2025-04-27 19:17:20,497 - T:Avg MAE 6.8458 RMSE 7.2500 MAPE 142.4055
100 | 2025-04-27 19:17:20,497 - Finished optimization, total time:7.75 s, best model:log/ENERGY-Wind/stadapter_st-622/3/6.2503.pkl
101 | 2025-04-27 19:17:20,498 -
102 |
103 |
104 | 2025-04-27 19:17:20,498 - 3 MAE 6.05 1.94 7.22 6.71 5.48
105 | 2025-04-27 19:17:20,498 - 3 RMSE 6.34 2.08 7.32 7.02 5.69
106 | 2025-04-27 19:17:20,498 - 3 MAPE 15.79 5.44 31.95 137.95 47.78
107 | 2025-04-27 19:17:20,498 - 6 MAE 5.79 1.96 7.21 6.87 5.46
108 | 2025-04-27 19:17:20,499 - 6 RMSE 6.15 2.13 7.33 7.27 5.72
109 | 2025-04-27 19:17:20,499 - 6 MAPE 15.12 5.49 31.94 141.74 48.57
110 | 2025-04-27 19:17:20,499 - 12 MAE 5.33 2.01 7.20 6.84 5.35
111 | 2025-04-27 19:17:20,499 - 12 RMSE 5.83 2.29 7.39 7.39 5.72
112 | 2025-04-27 19:17:20,499 - 12 MAPE 13.94 5.67 32.02 147.19 49.70
113 | 2025-04-27 19:17:20,499 - Avg MAE 5.72 1.96 7.21 6.85 5.43
114 | 2025-04-27 19:17:20,499 - Avg RMSE 6.10 2.15 7.34 7.25 5.71
115 | 2025-04-27 19:17:20,499 - Avg MAPE 14.95 5.51 31.96 142.41 48.71
116 | 2025-04-27 19:17:20,499 - year 0 total_time 7.3985 average_time 1.0570 epoch 7
117 | 2025-04-27 19:17:20,499 - year 1 total_time 23.2602 average_time 1.0573 epoch 22
118 | 2025-04-27 19:17:20,499 - year 2 total_time 7.5955 average_time 1.0851 epoch 7
119 | 2025-04-27 19:17:20,499 - year 3 total_time 7.7456 average_time 1.1065 epoch 7
120 | 2025-04-27 19:17:20,499 - total time: 45.9998
121 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/stlora_st-24/stlora_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 17:10:54,195 - logger name:log/ENERGY-Wind/stlora_st-24/stlora_st.log
2 | 2025-04-27 17:10:54,195 - params : {'conf': 'conf/ENERGY-Wind/stlora.json', 'seed': 24, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'stlora_st', 'method': 'STLoRA', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/stlora_st-24', 'logger': }
3 | 2025-04-27 17:10:54,214 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 17:10:54,698 - [*] Year 0 Dataset load!
5 | 2025-04-27 17:10:54,700 - RAP initialized with backbone stgnn
6 | 2025-04-27 17:10:54,700 - [*] Year 0 Training start
7 | 2025-04-27 17:10:55,000 - node number torch.Size([13184, 12])
8 | 2025-04-27 17:10:56,320 - epoch:0, training loss:195.4291 validation loss:3.4340
9 | 2025-04-27 17:10:57,869 - epoch:1, training loss:4.5804 validation loss:3.5250
10 | 2025-04-27 17:10:59,369 - epoch:2, training loss:2.5855 validation loss:3.0127
11 | 2025-04-27 17:11:00,902 - epoch:3, training loss:2.2899 validation loss:3.1721
12 | 2025-04-27 17:11:02,474 - epoch:4, training loss:2.2115 validation loss:3.2436
13 | 2025-04-27 17:11:04,289 - epoch:5, training loss:2.1765 validation loss:3.2093
14 | 2025-04-27 17:11:06,173 - epoch:6, training loss:2.1067 validation loss:3.4234
15 | 2025-04-27 17:11:07,958 - epoch:7, training loss:2.0781 validation loss:3.2787
16 | 2025-04-27 17:11:09,486 - epoch:8, training loss:2.0491 validation loss:3.1165
17 | 2025-04-27 17:11:10,316 - [*] loss:32.8293
18 | 2025-04-27 17:11:10,322 - [*] year 0, testing
19 | 2025-04-27 17:11:10,426 - T:3 MAE 5.5025 RMSE 5.6020 MAPE 14.8563
20 | 2025-04-27 17:11:10,609 - T:6 MAE 5.5199 RMSE 5.6570 MAPE 14.9031
21 | 2025-04-27 17:11:11,082 - T:12 MAE 5.5156 RMSE 5.7379 MAPE 14.8488
22 | 2025-04-27 17:11:11,082 - T:Avg MAE 5.5145 RMSE 5.6609 MAPE 14.8820
23 | 2025-04-27 17:11:11,083 - Finished optimization, total time:8.63 s, best model:log/ENERGY-Wind/stlora_st-24/0/3.0127.pkl
24 | 2025-04-27 17:11:11,090 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
25 | 2025-04-27 17:11:11,264 - [*] Year 1 Dataset load!
26 | 2025-04-27 17:11:11,265 - [*] load from log/ENERGY-Wind/stlora_st-24/0/3.0127.pkl
27 | 2025-04-27 17:11:11,270 - RAP initialized with backbone stgnn
28 | 2025-04-27 17:11:11,272 - [*] Year 1 Training start
29 | 2025-04-27 17:11:11,838 - node number torch.Size([14464, 12])
30 | 2025-04-27 17:11:13,074 - epoch:0, training loss:11.6398 validation loss:1.0378
31 | 2025-04-27 17:11:14,971 - epoch:1, training loss:2.2488 validation loss:1.1384
32 | 2025-04-27 17:11:17,034 - epoch:2, training loss:2.1227 validation loss:1.1989
33 | 2025-04-27 17:11:18,853 - epoch:3, training loss:2.0297 validation loss:1.2083
34 | 2025-04-27 17:11:20,547 - epoch:4, training loss:1.9923 validation loss:1.1931
35 | 2025-04-27 17:11:22,315 - epoch:5, training loss:1.9948 validation loss:1.0588
36 | 2025-04-27 17:11:24,381 - epoch:6, training loss:1.8949 validation loss:1.0177
37 | 2025-04-27 17:11:26,244 - epoch:7, training loss:1.8801 validation loss:1.0704
38 | 2025-04-27 17:11:28,041 - epoch:8, training loss:2.0308 validation loss:0.9517
39 | 2025-04-27 17:11:29,752 - epoch:9, training loss:1.8544 validation loss:0.9945
40 | 2025-04-27 17:11:31,680 - epoch:10, training loss:1.7984 validation loss:0.9760
41 | 2025-04-27 17:11:33,603 - epoch:11, training loss:1.7862 validation loss:0.9128
42 | 2025-04-27 17:11:35,356 - epoch:12, training loss:1.7929 validation loss:0.9547
43 | 2025-04-27 17:11:37,130 - epoch:13, training loss:1.7721 validation loss:1.0149
44 | 2025-04-27 17:11:38,901 - epoch:14, training loss:1.7904 validation loss:1.1199
45 | 2025-04-27 17:11:40,944 - epoch:15, training loss:1.7863 validation loss:1.1309
46 | 2025-04-27 17:11:42,825 - epoch:16, training loss:1.8351 validation loss:1.0992
47 | 2025-04-27 17:11:44,644 - epoch:17, training loss:1.7692 validation loss:0.8617
48 | 2025-04-27 17:11:46,460 - epoch:18, training loss:1.8091 validation loss:1.0926
49 | 2025-04-27 17:11:48,272 - epoch:19, training loss:1.8970 validation loss:1.0100
50 | 2025-04-27 17:11:50,063 - epoch:20, training loss:1.7966 validation loss:1.4108
51 | 2025-04-27 17:11:51,815 - epoch:21, training loss:1.7714 validation loss:0.9773
52 | 2025-04-27 17:11:53,511 - epoch:22, training loss:1.7784 validation loss:0.9912
53 | 2025-04-27 17:11:55,389 - epoch:23, training loss:1.7433 validation loss:1.3153
54 | 2025-04-27 17:11:56,234 - [*] loss:4.7105
55 | 2025-04-27 17:11:56,241 - [*] year 1, testing
56 | 2025-04-27 17:11:56,320 - T:3 MAE 1.9343 RMSE 2.0335 MAPE 5.4015
57 | 2025-04-27 17:11:56,436 - T:6 MAE 1.9007 RMSE 2.0647 MAPE 5.3508
58 | 2025-04-27 17:11:56,764 - T:12 MAE 1.8881 RMSE 2.1666 MAPE 5.3822
59 | 2025-04-27 17:11:56,764 - T:Avg MAE 1.9018 RMSE 2.0742 MAPE 5.3565
60 | 2025-04-27 17:11:56,765 - Finished optimization, total time:24.76 s, best model:log/ENERGY-Wind/stlora_st-24/1/0.8617.pkl
61 | 2025-04-27 17:11:56,778 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
62 | 2025-04-27 17:11:56,980 - [*] Year 2 Dataset load!
63 | 2025-04-27 17:11:56,981 - [*] load from log/ENERGY-Wind/stlora_st-24/1/0.8617.pkl
64 | 2025-04-27 17:11:56,987 - RAP initialized with backbone stgnn
65 | 2025-04-27 17:11:56,988 - [*] Year 2 Training start
66 | 2025-04-27 17:11:57,799 - node number torch.Size([15616, 12])
67 | 2025-04-27 17:11:59,216 - epoch:0, training loss:8.7107 validation loss:7.4231
68 | 2025-04-27 17:12:00,943 - epoch:1, training loss:2.0970 validation loss:8.1227
69 | 2025-04-27 17:12:02,726 - epoch:2, training loss:1.9153 validation loss:8.4190
70 | 2025-04-27 17:12:04,472 - epoch:3, training loss:1.8306 validation loss:8.2254
71 | 2025-04-27 17:12:06,449 - epoch:4, training loss:1.8191 validation loss:8.0980
72 | 2025-04-27 17:12:08,465 - epoch:5, training loss:1.7887 validation loss:7.6874
73 | 2025-04-27 17:12:10,201 - epoch:6, training loss:1.8398 validation loss:7.7166
74 | 2025-04-27 17:12:11,183 - [*] loss:54.6621
75 | 2025-04-27 17:12:11,190 - [*] year 2, testing
76 | 2025-04-27 17:12:11,248 - T:3 MAE 7.3343 RMSE 7.4110 MAPE 32.5841
77 | 2025-04-27 17:12:11,377 - T:6 MAE 7.2896 RMSE 7.3885 MAPE 32.5246
78 | 2025-04-27 17:12:11,723 - T:12 MAE 7.2426 RMSE 7.3974 MAPE 32.5267
79 | 2025-04-27 17:12:11,723 - T:Avg MAE 7.2909 RMSE 7.3969 MAPE 32.5335
80 | 2025-04-27 17:12:11,724 - Finished optimization, total time:7.43 s, best model:log/ENERGY-Wind/stlora_st-24/2/7.4231.pkl
81 | 2025-04-27 17:12:11,740 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
82 | 2025-04-27 17:12:11,950 - [*] Year 3 Dataset load!
83 | 2025-04-27 17:12:11,951 - [*] load from log/ENERGY-Wind/stlora_st-24/2/7.4231.pkl
84 | 2025-04-27 17:12:11,956 - RAP initialized with backbone stgnn
85 | 2025-04-27 17:12:11,957 - [*] Year 3 Training start
86 | 2025-04-27 17:12:12,481 - node number torch.Size([17152, 12])
87 | 2025-04-27 17:12:13,896 - epoch:0, training loss:16.1490 validation loss:6.5369
88 | 2025-04-27 17:12:16,023 - epoch:1, training loss:2.3944 validation loss:6.7055
89 | 2025-04-27 17:12:18,019 - epoch:2, training loss:2.1160 validation loss:6.7939
90 | 2025-04-27 17:12:19,834 - epoch:3, training loss:2.0480 validation loss:6.6872
91 | 2025-04-27 17:12:21,661 - epoch:4, training loss:2.0078 validation loss:6.6403
92 | 2025-04-27 17:12:23,589 - epoch:5, training loss:1.9831 validation loss:6.7136
93 | 2025-04-27 17:12:25,526 - epoch:6, training loss:1.9415 validation loss:6.5607
94 | 2025-04-27 17:12:26,436 - [*] loss:56.7380
95 | 2025-04-27 17:12:26,443 - [*] year 3, testing
96 | 2025-04-27 17:12:26,511 - T:3 MAE 7.0188 RMSE 7.3158 MAPE 149.1261
97 | 2025-04-27 17:12:26,628 - T:6 MAE 7.0518 RMSE 7.4103 MAPE 153.7506
98 | 2025-04-27 17:12:26,995 - T:12 MAE 7.0830 RMSE 7.5667 MAPE 161.4535
99 | 2025-04-27 17:12:26,995 - T:Avg MAE 7.0471 RMSE 7.4204 MAPE 153.8829
100 | 2025-04-27 17:12:26,995 - Finished optimization, total time:7.72 s, best model:log/ENERGY-Wind/stlora_st-24/3/6.5369.pkl
101 | 2025-04-27 17:12:26,997 -
102 |
103 |
104 | 2025-04-27 17:12:26,997 - 3 MAE 5.50 1.93 7.33 7.02 5.45
105 | 2025-04-27 17:12:26,997 - 3 RMSE 5.60 2.03 7.41 7.32 5.59
106 | 2025-04-27 17:12:26,997 - 3 MAPE 14.86 5.40 32.58 149.13 50.49
107 | 2025-04-27 17:12:26,997 - 6 MAE 5.52 1.90 7.29 7.05 5.44
108 | 2025-04-27 17:12:26,997 - 6 RMSE 5.66 2.06 7.39 7.41 5.63
109 | 2025-04-27 17:12:26,997 - 6 MAPE 14.90 5.35 32.52 153.75 51.63
110 | 2025-04-27 17:12:26,997 - 12 MAE 5.52 1.89 7.24 7.08 5.43
111 | 2025-04-27 17:12:26,997 - 12 RMSE 5.74 2.17 7.40 7.57 5.72
112 | 2025-04-27 17:12:26,997 - 12 MAPE 14.85 5.38 32.53 161.45 53.55
113 | 2025-04-27 17:12:26,997 - Avg MAE 5.51 1.90 7.29 7.05 5.44
114 | 2025-04-27 17:12:26,997 - Avg RMSE 5.66 2.07 7.40 7.42 5.64
115 | 2025-04-27 17:12:26,997 - Avg MAPE 14.88 5.36 32.53 153.88 51.66
116 | 2025-04-27 17:12:26,997 - year 0 total_time 8.6315 average_time 0.9591 epoch 9
117 | 2025-04-27 17:12:26,998 - year 1 total_time 24.7601 average_time 1.0317 epoch 24
118 | 2025-04-27 17:12:26,998 - year 2 total_time 7.4338 average_time 1.0620 epoch 7
119 | 2025-04-27 17:12:26,998 - year 3 total_time 7.7198 average_time 1.1028 epoch 7
120 | 2025-04-27 17:12:26,998 - total time: 48.5452
121 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/graphpro_st-24/graphpro_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 19:33:32,629 - logger name:log/ENERGY-Wind/graphpro_st-24/graphpro_st.log
2 | 2025-04-27 19:33:32,629 - params : {'conf': 'conf/ENERGY-Wind/graphpro.json', 'seed': 24, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'graphpro_st', 'method': 'GraphPro', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'retrain', 'detect': False, 'ewc': False, 'replay': False, 'path': 'log/ENERGY-Wind/graphpro_st-24', 'logger': }
3 | 2025-04-27 19:33:32,640 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 19:33:33,102 - [*] Year 0 Dataset load!
5 | 2025-04-27 19:33:33,104 - Phase set to: pretrain
6 | 2025-04-27 19:33:33,104 - Phase will last for 50 epochs
7 | 2025-04-27 19:33:33,104 - GraphPro initialized with backbone stgnn
8 | 2025-04-27 19:33:33,104 - Initial phase: pretrain
9 | 2025-04-27 19:33:33,104 - Phase schedule: {'pretrain': 50, 'for_tune': 20, 'finetune': 30}
10 | 2025-04-27 19:33:33,104 - Time encoding: True
11 | 2025-04-27 19:33:33,105 - [*] Year 0 Training start
12 | 2025-04-27 19:33:33,544 - node number torch.Size([13184, 12])
13 | 2025-04-27 19:33:34,947 - epoch:0, training loss:180.7820 validation loss:2.9529
14 | 2025-04-27 19:33:36,531 - epoch:1, training loss:3.5852 validation loss:3.2073
15 | 2025-04-27 19:33:38,082 - epoch:2, training loss:2.3422 validation loss:3.4226
16 | 2025-04-27 19:33:39,638 - epoch:3, training loss:2.2227 validation loss:3.2019
17 | 2025-04-27 19:33:41,120 - epoch:4, training loss:2.1778 validation loss:3.5239
18 | 2025-04-27 19:33:42,646 - epoch:5, training loss:2.1543 validation loss:3.0459
19 | 2025-04-27 19:33:44,216 - epoch:6, training loss:2.1342 validation loss:3.4133
20 | 2025-04-27 19:33:44,865 - [*] loss:32.2228
21 | 2025-04-27 19:33:44,873 - [*] year 0, testing
22 | 2025-04-27 19:33:44,953 - T:3 MAE 5.8020 RMSE 6.0287 MAPE 15.3892
23 | 2025-04-27 19:33:45,094 - T:6 MAE 5.1725 RMSE 5.4988 MAPE 13.7554
24 | 2025-04-27 19:33:45,501 - T:12 MAE 5.2733 RMSE 5.7188 MAPE 14.0099
25 | 2025-04-27 19:33:45,502 - T:Avg MAE 5.3716 RMSE 5.6894 MAPE 14.2692
26 | 2025-04-27 19:33:45,502 - Finished optimization, total time:6.53 s, best model:log/ENERGY-Wind/graphpro_st-24/0/2.9529.pkl
27 | 2025-04-27 19:33:45,512 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
28 | 2025-04-27 19:33:45,682 - [*] Year 1 Dataset load!
29 | 2025-04-27 19:33:45,683 - [*] load from log/ENERGY-Wind/graphpro_st-24/0/2.9529.pkl
30 | 2025-04-27 19:33:45,689 - Phase set to: pretrain
31 | 2025-04-27 19:33:45,689 - Phase will last for 50 epochs
32 | 2025-04-27 19:33:45,689 - GraphPro initialized with backbone stgnn
33 | 2025-04-27 19:33:45,689 - Initial phase: pretrain
34 | 2025-04-27 19:33:45,689 - Phase schedule: {'pretrain': 50, 'for_tune': 20, 'finetune': 30}
35 | 2025-04-27 19:33:45,689 - Time encoding: True
36 | 2025-04-27 19:33:45,690 - [*] Year 1 Training start
37 | 2025-04-27 19:33:46,220 - node number torch.Size([14464, 12])
38 | 2025-04-27 19:33:47,458 - epoch:0, training loss:13.8345 validation loss:1.2561
39 | 2025-04-27 19:33:49,147 - epoch:1, training loss:2.5770 validation loss:1.0244
40 | 2025-04-27 19:33:50,687 - epoch:2, training loss:2.0951 validation loss:1.1025
41 | 2025-04-27 19:33:52,286 - epoch:3, training loss:1.9883 validation loss:1.1375
42 | 2025-04-27 19:33:54,000 - epoch:4, training loss:1.9567 validation loss:1.0541
43 | 2025-04-27 19:33:55,614 - epoch:5, training loss:1.9033 validation loss:1.2243
44 | 2025-04-27 19:33:57,216 - epoch:6, training loss:1.9639 validation loss:1.1359
45 | 2025-04-27 19:33:58,875 - epoch:7, training loss:1.8304 validation loss:1.0800
46 | 2025-04-27 19:33:59,588 - [*] loss:5.9380
47 | 2025-04-27 19:33:59,596 - [*] year 1, testing
48 | 2025-04-27 19:33:59,658 - T:3 MAE 2.0423 RMSE 2.1989 MAPE 5.6914
49 | 2025-04-27 19:33:59,754 - T:6 MAE 2.0570 RMSE 2.2632 MAPE 5.7381
50 | 2025-04-27 19:34:00,072 - T:12 MAE 2.1342 RMSE 2.4446 MAPE 5.9991
51 | 2025-04-27 19:34:00,072 - T:Avg MAE 2.0681 RMSE 2.2844 MAPE 5.7740
52 | 2025-04-27 19:34:00,073 - Finished optimization, total time:7.61 s, best model:log/ENERGY-Wind/graphpro_st-24/1/1.0244.pkl
53 | 2025-04-27 19:34:00,083 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
54 | 2025-04-27 19:34:00,277 - [*] Year 2 Dataset load!
55 | 2025-04-27 19:34:00,279 - [*] load from log/ENERGY-Wind/graphpro_st-24/1/1.0244.pkl
56 | 2025-04-27 19:34:00,285 - Phase set to: pretrain
57 | 2025-04-27 19:34:00,285 - Phase will last for 50 epochs
58 | 2025-04-27 19:34:00,285 - GraphPro initialized with backbone stgnn
59 | 2025-04-27 19:34:00,285 - Initial phase: pretrain
60 | 2025-04-27 19:34:00,285 - Phase schedule: {'pretrain': 50, 'for_tune': 20, 'finetune': 30}
61 | 2025-04-27 19:34:00,285 - Time encoding: True
62 | 2025-04-27 19:34:00,287 - [*] Year 2 Training start
63 | 2025-04-27 19:34:00,956 - node number torch.Size([15616, 12])
64 | 2025-04-27 19:34:02,105 - epoch:0, training loss:9.8877 validation loss:8.8847
65 | 2025-04-27 19:34:03,835 - epoch:1, training loss:2.3063 validation loss:8.2602
66 | 2025-04-27 19:34:05,518 - epoch:2, training loss:2.1208 validation loss:8.4131
67 | 2025-04-27 19:34:07,167 - epoch:3, training loss:2.0587 validation loss:7.9906
68 | 2025-04-27 19:34:08,744 - epoch:4, training loss:1.9938 validation loss:8.1076
69 | 2025-04-27 19:34:10,401 - epoch:5, training loss:1.9799 validation loss:8.0021
70 | 2025-04-27 19:34:12,099 - epoch:6, training loss:1.9323 validation loss:8.0875
71 | 2025-04-27 19:34:13,851 - epoch:7, training loss:1.8632 validation loss:8.4553
72 | 2025-04-27 19:34:15,478 - epoch:8, training loss:1.8269 validation loss:8.0111
73 | 2025-04-27 19:34:17,178 - epoch:9, training loss:1.8534 validation loss:8.0521
74 | 2025-04-27 19:34:17,960 - [*] loss:63.5490
75 | 2025-04-27 19:34:17,971 - [*] year 2, testing
76 | 2025-04-27 19:34:18,037 - T:3 MAE 8.0456 RMSE 8.1201 MAPE 35.7189
77 | 2025-04-27 19:34:18,138 - T:6 MAE 7.9483 RMSE 8.0479 MAPE 35.3240
78 | 2025-04-27 19:34:18,516 - T:12 MAE 7.8373 RMSE 7.9895 MAPE 34.9953
79 | 2025-04-27 19:34:18,517 - T:Avg MAE 7.9421 RMSE 8.0472 MAPE 35.3162
80 | 2025-04-27 19:34:18,517 - Finished optimization, total time:10.02 s, best model:log/ENERGY-Wind/graphpro_st-24/2/7.9906.pkl
81 | 2025-04-27 19:34:18,526 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
82 | 2025-04-27 19:34:18,720 - [*] Year 3 Dataset load!
83 | 2025-04-27 19:34:18,722 - [*] load from log/ENERGY-Wind/graphpro_st-24/2/7.9906.pkl
84 | 2025-04-27 19:34:18,727 - Phase set to: pretrain
85 | 2025-04-27 19:34:18,727 - Phase will last for 50 epochs
86 | 2025-04-27 19:34:18,727 - GraphPro initialized with backbone stgnn
87 | 2025-04-27 19:34:18,728 - Initial phase: pretrain
88 | 2025-04-27 19:34:18,728 - Phase schedule: {'pretrain': 50, 'for_tune': 20, 'finetune': 30}
89 | 2025-04-27 19:34:18,728 - Time encoding: True
90 | 2025-04-27 19:34:18,729 - [*] Year 3 Training start
91 | 2025-04-27 19:34:19,286 - node number torch.Size([17152, 12])
92 | 2025-04-27 19:34:20,391 - epoch:0, training loss:17.9595 validation loss:5.9733
93 | 2025-04-27 19:34:22,079 - epoch:1, training loss:2.6188 validation loss:6.6915
94 | 2025-04-27 19:34:23,745 - epoch:2, training loss:2.2091 validation loss:6.6639
95 | 2025-04-27 19:34:25,368 - epoch:3, training loss:2.1364 validation loss:6.4998
96 | 2025-04-27 19:34:26,998 - epoch:4, training loss:2.1255 validation loss:6.4544
97 | 2025-04-27 19:34:28,770 - epoch:5, training loss:2.0899 validation loss:6.6890
98 | 2025-04-27 19:34:30,504 - epoch:6, training loss:2.0929 validation loss:6.4801
99 | 2025-04-27 19:34:31,266 - [*] loss:48.8384
100 | 2025-04-27 19:34:31,274 - [*] year 3, testing
101 | 2025-04-27 19:34:31,350 - T:3 MAE 6.4881 RMSE 6.7692 MAPE 139.1250
102 | 2025-04-27 19:34:31,463 - T:6 MAE 6.4899 RMSE 6.8507 MAPE 140.9272
103 | 2025-04-27 19:34:31,844 - T:12 MAE 6.5294 RMSE 7.0282 MAPE 148.1992
104 | 2025-04-27 19:34:31,844 - T:Avg MAE 6.4983 RMSE 6.8715 MAPE 141.8591
105 | 2025-04-27 19:34:31,844 - Finished optimization, total time:6.81 s, best model:log/ENERGY-Wind/graphpro_st-24/3/5.9733.pkl
106 | 2025-04-27 19:34:31,846 -
107 |
108 |
109 | 2025-04-27 19:34:31,846 - 3 MAE 5.80 2.04 8.05 6.49 5.59
110 | 2025-04-27 19:34:31,846 - 3 RMSE 6.03 2.20 8.12 6.77 5.78
111 | 2025-04-27 19:34:31,846 - 3 MAPE 15.39 5.69 35.72 139.13 48.98
112 | 2025-04-27 19:34:31,846 - 6 MAE 5.17 2.06 7.95 6.49 5.42
113 | 2025-04-27 19:34:31,846 - 6 RMSE 5.50 2.26 8.05 6.85 5.67
114 | 2025-04-27 19:34:31,846 - 6 MAPE 13.76 5.74 35.32 140.93 48.94
115 | 2025-04-27 19:34:31,846 - 12 MAE 5.27 2.13 7.84 6.53 5.44
116 | 2025-04-27 19:34:31,846 - 12 RMSE 5.72 2.44 7.99 7.03 5.80
117 | 2025-04-27 19:34:31,847 - 12 MAPE 14.01 6.00 35.00 148.20 50.80
118 | 2025-04-27 19:34:31,847 - Avg MAE 5.37 2.07 7.94 6.50 5.47
119 | 2025-04-27 19:34:31,847 - Avg RMSE 5.69 2.28 8.05 6.87 5.72
120 | 2025-04-27 19:34:31,847 - Avg MAPE 14.27 5.77 35.32 141.86 49.30
121 | 2025-04-27 19:34:31,847 - year 0 total_time 6.5255 average_time 0.9322 epoch 7
122 | 2025-04-27 19:34:31,847 - year 1 total_time 7.6146 average_time 0.9519 epoch 8
123 | 2025-04-27 19:34:31,847 - year 2 total_time 10.0182 average_time 1.0018 epoch 10
124 | 2025-04-27 19:34:31,847 - year 3 total_time 6.8075 average_time 0.9725 epoch 7
125 | 2025-04-27 19:34:31,847 - total time: 30.9658
126 |
--------------------------------------------------------------------------------
/log/ENERGY-Wind/replay_st-24/replay_st.log:
--------------------------------------------------------------------------------
1 | 2025-04-27 18:51:49,454 - logger name:log/ENERGY-Wind/replay_st-24/replay_st.log
2 | 2025-04-27 18:51:49,454 - params : {'conf': 'conf/ENERGY-Wind/replay.json', 'seed': 24, 'paral': 0, 'backbone_type': 'stgnn', 'gpuid': 2, 'logname': 'replay_st', 'method': 'TrafficStream', 'load_first_year': 0, 'first_year_model_path': '/home/haoyu.zhang/MoE_GNN/new/code/log/AIR/stlora_T-24/2016/21.3875.pkl', 'device': device(type='cuda', index=2), 'methods': {'PECPM': , 'ST-Adapter': , 'GraphPro': , 'RAGraph': , 'STGNN_Model': , 'ASTGNN_Model': , 'DCRNN_Model': , 'TGCN_Model': , 'STLoRA': , 'RAP': , 'TrafficStream': , 'STKEC': , 'EAC': }, 'begin_year': 0, 'end_year': 3, 'dropout': 0.0, 'lr': 0.03, 'batch_size': 128, 'epoch': 100, 'loss': 'mse', 'activation': 'relu', 'scheduler': 'epo', 'y_len': 12, 'x_len': 12, 'data_process': 0, 'raw_data_path': 'data/ENERGY-Wind/RawData/', 'save_data_path': 'data/ENERGY-Wind/FastData/', 'graph_path': 'data/ENERGY-Wind/graph/', 'model_path': 'log/ENERGY-Wind/', 'gcn': {'in_channel': 12, 'out_channel': 12, 'hidden_channel': 64}, 'tcn': {'in_channel': 1, 'out_channel': 1, 'kernel_size': 3, 'dilation': 1}, 'init': True, 'train': 1, 'auto_test': 0, 'strategy': 'incremental', 'increase': True, 'num_hops': 2, 'detect': False, 'detect_strategy': 'feature', 'ewc': False, 'ewc_strategy': 'ewc', 'ewc_lambda': 0.0001, 'replay': True, 'replay_strategy': 'inforeplay', 'path': 'log/ENERGY-Wind/replay_st-24', 'logger': }
3 | 2025-04-27 18:51:49,467 - [*] Year 0 load from data/ENERGY-Wind/FastData/0.npz
4 | 2025-04-27 18:51:49,945 - [*] Year 0 Dataset load!
5 | 2025-04-27 18:51:49,947 - [*] Year 0 Training start
6 | 2025-04-27 18:51:50,325 - node number torch.Size([13184, 12])
7 | 2025-04-27 18:51:51,666 - epoch:0, training loss:195.4291 validation loss:3.4340
8 | 2025-04-27 18:51:53,292 - epoch:1, training loss:4.5804 validation loss:3.5250
9 | 2025-04-27 18:51:54,796 - epoch:2, training loss:2.5855 validation loss:3.0127
10 | 2025-04-27 18:51:56,337 - epoch:3, training loss:2.2899 validation loss:3.1721
11 | 2025-04-27 18:51:57,964 - epoch:4, training loss:2.2115 validation loss:3.2436
12 | 2025-04-27 18:51:59,487 - epoch:5, training loss:2.1765 validation loss:3.2093
13 | 2025-04-27 18:52:00,978 - epoch:6, training loss:2.1067 validation loss:3.4234
14 | 2025-04-27 18:52:02,483 - epoch:7, training loss:2.0781 validation loss:3.2787
15 | 2025-04-27 18:52:03,987 - epoch:8, training loss:2.0491 validation loss:3.1165
16 | 2025-04-27 18:52:04,685 - [*] loss:32.8293
17 | 2025-04-27 18:52:04,691 - [*] year 0, testing
18 | 2025-04-27 18:52:04,756 - T:3 MAE 5.5025 RMSE 5.6020 MAPE 14.8563
19 | 2025-04-27 18:52:04,882 - T:6 MAE 5.5199 RMSE 5.6570 MAPE 14.9031
20 | 2025-04-27 18:52:05,279 - T:12 MAE 5.5156 RMSE 5.7379 MAPE 14.8488
21 | 2025-04-27 18:52:05,280 - T:Avg MAE 5.5145 RMSE 5.6609 MAPE 14.8820
22 | 2025-04-27 18:52:05,282 - Finished optimization, total time:8.36 s, best model:log/ENERGY-Wind/replay_st-24/0/3.0127.pkl
23 | 2025-04-27 18:52:05,291 - [*] Year 1 load from data/ENERGY-Wind/FastData/1.npz
24 | 2025-04-27 18:52:05,295 - [*] load from log/ENERGY-Wind/replay_st-24/0/3.0127.pkl
25 | 2025-04-27 18:52:06,809 - number of increase nodes:15, nodes after 2 hop:torch.Size([87]), total nodes this year 113
26 | 2025-04-27 18:52:07,054 - [*] Year 1 Dataset load!
27 | 2025-04-27 18:52:07,055 - [*] load from log/ENERGY-Wind/replay_st-24/0/3.0127.pkl
28 | 2025-04-27 18:52:07,061 - [*] Year 1 Training start
29 | 2025-04-27 18:52:07,601 - node number torch.Size([11136, 12])
30 | 2025-04-27 18:52:08,925 - epoch:0, training loss:13.2807 validation loss:1.1082
31 | 2025-04-27 18:52:10,774 - epoch:1, training loss:2.0292 validation loss:1.0728
32 | 2025-04-27 18:52:12,572 - epoch:2, training loss:1.7897 validation loss:1.0499
33 | 2025-04-27 18:52:14,441 - epoch:3, training loss:1.7641 validation loss:1.1736
34 | 2025-04-27 18:52:16,326 - epoch:4, training loss:1.7039 validation loss:1.1158
35 | 2025-04-27 18:52:18,187 - epoch:5, training loss:1.6719 validation loss:1.1274
36 | 2025-04-27 18:52:19,993 - epoch:6, training loss:1.6399 validation loss:1.0746
37 | 2025-04-27 18:52:21,784 - epoch:7, training loss:1.6548 validation loss:1.1520
38 | 2025-04-27 18:52:23,594 - epoch:8, training loss:1.6130 validation loss:1.1679
39 | 2025-04-27 18:52:24,318 - [*] loss:6.7739
40 | 2025-04-27 18:52:24,329 - [*] year 1, testing
41 | 2025-04-27 18:52:24,389 - T:3 MAE 2.2732 RMSE 2.4213 MAPE 6.3527
42 | 2025-04-27 18:52:24,484 - T:6 MAE 2.2694 RMSE 2.4647 MAPE 6.3497
43 | 2025-04-27 18:52:24,773 - T:12 MAE 2.3063 RMSE 2.6051 MAPE 6.5019
44 | 2025-04-27 18:52:24,773 - T:Avg MAE 2.2796 RMSE 2.4854 MAPE 6.3833
45 | 2025-04-27 18:52:24,773 - Finished optimization, total time:9.84 s, best model:log/ENERGY-Wind/replay_st-24/1/1.0499.pkl
46 | 2025-04-27 18:52:24,781 - [*] Year 2 load from data/ENERGY-Wind/FastData/2.npz
47 | 2025-04-27 18:52:24,786 - [*] load from log/ENERGY-Wind/replay_st-24/1/1.0499.pkl
48 | 2025-04-27 18:52:26,535 - number of increase nodes:15, nodes after 2 hop:torch.Size([74]), total nodes this year 122
49 | 2025-04-27 18:52:26,760 - [*] Year 2 Dataset load!
50 | 2025-04-27 18:52:26,761 - [*] load from log/ENERGY-Wind/replay_st-24/1/1.0499.pkl
51 | 2025-04-27 18:52:26,767 - [*] Year 2 Training start
52 | 2025-04-27 18:52:27,435 - node number torch.Size([9472, 12])
53 | 2025-04-27 18:52:28,661 - epoch:0, training loss:8.9858 validation loss:7.5302
54 | 2025-04-27 18:52:30,468 - epoch:1, training loss:2.0815 validation loss:7.2327
55 | 2025-04-27 18:52:32,323 - epoch:2, training loss:1.9659 validation loss:7.1624
56 | 2025-04-27 18:52:34,138 - epoch:3, training loss:1.9466 validation loss:7.5663
57 | 2025-04-27 18:52:35,954 - epoch:4, training loss:1.8641 validation loss:7.5126
58 | 2025-04-27 18:52:37,799 - epoch:5, training loss:1.8423 validation loss:7.8234
59 | 2025-04-27 18:52:39,700 - epoch:6, training loss:1.7817 validation loss:7.4000
60 | 2025-04-27 18:52:41,528 - epoch:7, training loss:1.7437 validation loss:7.4669
61 | 2025-04-27 18:52:43,363 - epoch:8, training loss:1.7592 validation loss:7.8309
62 | 2025-04-27 18:52:44,116 - [*] loss:58.7250
63 | 2025-04-27 18:52:44,123 - [*] year 2, testing
64 | 2025-04-27 18:52:44,191 - T:3 MAE 7.5034 RMSE 7.5929 MAPE 33.2288
65 | 2025-04-27 18:52:44,291 - T:6 MAE 7.4957 RMSE 7.6132 MAPE 33.2259
66 | 2025-04-27 18:52:44,661 - T:12 MAE 7.5139 RMSE 7.6885 MAPE 33.4616
67 | 2025-04-27 18:52:44,662 - T:Avg MAE 7.5049 RMSE 7.6281 MAPE 33.2858
68 | 2025-04-27 18:52:44,662 - Finished optimization, total time:9.86 s, best model:log/ENERGY-Wind/replay_st-24/2/7.1624.pkl
69 | 2025-04-27 18:52:44,671 - [*] Year 3 load from data/ENERGY-Wind/FastData/3.npz
70 | 2025-04-27 18:52:44,675 - [*] load from log/ENERGY-Wind/replay_st-24/2/7.1624.pkl
71 | 2025-04-27 18:52:46,448 - number of increase nodes:18, nodes after 2 hop:torch.Size([93]), total nodes this year 134
72 | 2025-04-27 18:52:46,685 - [*] Year 3 Dataset load!
73 | 2025-04-27 18:52:46,685 - [*] load from log/ENERGY-Wind/replay_st-24/2/7.1624.pkl
74 | 2025-04-27 18:52:46,692 - [*] Year 3 Training start
75 | 2025-04-27 18:52:47,299 - node number torch.Size([11904, 12])
76 | 2025-04-27 18:52:48,584 - epoch:0, training loss:20.3419 validation loss:6.7961
77 | 2025-04-27 18:52:50,387 - epoch:1, training loss:2.5727 validation loss:6.3669
78 | 2025-04-27 18:52:52,366 - epoch:2, training loss:2.2768 validation loss:6.0663
79 | 2025-04-27 18:52:54,268 - epoch:3, training loss:2.1737 validation loss:6.1712
80 | 2025-04-27 18:52:56,208 - epoch:4, training loss:2.1050 validation loss:6.1554
81 | 2025-04-27 18:52:58,208 - epoch:5, training loss:2.0773 validation loss:6.0095
82 | 2025-04-27 18:53:00,090 - epoch:6, training loss:2.0401 validation loss:6.2424
83 | 2025-04-27 18:53:01,924 - epoch:7, training loss:2.0228 validation loss:6.1584
84 | 2025-04-27 18:53:03,877 - epoch:8, training loss:1.9635 validation loss:5.7240
85 | 2025-04-27 18:53:05,726 - epoch:9, training loss:2.0545 validation loss:6.2779
86 | 2025-04-27 18:53:07,575 - epoch:10, training loss:1.9900 validation loss:6.1783
87 | 2025-04-27 18:53:09,383 - epoch:11, training loss:1.9318 validation loss:6.0469
88 | 2025-04-27 18:53:11,197 - epoch:12, training loss:1.9386 validation loss:6.4005
89 | 2025-04-27 18:53:13,053 - epoch:13, training loss:1.8961 validation loss:6.2038
90 | 2025-04-27 18:53:15,050 - epoch:14, training loss:1.9067 validation loss:5.8882
91 | 2025-04-27 18:53:15,805 - [*] loss:52.4267
92 | 2025-04-27 18:53:15,811 - [*] year 3, testing
93 | 2025-04-27 18:53:15,884 - T:3 MAE 6.9834 RMSE 7.2246 MAPE 147.8762
94 | 2025-04-27 18:53:15,999 - T:6 MAE 6.9302 RMSE 7.2321 MAPE 150.0907
95 | 2025-04-27 18:53:16,380 - T:12 MAE 6.8511 RMSE 7.2842 MAPE 154.6007
96 | 2025-04-27 18:53:16,380 - T:Avg MAE 6.9241 RMSE 7.2388 MAPE 150.4069
97 | 2025-04-27 18:53:16,380 - Finished optimization, total time:16.75 s, best model:log/ENERGY-Wind/replay_st-24/3/5.724.pkl
98 | 2025-04-27 18:53:16,381 -
99 |
100 |
101 | 2025-04-27 18:53:16,381 - 3 MAE 5.50 2.27 7.50 6.98 5.57
102 | 2025-04-27 18:53:16,381 - 3 RMSE 5.60 2.42 7.59 7.22 5.71
103 | 2025-04-27 18:53:16,381 - 3 MAPE 14.86 6.35 33.23 147.88 50.58
104 | 2025-04-27 18:53:16,381 - 6 MAE 5.52 2.27 7.50 6.93 5.55
105 | 2025-04-27 18:53:16,381 - 6 RMSE 5.66 2.46 7.61 7.23 5.74
106 | 2025-04-27 18:53:16,381 - 6 MAPE 14.90 6.35 33.23 150.09 51.14
107 | 2025-04-27 18:53:16,381 - 12 MAE 5.52 2.31 7.51 6.85 5.55
108 | 2025-04-27 18:53:16,382 - 12 RMSE 5.74 2.61 7.69 7.28 5.83
109 | 2025-04-27 18:53:16,382 - 12 MAPE 14.85 6.50 33.46 154.60 52.35
110 | 2025-04-27 18:53:16,382 - Avg MAE 5.51 2.28 7.50 6.92 5.56
111 | 2025-04-27 18:53:16,382 - Avg RMSE 5.66 2.49 7.63 7.24 5.75
112 | 2025-04-27 18:53:16,382 - Avg MAPE 14.88 6.38 33.29 150.41 51.24
113 | 2025-04-27 18:53:16,382 - year 0 total_time 8.3579 average_time 0.9287 epoch 9
114 | 2025-04-27 18:53:16,382 - year 1 total_time 9.8376 average_time 1.0931 epoch 9
115 | 2025-04-27 18:53:16,382 - year 2 total_time 9.8631 average_time 1.0959 epoch 9
116 | 2025-04-27 18:53:16,382 - year 3 total_time 16.7507 average_time 1.1167 epoch 15
117 | 2025-04-27 18:53:16,382 - total time: 44.8092
118 |
--------------------------------------------------------------------------------