├── .gitignore
├── Appendix
│   └── Appendix.pdf
├── LICENSE
├── README.md
├── data_process
│   ├── __init__.py
│   ├── etth_data_loader.py
│   ├── financial_dataloader.py
│   └── forecast_dataloader.py
├── docs
│   └── RevIN.md
├── experiments
│   ├── __init__.py
│   ├── exp_ETTh.py
│   ├── exp_basic.py
│   ├── exp_financial.py
│   └── exp_pems.py
├── metrics
│   ├── ETTh_metrics.py
│   └── Finantial_metics.py
├── models
│   ├── SCINet.py
│   ├── SCINet_decompose.py
│   └── __init__.py
├── plot.py
├── prepare_data.sh
├── requirements.txt
├── run_ETTh.py
├── run_financial.py
├── run_pems.py
└── utils
    ├── __init__.py
    ├── histogram.ipynb
    ├── math_utils.py
    ├── recursive_demo.py
    ├── timefeatures.py
    └── tools.py
/.gitignore:
--------------------------------------------------------------------------------
1 | log/
2 | model/
3 | datasets/
4 | dataset/
5 | *.txt
6 | *.pyc
7 | *.idea
8 | *.xml
9 | *.pth
10 | *.npe
11 | run/
12 | run_ETTh/
13 | run_financial/
14 | checkpoints/
15 | output/
16 | run_PEMS/
17 | pems_checkpoint/
18 | financial_checkpoints/
19 | ETT_checkpoints/
20 | exp/
21 |
--------------------------------------------------------------------------------
/Appendix/Appendix.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cure-lab/SCINet/02e6b0af2d58243de09aaa1eac3840237b659847/Appendix/Appendix.pdf
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SCINet
2 | [Paper (arXiv)](https://arxiv.org/pdf/2106.09305.pdf)
3 |
4 | 
5 |
6 |
7 |
8 | This is the original PyTorch implementation of the following paper: [SCINet: Time Series Modeling and Forecasting with Sample Convolution and Interaction](https://arxiv.org/pdf/2106.09305.pdf). Also see the [OpenReview version](https://openreview.net/pdf?id=AyajSjTAzmg).
9 |
10 | If you find this repository useful for your research work, please consider citing it as follows:
11 |
12 | ```
13 | @article{liu2022SCINet,
14 |   title={SCINet: Time Series Modeling and Forecasting with Sample Convolution and Interaction},
15 |   author={Liu, Minhao and Zeng, Ailing and Chen, Muxi and Xu, Zhijian and Lai, Qiuxia and Ma, Lingna and Xu, Qiang},
16 |   journal={Thirty-sixth Conference on Neural Information Processing Systems (NeurIPS), 2022},
17 |   year={2022}
18 | }
26 | ```
27 |
28 | ## Updates
29 | - [2022-09-15] SCINet has been accepted to NeurIPS 2022!
30 |
31 | - [2021-11-10] Added support for Reversible Instance Normalization (RevIN) [1]!
32 |
33 | - [2021-09-17] SCINet v1.0 is released
34 |
35 |
36 | ## Features
37 |
38 | - [x] Support **11** popular time-series forecasting datasets, namely Electricity Transformer Temperature (ETTh1, ETTh2 and ETTm1), Traffic, Solar-Energy, Electricity, Exchange-Rate, and PeMS (PEMS03, PEMS04, PEMS07 and PEMS08), covering the power, energy, finance and traffic domains.
39 |
47 |
48 | - [x] Provide all training logs.
49 |
50 | - [x] Support RevIN to handle datasets with a large train-test sample distribution gap. To activate, simply add ```--RIN True``` to the command line. [**Read more**](./docs/RevIN.md)
51 |
52 |
53 | ## To-do items
54 |
55 | - Integrate GNN-based spatial models into SCINet for better performance and higher efficiency on spatial-temporal time series. Our preliminary results show that this feature could bring considerable gains in prediction accuracy on some datasets (e.g., PEMSxx).
56 |
57 | - Generate probabilistic forecasting results.
58 |
59 | Stay tuned!
60 |
61 |
62 |
63 |
64 | ## Used Datasets
65 |
66 |
67 | We conduct the experiments on **11** popular time-series datasets, namely **Electricity Transformer Temperature (ETTh1, ETTh2 and ETTm1), PeMS (PEMS03, PEMS04, PEMS07 and PEMS08), and Traffic, Solar-Energy, Electricity and Exchange-Rate**, covering the **power, energy, finance and traffic domains**.
68 |
69 |
70 | ### Overall information of the 11 datasets
71 |
72 | | Datasets | Variants | Timesteps | Granularity | Start time | Task Type |
73 | | ------------- | -------- | --------- | ----------- | ---------- | ----------- |
74 | | ETTh1 | 7 | 17,420 | 1hour | 7/1/2016 | Multi-step |
75 | | ETTh2 | 7 | 17,420 | 1hour | 7/1/2016 | Multi-step |
76 | | ETTm1 | 7 | 69,680 | 15min | 7/1/2016 | Multi-step |
77 | | PEMS03 | 358 | 26,209 | 5min | 5/1/2012 | Multi-step |
78 | | PEMS04 | 307 | 16,992 | 5min | 7/1/2017 | Multi-step |
79 | | PEMS07 | 883 | 28,224 | 5min | 5/1/2017 | Multi-step |
80 | | PEMS08 | 170 | 17,856 | 5min | 3/1/2012 | Multi-step |
81 | | Traffic | 862 | 17,544 | 1hour | 1/1/2015 | Single-step |
82 | | Solar-Energy  | 137      | 52,560    | 10min       | 1/1/2006   | Single-step |
83 | | Electricity | 321 | 26,304 | 1hour | 1/1/2012 | Single-step |
84 | | Exchange-Rate | 8        | 7,588     | 1day        | 1/1/1990   | Single-step |
85 |
86 |
87 | ## Get started
88 |
89 | ### Requirements
90 |
91 | Install the required packages first:
92 |
93 | ```
94 | cd SCINet
95 | conda create -n scinet python=3.8
96 | conda activate scinet
97 | pip install -r requirements.txt
98 | ```
99 |
100 | ### Dataset preparation
101 |
102 | All datasets can be downloaded [here](https://drive.google.com/drive/folders/1Gv1MXjLo5bLGep4bsqDyaNMI2oQC9GH2?usp=sharing). To prepare all datasets at once, you can simply run:
103 | ```
104 | source prepare_data.sh
105 | ```
106 | [Google Drive folder 1](https://drive.google.com/drive/folders/1NU85EuopJNkptFroPtQVXMZE70zaBznZ)
107 | [Google Drive folder 2](https://drive.google.com/drive/folders/17fwxGyQ3Qb0TLOalI-Y9wfgTPuXSYgiI)
108 | [Google Drive folder 3](https://drive.google.com/drive/folders/12ffxwxVAGM_MQiYpIk9aBLQrb2xQupT-)
109 |
110 | The data directory structure is shown below.
111 | ```
112 | ./
113 | └── datasets/
114 | ├── ETT-data
115 | │ ├── ETTh1.csv
116 | │ ├── ETTh2.csv
117 | │ └── ETTm1.csv
118 | ├── financial
119 | │ ├── electricity.txt
120 | │ ├── exchange_rate.txt
121 | │ ├── solar_AL.txt
122 | │ └── traffic.txt
123 | └── PEMS
124 | ├── PEMS03.npz
125 | ├── PEMS04.npz
126 | ├── PEMS07.npz
127 | └── PEMS08.npz
128 | ```
129 |
130 | ### Run training code
131 |
132 | We follow the same settings as [StemGNN](https://github.com/microsoft/StemGNN) for the PEMS03, PEMS04, PEMS07 and PEMS08 datasets, [MTGNN](https://github.com/nnzhan/MTGNN) for the Solar-Energy, Electricity, Traffic and Exchange-Rate datasets, and [Informer](https://github.com/zhouhaoyi/Informer2020) for the ETTh1, ETTh2 and ETTm1 datasets. The detailed training commands are given below.
133 |
134 | #### For the PEMS datasets (all follow Input 12, Output 12):
135 |
136 | pems03
137 | ```
138 | python run_pems.py --dataset PEMS03 --hidden-size 0.0625 --dropout 0.25 --model_name pems03_h0.0625_dp0.25 --num_decoder_layer 2
139 | ```
140 |
141 | pems04
142 | ```
143 | python run_pems.py --dataset PEMS04 --hidden-size 0.0625 --dropout 0 --model_name pems04_h0.0625_dp0
144 | ```
145 |
146 | pems07
147 | ```
148 | python run_pems.py --dataset PEMS07 --hidden-size 0.03125 --dropout 0.25 --model_name pems07_h0.03125_dp0.25
149 | ```
150 |
151 | pems08
152 | ```
153 | python run_pems.py --dataset PEMS08 --hidden-size 1 --dropout 0.5 --model_name pems08_h1_dp0.5
154 | ```
155 |
156 | ##### PEMS Parameter highlights
157 |
158 | | Parameter Name | Description | Parameter in paper | Default |
159 | | -------------- | ----------------------- | ------------------ | ------- |
160 | | dataset | Name of dataset | N/A | PEMS08 |
161 | | horizon | Horizon | Horizon | 12 |
162 | | window_size | Look-back window | Look-back window | 12 |
163 | | hidden-size | hidden expansion | h | 1 |
164 | | levels | SCINet block levels | L | 2 |
165 | | stacks         | The number of SCINet blocks| K                 | 1       |
166 |
167 |
168 | #### For Solar dataset:
169 |
170 | predict 3
171 | ```
172 | python run_financial.py --dataset_name solar_AL --window_size 160 --horizon 3 --hidden-size 1 --lastWeight 0.5 --stacks 2 --levels 4 --lradj 2 --lr 1e-4 --dropout 0.25 --batch_size 256 --model_name so_I160_o3_lr1e-4_bs256_dp0.25_h1_s2l4_w0.5
173 | ```
174 | predict 6
175 | ```
176 | python run_financial.py --dataset_name solar_AL --window_size 160 --horizon 6 --hidden-size 0.5 --lastWeight 0.5 --stacks 2 --levels 4 --lradj 2 --lr 1e-4 --dropout 0.25 --batch_size 256 --model_name so_I160_o6_lr1e-4_bs256_dp0.25_h0.5_s2l4_w0.5
177 | ```
178 | predict 12
179 | ```
180 | python run_financial.py --dataset_name solar_AL --window_size 160 --horizon 12 --hidden-size 2 --lastWeight 0.5 --stacks 2 --levels 4 --lradj 2 --lr 1e-4 --dropout 0.25 --batch_size 1024 --model_name so_I160_o12_lr1e-4_bs1024_dp0.25_h2_s2l4_w0.5
181 | ```
182 | predict 24
183 | ```
184 | python run_financial.py --dataset_name solar_AL --window_size 160 --horizon 24 --hidden-size 1 --lastWeight 0.5 --stacks 1 --levels 4 --lradj 2 --lr 1e-4 --dropout 0.25 --batch_size 256 --model_name so_I160_o24_lr1e-4_bs256_dp0.25_h1_s1l4_w0.5
185 | ```
186 |
187 | #### For Electricity dataset:
188 |
189 | predict 3
190 | ```
191 | python run_financial.py --dataset_name electricity --window_size 168 --horizon 3 --hidden-size 8 --single_step 1 --stacks 2 --levels 3 --lr 9e-3 --dropout 0 --batch_size 32 --model_name ele_I168_o3_lr9e-3_bs32_dp0_h8_s2l3_w0.5 --groups 321 --num_decoder_layer 2
192 | ```
193 | predict 6
194 | ```
195 | python run_financial.py --dataset_name electricity --window_size 168 --horizon 6 --hidden-size 8 --single_step 1 --stacks 2 --levels 3 --lr 9e-3 --dropout 0 --batch_size 32 --model_name ele_I168_o6_lr9e-3_bs32_dp0_h8_s2l3_w0.5 --groups 321 --num_decoder_layer 3
196 | ```
197 | predict 12
198 | ```
199 | python run_financial.py --dataset_name electricity --window_size 168 --horizon 12 --hidden-size 8 --single_step 1 --stacks 2 --levels 3 --lr 9e-3 --dropout 0 --batch_size 32 --model_name ele_I168_o12_lr9e-3_bs32_dp0_h8_s2l3_w0.5 --groups 321 --num_decoder_layer 3
200 | ```
201 | predict 24
202 | ```
203 | python run_financial.py --dataset_name electricity --window_size 168 --horizon 24 --hidden-size 8 --single_step 1 --stacks 2 --levels 3 --lr 9e-3 --dropout 0 --batch_size 32 --model_name ele_I168_o24_lr9e-3_bs32_dp0_h8_s2l3_w0.5 --groups 321 --num_decoder_layer 3
204 | ```
205 | predict 96
206 | ```
207 | python -u run_financial.py --dataset_name electricity --window_size 96 --horizon 96 --hidden-size 8 --stacks 2 --levels 3 --lr 9e-4 --dropout 0 --batch_size 32 --model_name ele_I96_o96_lr9e-4_bs32_dp0_h8_s2l3_w0.5_n4 --groups 321 --concat_len 0 --normalize 4 --long_term_forecast
208 | ```
209 | predict 192
210 | ```
211 | python -u run_financial.py --dataset_name electricity --window_size 96 --horizon 192 --hidden-size 8 --stacks 2 --levels 3 --lr 9e-4 --dropout 0 --batch_size 32 --model_name ele_I96_o192_lr9e-4_bs32_dp0_h8_s2l3_w0.5_n4 --groups 321 --concat_len 0 --normalize 4 --long_term_forecast
212 | ```
213 | predict 336
214 | ```
215 | python -u run_financial.py --dataset_name electricity --window_size 96 --horizon 336 --hidden-size 8 --stacks 2 --levels 3 --lr 9e-4 --dropout 0 --batch_size 32 --model_name ele_I168_o336_lr9e-4_bs32_dp0_h8_s2l3_w0.5_n4 --groups 321 --concat_len 0 --normalize 4 --long_term_forecast
216 | ```
217 | predict 720
218 | ```
219 | python -u run_financial.py --dataset_name electricity --window_size 96 --horizon 720 --hidden-size 8 --stacks 2 --levels 3 --lr 9e-4 --dropout 0 --batch_size 32 --model_name ele_I168_o24_lr9e-4_bs32_dp0_h8_s2l3_w0.5_n4 --groups 321 --concat_len 0 --normalize 4 --long_term_forecast
220 | ```
221 |
222 | #### For the Traffic dataset (warning: 20,000 MiB+ memory usage!):
223 |
224 | predict 3
225 | ```
226 | python run_financial.py --dataset_name traffic --window_size 168 --horizon 3 --hidden-size 1 --single_step 1 --stacks 2 --levels 3 --lr 5e-4 --dropout 0.5 --batch_size 16 --model_name traf_I168_o3_lr5e-4_bs16_dp0.5_h1_s2l3_w1.0
227 | ```
228 | predict 6
229 | ```
230 | python run_financial.py --dataset_name traffic --window_size 168 --horizon 6 --hidden-size 2 --single_step 1 --stacks 1 --levels 3 --lr 5e-4 --dropout 0.25 --batch_size 16 --model_name traf_I168_o6_lr5e-4_bs16_dp0.25_h2_s1l3_w1.0
231 | ```
232 | predict 12
233 | ```
234 | python run_financial.py --dataset_name traffic --window_size 168 --horizon 12 --hidden-size 0.5 --single_step 1 --stacks 2 --levels 3 --lr 5e-4 --dropout 0.25 --batch_size 16 --model_name traf_I168_o12_lr5e-4_bs16_dp0.25_h0.5_s2l3_w1.0
235 | ```
236 | predict 24
237 | ```
238 | python run_financial.py --dataset_name traffic --window_size 168 --horizon 24 --hidden-size 2 --single_step 1 --stacks 2 --levels 2 --lr 5e-4 --dropout 0.5 --batch_size 16 --model_name traf_I168_o24_lr5e-4_bs16_dp0.5_h2_s2l2_w1.0
239 | ```
240 | predict 96
241 | ```
242 | python -u run_financial.py --dataset_name traffic --window_size 96 --horizon 96 --hidden-size 2 --stacks 1 --levels 3 --lr 5e-4 --dropout 0.25 --batch_size 16 --model_name traf_I96_o96_lr5e-4_bs16_dp0.25_h2_s1l3_w1.0 --normalize 4 --long_term_forecast
243 | ```
244 | predict 192
245 | ```
246 | python -u run_financial.py --dataset_name traffic --window_size 96 --horizon 192 --hidden-size 1 --stacks 1 --levels 3 --lr 5e-4 --dropout 0.25 --batch_size 16 --model_name traf_I96_o192_lr5e-4_bs16_dp0.25_h2_s1l3_w1.0 --normalize 4 --long_term_forecast
247 | ```
248 | predict 336
249 | ```
250 | python -u run_financial.py --dataset_name traffic --window_size 96 --horizon 336 --hidden-size 1 --stacks 1 --levels 3 --lr 5e-4 --dropout 0.25 --batch_size 16 --model_name traf_I96_o336_lr5e-4_bs16_dp0.25_h2_s1l3_w1.0 --normalize 4 --long_term_forecast
251 | ```
252 | predict 720
253 | ```
254 | python -u run_financial.py --dataset_name traffic --window_size 96 --horizon 720 --hidden-size 1 --stacks 1 --levels 3 --lr 5e-4 --dropout 0.25 --batch_size 16 --model_name traf_I96_o720_lr5e-4_bs16_dp0.25_h2_s1l3_w1.0 --normalize 4 --long_term_forecast
255 | ```
256 | #### For Exchange rate dataset:
257 |
258 | predict 3
259 | ```
260 | python run_financial.py --dataset_name exchange_rate --window_size 168 --horizon 3 --hidden-size 0.125 --lastWeight 0.5 --stacks 1 --levels 3 --lr 5e-3 --dropout 0.5 --batch_size 4 --model_name ex_I168_o3_lr5e-3_bs4_dp0.5_h0.125_s1l3_w0.5 --num_decoder_layer 2 --epochs 150
261 | ```
262 | predict 6
263 | ```
264 | python run_financial.py --dataset_name exchange_rate --window_size 168 --horizon 6 --hidden-size 0.125 --lastWeight 0.5 --stacks 1 --levels 3 --lr 5e-3 --dropout 0.5 --batch_size 4 --model_name ex_I168_o6_lr5e-3_bs4_dp0.5_h0.125_s1l3_w0.5 --num_decoder_layer 2 --epochs 150
265 | ```
266 | predict 12
267 | ```
268 | python run_financial.py --dataset_name exchange_rate --window_size 168 --horizon 12 --hidden-size 0.125 --lastWeight 0.5 --stacks 1 --levels 3 --lr 5e-3 --dropout 0.5 --batch_size 4 --model_name ex_I168_o12_lr5e-3_bs4_dp0.5_h0.125_s1l3_w0.5 --num_decoder_layer 2 --epochs 150
269 | ```
270 | predict 24
271 | ```
272 | python run_financial.py --dataset_name exchange_rate --window_size 168 --horizon 24 --hidden-size 0.125 --lastWeight 0.5 --stacks 1 --levels 3 --lr 7e-3 --dropout 0.5 --batch_size 4 --model_name ex_I168_o24_lr7e-3_bs4_dp0.5_h0.125_s1l3_w0.5 --num_decoder_layer 2 --epochs 150
273 | ```
274 | predict 96
275 | ```
276 | python run_financial.py --dataset_name exchange_rate --epochs 20 --window_size 96 --horizon 96 --hidden-size 0.125 --normalize 3 --lastWeight 0.5 --stacks 1 --levels 3 --lr 5e-5 --dropout 0 --model_name final --num_decoder_layer 2 --long_term_forecast
277 | ```
278 | predict 192
279 | ```
280 | python run_financial.py --dataset_name exchange_rate --epochs 20 --window_size 96 --horizon 192 --hidden-size 0.125 --normalize 3 --lastWeight 0.5 --stacks 1 --levels 3 --lr 5e-5 --dropout 0 --model_name final --num_decoder_layer 2 --long_term_forecast
281 | ```
282 | predict 336
283 | ```
284 | python run_financial.py --dataset_name exchange_rate --epochs 20 --window_size 96 --horizon 336 --hidden-size 0.125 --normalize 3 --lastWeight 0.5 --stacks 1 --levels 3 --lr 5e-5 --dropout 0 --model_name final --num_decoder_layer 2 --long_term_forecast
285 | ```
286 | predict 720
287 | ```
288 | python run_financial.py --dataset_name exchange_rate --epochs 20 --window_size 96 --horizon 720 --hidden-size 0.125 --normalize 3 --lastWeight 0.5 --stacks 1 --levels 3 --lr 5e-5 --dropout 0 --model_name final --num_decoder_layer 2 --long_term_forecast
289 | ```
290 |
291 | ##### Financial Parameter highlights
292 |
293 | | Parameter Name | Description | Parameter in paper | Default |
294 | | -------------- | ------------------------- | ----------------------- | -------------------------------------- |
295 | | dataset_name | Data name | N/A | exchange_rate |
296 | | horizon | Horizon | Horizon | 3 |
297 | | window_size | Look-back window | Look-back window | 168 |
298 | | batch_size | Batch size | batch size | 8 |
299 | | lr | Learning rate | learning rate | 5e-3 |
300 | | hidden-size | hidden expansion | h | 1 |
301 | | levels | SCINet block levels | L | 3 |
302 | | stacks         | The number of SCINet blocks| K                      | 1                                      |
303 | | lastWeight     | Loss weight of the last frame| Loss weight ($\lambda$) | 1.0                                   |
304 |
305 |
306 | #### For ETTH1 dataset:
307 |
308 | multivariate, out 24
309 | ```
310 | python run_ETTh.py --data ETTh1 --features M --seq_len 48 --label_len 24 --pred_len 24 --hidden-size 4 --stacks 1 --levels 3 --lr 3e-3 --batch_size 8 --dropout 0.5 --model_name etth1_M_I48_O24_lr3e-3_bs8_dp0.5_h4_s1l3
311 | ```
312 | multivariate, out 48
313 | ```
314 | python run_ETTh.py --data ETTh1 --features M --seq_len 96 --label_len 48 --pred_len 48 --hidden-size 4 --stacks 1 --levels 3 --lr 0.009 --batch_size 16 --dropout 0.25 --model_name etth1_M_I96_O48_lr0.009_bs16_dp0.25_h4_s1l3
315 | ```
316 | multivariate, out 168
317 | ```
318 | python run_ETTh.py --data ETTh1 --features M --seq_len 336 --label_len 168 --pred_len 168 --hidden-size 4 --stacks 1 --levels 3 --lr 5e-4 --batch_size 32 --dropout 0.5 --model_name etth1_M_I336_O168_lr5e-4_bs32_dp0.5_h4_s1l3
319 | ```
320 | multivariate, out 336
321 | ```
322 | python run_ETTh.py --data ETTh1 --features M --seq_len 336 --label_len 336 --pred_len 336 --hidden-size 1 --stacks 1 --levels 4 --lr 1e-4 --batch_size 512 --dropout 0.5 --model_name etth1_M_I336_O336_lr1e-4_bs512_dp0.5_h1_s1l4
323 | ```
324 | multivariate, out 720
325 | ```
326 | python run_ETTh.py --data ETTh1 --features M --seq_len 736 --label_len 720 --pred_len 720 --hidden-size 1 --stacks 1 --levels 5 --lr 5e-5 --batch_size 256 --dropout 0.5 --model_name etth1_M_I736_O720_lr5e-5_bs256_dp0.5_h1_s1l5
327 | ```
328 | Univariate, out 24
329 | ```
330 | python run_ETTh.py --data ETTh1 --features S --seq_len 64 --label_len 24 --pred_len 24 --hidden-size 8 --stacks 1 --levels 3 --lr 0.007 --batch_size 64 --dropout 0.25 --model_name etth1_S_I64_O24_lr0.007_bs64_dp0.25_h8_s1l3
331 | ```
332 | Univariate, out 48
333 | ```
334 | python run_ETTh.py --data ETTh1 --features S --seq_len 720 --label_len 48 --pred_len 48 --hidden-size 4 --stacks 1 --levels 4 --lr 0.0001 --batch_size 8 --dropout 0.5 --model_name etth1_S_I720_O48_lr0.0001_bs8_dp0.5_h4_s1l4
335 | ```
336 | Univariate, out 168
337 | ```
338 | python run_ETTh.py --data ETTh1 --features S --seq_len 720 --label_len 168 --pred_len 168 --hidden-size 4 --stacks 1 --levels 4 --lr 5e-5 --batch_size 8 --dropout 0.5 --model_name etth1_S_I720_O168_lr5e-5_bs8_dp0.5_h4_s1l4
339 | ```
340 | Univariate, out 336
341 | ```
342 | python run_ETTh.py --data ETTh1 --features S --seq_len 720 --label_len 336 --pred_len 336 --hidden-size 1 --stacks 1 --levels 4 --lr 1e-3 --batch_size 128 --dropout 0.5 --model_name etth1_S_I720_O336_lr1e-3_bs128_dp0.5_h1_s1l4
343 | ```
344 | Univariate, out 720
345 | ```
346 | python run_ETTh.py --data ETTh1 --features S --seq_len 736 --label_len 720 --pred_len 720 --hidden-size 4 --stacks 1 --levels 5 --lr 1e-4 --batch_size 32 --dropout 0.5 --model_name etth1_S_I736_O720_lr1e-5_bs32_dp0.5_h4_s1l5
347 | ```
348 |
349 | #### For ETTH2 dataset:
350 |
351 | multivariate, out 24
352 | ```
353 | python run_ETTh.py --data ETTh2 --features M --seq_len 48 --label_len 24 --pred_len 24 --hidden-size 8 --stacks 1 --levels 3 --lr 0.007 --batch_size 16 --dropout 0.25 --model_name etth2_M_I48_O24_lr7e-3_bs16_dp0.25_h8_s1l3
354 | ```
355 | multivariate, out 48
356 | ```
357 | python run_ETTh.py --data ETTh2 --features M --seq_len 96 --label_len 48 --pred_len 48 --hidden-size 4 --stacks 1 --levels 4 --lr 0.007 --batch_size 4 --dropout 0.5 --model_name etth2_M_I96_O48_lr7e-3_bs4_dp0.5_h4_s1l4
358 | ```
359 | multivariate, out 168
360 | ```
361 | python run_ETTh.py --data ETTh2 --features M --seq_len 336 --label_len 168 --pred_len 168 --hidden-size 0.5 --stacks 1 --levels 4 --lr 5e-5 --batch_size 16 --dropout 0.5 --model_name etth2_M_I336_O168_lr5e-5_bs16_dp0.5_h0.5_s1l4
362 | ```
363 | multivariate, out 336
364 | ```
365 | python run_ETTh.py --data ETTh2 --features M --seq_len 336 --label_len 336 --pred_len 336 --hidden-size 1 --stacks 1 --levels 4 --lr 5e-5 --batch_size 128 --dropout 0.5 --model_name etth2_M_I336_O336_lr5e-5_bs128_dp0.5_h1_s1l4
366 | ```
367 | multivariate, out 720
368 | ```
369 | python run_ETTh.py --data ETTh2 --features M --seq_len 736 --label_len 720 --pred_len 720 --hidden-size 4 --stacks 1 --levels 5 --lr 1e-5 --batch_size 128 --dropout 0.5 --model_name etth2_M_I736_O720_lr1e-5_bs128_dp0.5_h4_s1l5
370 | ```
371 | Univariate, out 24
372 | ```
373 | python run_ETTh.py --data ETTh2 --features S --seq_len 48 --label_len 24 --pred_len 24 --hidden-size 4 --stacks 1 --levels 3 --lr 0.001 --batch_size 16 --dropout 0 --model_name etth2_S_I48_O24_lr1e-3_bs16_dp0_h4_s1l3
374 | ```
375 | Univariate, out 48
376 | ```
377 | python run_ETTh.py --data ETTh2 --features S --seq_len 96 --label_len 48 --pred_len 48 --hidden-size 4 --stacks 2 --levels 4 --lr 0.001 --batch_size 32 --dropout 0.5 --model_name etth2_S_I96_O48_lr1e-3_bs32_dp0.5_h4_s2l4
378 | ```
379 | Univariate, out 168
380 | ```
381 | python run_ETTh.py --data ETTh2 --features S --seq_len 336 --label_len 168 --pred_len 168 --hidden-size 4 --stacks 1 --levels 3 --lr 1e-4 --batch_size 8 --dropout 0 --model_name etth2_S_I336_O168_lr1e-4_bs8_dp0_h4_s1l3
382 | ```
383 | Univariate, out 336
384 | ```
385 | python run_ETTh.py --data ETTh2 --features S --seq_len 336 --label_len 336 --pred_len 336 --hidden-size 8 --stacks 1 --levels 3 --lr 5e-4 --batch_size 512 --dropout 0.5 --model_name etth2_S_I336_O336_lr5e-4_bs512_dp0.5_h8_s1l3
386 | ```
387 | Univariate, out 720
388 | ```
389 | python run_ETTh.py --data ETTh2 --features S --seq_len 720 --label_len 720 --pred_len 720 --hidden-size 8 --stacks 1 --levels 3 --lr 1e-5 --batch_size 128 --dropout 0.6 --model_name etth2_S_I736_O720_lr1e-5_bs128_dp0.6_h8_s1l3
390 | ```
391 |
392 | #### For ETTM1 dataset:
393 |
394 | multivariate, out 24
395 | ```
396 | python run_ETTh.py --data ETTm1 --features M --seq_len 48 --label_len 24 --pred_len 24 --hidden-size 4 --stacks 1 --levels 3 --lr 0.005 --batch_size 32 --dropout 0.5 --model_name ettm1_M_I48_O24_lr7e-3_bs16_dp0.25_h8_s1l3
397 | ```
398 | multivariate, out 48
399 | ```
400 | python run_ETTh.py --data ETTm1 --features M --seq_len 96 --label_len 48 --pred_len 48 --hidden-size 4 --stacks 2 --levels 4 --lr 0.001 --batch_size 16 --dropout 0.5 --model_name ettm1_M_I96_O48_lr1e-3_bs16_dp0.5_h4_s2l4
401 | ```
402 | multivariate, out 96
403 | ```
404 | python run_ETTh.py --data ETTm1 --features M --seq_len 384 --label_len 96 --pred_len 96 --hidden-size 0.5 --stacks 2 --levels 4 --lr 5e-5 --batch_size 32 --dropout 0.5 --model_name ettm1_M_I384_O96_lr5e-5_bs32_dp0.5_h0.5_s2l4
405 | ```
406 | multivariate, out 288
407 | ```
408 | python run_ETTh.py --data ETTm1 --features M --seq_len 672 --label_len 288 --pred_len 288 --hidden-size 4 --stacks 1 --levels 5 --lr 1e-5 --batch_size 32 --dropout 0.5 --model_name ettm1_M_I672_O288_lr1e-5_bs32_dp0.5_h0.5_s1l5
409 | ```
410 | multivariate, out 672
411 | ```
412 | python run_ETTh.py --data ETTm1 --features M --seq_len 672 --label_len 672 --pred_len 672 --hidden-size 4 --stacks 2 --levels 5 --lr 1e-5 --batch_size 32 --dropout 0.5 --model_name ettm1_M_I672_O672_lr1e-5_bs32_dp0.5_h4_s2l5
413 | ```
414 | Univariate, out 24
415 | ```
416 | python run_ETTh.py --data ETTm1 --features S --seq_len 96 --label_len 24 --pred_len 24 --hidden-size 4 --stacks 1 --levels 4 --lr 0.001 --batch_size 8 --dropout 0 --model_name ettm1_S_I96_O24_lr1e-3_bs8_dp0_h4_s1l4
417 | ```
418 | Univariate, out 48
419 | ```
420 | python run_ETTh.py --data ETTm1 --features S --seq_len 96 --label_len 48 --pred_len 48 --hidden-size 4 --stacks 1 --levels 3 --lr 0.0005 --batch_size 16 --dropout 0 --model_name ettm1_S_I96_O48_lr5e-4_bs16_dp0_h4_s1l3
421 | ```
422 | Univariate, out 96
423 | ```
424 | python run_ETTh.py --data ETTm1 --features S --seq_len 384 --label_len 96 --pred_len 96 --hidden-size 2 --stacks 1 --levels 4 --lr 1e-5 --batch_size 8 --dropout 0 --model_name ettm1_S_I384_O96_lr1e-5_bs8_dp0_h2_s1l4
425 | ```
426 | Univariate, out 288
427 | ```
428 | python run_ETTh.py --data ETTm1 --features S --seq_len 384 --label_len 288 --pred_len 288 --hidden-size 4 --stacks 1 --levels 4 --lr 1e-5 --batch_size 64 --dropout 0 --model_name ettm1_S_I384_O288_lr1e-5_bs64_dp0_h4_s1l4
429 | ```
430 | Univariate, out 672
431 | ```
432 | python run_ETTh.py --data ETTm1 --features S --seq_len 672 --label_len 672 --pred_len 672 --hidden-size 1 --stacks 1 --levels 5 --lr 1e-4 --batch_size 32 --model_name ettm1_S_I672_O672_lr1e-4_bs32_dp0.5_h1_s1l5
433 | ```
434 |
435 |
436 | ##### ETT Parameter highlights
437 |
438 | | Parameter Name | Description | Parameter in paper | Default |
439 | | -------------- | ---------------------------- | ------------------ | -------------------------- |
440 | | root_path | The root path of subdatasets | N/A | './datasets/ETT-data/ETT/' |
441 | | data | Subdataset | N/A | ETTh1 |
442 | | pred_len | Horizon | Horizon | 48 |
443 | | seq_len | Look-back window | Look-back window | 96 |
444 | | batch_size | Batch size | batch size | 32 |
445 | | lr | Learning rate | learning rate | 0.0001 |
446 | | hidden-size | hidden expansion | h | 1 |
447 | | levels | SCINet block levels | L | 3 |
448 | | stacks | The number of SCINet blocks | K | 1 |
449 |
450 | ## Special Constraint
451 |
452 | - Because of the stacked binary down-sampling that SCINet adopts, the number of levels (L) and the look-back window size (W) must satisfy: W mod 2^L = 0, i.e., the look-back window must be evenly divisible by 2^L.
457 |
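As a quick sanity check (an illustrative snippet, not part of the provided scripts), you can verify a window/level combination before launching a run:

```
def window_is_valid(window_size, levels):
    # SCINet halves the sequence once per level, so the look-back
    # window must be divisible by 2^levels
    return window_size % (2 ** levels) == 0

assert window_is_valid(168, 3)       # 168 = 21 * 2^3, valid for levels=3
assert not window_is_valid(168, 4)   # 168 / 16 is not an integer
```
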
458 | ## References
459 |
460 | [1] [Reversible Instance Normalization for Accurate Time-Series Forecasting against Distribution Shift](https://openreview.net/forum?id=cGDAkQo1C0p)
461 |
462 | ## Contact
463 |
464 | If you have any questions, feel free to contact us or open a GitHub issue. Pull requests are highly welcome!
465 |
466 | ```
467 | Minhao Liu: mhliu@cse.cuhk.edu.hk
468 | Ailing Zeng: alzeng@cse.cuhk.edu.hk
469 | Zhijian Xu: zjxu21@cse.cuhk.edu.hk
474 | ```
475 |
476 | ## Acknowledgements
477 |
478 | Thank you all for your attention to our work!
479 |
480 | This codebase uses [Informer](https://github.com/zhouhaoyi/Informer2020), [MTGNN](https://github.com/nnzhan/MTGNN) and [StemGNN](https://github.com/microsoft/StemGNN) as baseline methods for comparison.
481 |
--------------------------------------------------------------------------------
/data_process/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cure-lab/SCINet/02e6b0af2d58243de09aaa1eac3840237b659847/data_process/__init__.py
--------------------------------------------------------------------------------
/data_process/etth_data_loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import pandas as pd
4 |
5 | import torch
6 | from torch.utils.data import Dataset, DataLoader
7 | # from sklearn.preprocessing import StandardScaler
8 |
9 | from utils.tools import StandardScaler
10 | from utils.timefeatures import time_features
11 |
12 | import warnings
13 | warnings.filterwarnings('ignore')
14 |
15 | class Dataset_ETT_hour(Dataset):
16 | def __init__(self, root_path, flag='train', size=None,
17 | features='S', data_path='ETTh1.csv',
18 | target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
19 | # size [seq_len, label_len, pred_len]
20 | # info
21 |         if size is None:
22 | self.seq_len = 24*4*4
23 | self.label_len = 24*4
24 | self.pred_len = 24*4
25 | else:
26 | self.seq_len = size[0]
27 | self.label_len = size[1]
28 | self.pred_len = size[2]
29 | # init
30 | assert flag in ['train', 'test', 'val']
31 | type_map = {'train':0, 'val':1, 'test':2}
32 | self.set_type = type_map[flag]
33 |
34 | self.features = features
35 | self.target = target
36 | self.scale = scale
37 | self.inverse = inverse
38 | self.timeenc = timeenc
39 | self.freq = freq
40 |
41 | self.root_path = root_path
42 | self.data_path = data_path
43 | self.__read_data__()
44 |
45 | def __read_data__(self):
46 | self.scaler = StandardScaler()
47 | df_raw = pd.read_csv(os.path.join(self.root_path,
48 | self.data_path))
49 |
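        # fixed chronological split in hours: first 12 months for training, next 4
        # months for validation, last 4 months for testing (a month taken as 30 days)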
50 | border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len]
51 | border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]
52 | border1 = border1s[self.set_type]
53 | border2 = border2s[self.set_type]
54 |
55 | if self.features=='M' or self.features=='MS':
56 | cols_data = df_raw.columns[1:]
57 | df_data = df_raw[cols_data]
58 | elif self.features=='S':
59 | df_data = df_raw[[self.target]]
60 |
61 | if self.scale:
62 | train_data = df_data[border1s[0]:border2s[0]]
63 | self.scaler.fit(train_data.values)
64 | data = self.scaler.transform(df_data.values)
65 | # data = self.scaler.fit_transform(df_data.values)
66 | else:
67 | data = df_data.values
68 |
69 | df_stamp = df_raw[['date']][border1:border2]
70 | df_stamp['date'] = pd.to_datetime(df_stamp.date)
71 | data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
72 |
73 | self.data_x = data[border1:border2]
74 | if self.inverse:
75 | self.data_y = df_data.values[border1:border2]
76 | else:
77 | self.data_y = data[border1:border2]
78 | self.data_stamp = data_stamp
79 |
80 | def __getitem__(self, index):
81 | s_begin = index
82 | s_end = s_begin + self.seq_len
83 | r_begin = s_end - self.label_len
84 | r_end = r_begin + self.label_len + self.pred_len
85 |
86 |         seq_x = self.data_x[s_begin:s_end]    # input window: seq_len steps
87 |         seq_y = self.data_y[r_begin:r_end]    # label_len overlap + pred_len future steps
88 | seq_x_mark = self.data_stamp[s_begin:s_end]
89 | seq_y_mark = self.data_stamp[r_begin:r_end]
90 |
91 | return seq_x, seq_y, seq_x_mark, seq_y_mark
92 |
93 | def __len__(self):
94 |         return len(self.data_x) - self.seq_len - self.pred_len + 1
95 |
96 | def inverse_transform(self, data):
97 | return self.scaler.inverse_transform(data)
98 |
99 | class Dataset_ETT_minute(Dataset):
100 | def __init__(self, root_path, flag='train', size=None,
101 | features='S', data_path='ETTm1.csv',
102 | target='OT', scale=True, inverse=False, timeenc=0, freq='t', cols=None):
103 | # size [seq_len, label_len, pred_len]
104 | # info
105 |         if size is None:
106 | self.seq_len = 24*4*4
107 | self.label_len = 24*4
108 | self.pred_len = 24*4
109 | else:
110 | self.seq_len = size[0]
111 | self.label_len = size[1]
112 | self.pred_len = size[2]
113 | # init
114 | assert flag in ['train', 'test', 'val']
115 | type_map = {'train':0, 'val':1, 'test':2}
116 | self.set_type = type_map[flag]
117 |
118 | self.features = features
119 | self.target = target
120 | self.scale = scale
121 | self.inverse = inverse
122 | self.timeenc = timeenc
123 | self.freq = freq
124 |
125 | self.root_path = root_path
126 | self.data_path = data_path
127 | self.__read_data__()
128 |
129 | def __read_data__(self):
130 | self.scaler = StandardScaler()
131 | df_raw = pd.read_csv(os.path.join(self.root_path,
132 | self.data_path))
133 |
134 | border1s = [0, 12*30*24*4 - self.seq_len, 12*30*24*4+4*30*24*4 - self.seq_len]
135 | border2s = [12*30*24*4, 12*30*24*4+4*30*24*4, 12*30*24*4+8*30*24*4]
136 | border1 = border1s[self.set_type]
137 | border2 = border2s[self.set_type]
138 |
139 | if self.features=='M' or self.features=='MS':
140 | cols_data = df_raw.columns[1:]
141 | df_data = df_raw[cols_data]
142 | elif self.features=='S':
143 | df_data = df_raw[[self.target]]
144 |
145 | if self.scale:
146 | train_data = df_data[border1s[0]:border2s[0]]
147 | self.scaler.fit(train_data.values)
148 | data = self.scaler.transform(df_data.values)
149 | else:
150 | data = df_data.values
151 |
152 | df_stamp = df_raw[['date']][border1:border2]
153 | df_stamp['date'] = pd.to_datetime(df_stamp.date)
154 | data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
155 |
156 | self.data_x = data[border1:border2]
157 | if self.inverse:
158 | self.data_y = df_data.values[border1:border2]
159 | else:
160 | self.data_y = data[border1:border2]
161 | self.data_stamp = data_stamp
162 |
163 | def __getitem__(self, index):
164 | s_begin = index
165 | s_end = s_begin + self.seq_len
166 | r_begin = s_end - self.label_len
167 | r_end = r_begin + self.label_len + self.pred_len
168 |
169 | seq_x = self.data_x[s_begin:s_end]
170 | seq_y = self.data_y[r_begin:r_end]
171 | seq_x_mark = self.data_stamp[s_begin:s_end]
172 | seq_y_mark = self.data_stamp[r_begin:r_end]
173 |
174 | return seq_x, seq_y, seq_x_mark, seq_y_mark
175 |
176 | def __len__(self):
177 | return len(self.data_x) - self.seq_len - self.pred_len + 1
178 |
179 | def inverse_transform(self, data):
180 | return self.scaler.inverse_transform(data)
181 |
182 |
183 | class Dataset_Custom(Dataset):
184 | def __init__(self, root_path, flag='train', size=None,
185 | features='S', data_path='ETTh1.csv',
186 | target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
187 | # size [seq_len, label_len, pred_len]
188 | # info
189 |         if size is None:
190 | self.seq_len = 24*4*4
191 | self.label_len = 24*4
192 | self.pred_len = 24*4
193 | else:
194 | self.seq_len = size[0]
195 | self.label_len = size[1]
196 | self.pred_len = size[2]
197 | # init
198 | assert flag in ['train', 'test', 'val']
199 | type_map = {'train':0, 'val':1, 'test':2}
200 | self.set_type = type_map[flag]
201 |
202 | self.features = features
203 | self.target = target
204 | self.scale = scale
205 | self.inverse = inverse
206 | self.timeenc = timeenc
207 | self.freq = freq
208 | self.cols=cols
209 | self.root_path = root_path
210 | self.data_path = data_path
211 | self.__read_data__()
212 |
213 | def __read_data__(self):
214 | self.scaler = StandardScaler()
215 | df_raw = pd.read_csv(os.path.join(self.root_path,
216 | self.data_path))
217 | '''
218 | df_raw.columns: ['date', ...(other features), target feature]
219 | '''
220 | # cols = list(df_raw.columns);
221 | if self.cols:
222 | cols=self.cols.copy()
223 | cols.remove(self.target)
224 | else:
225 | cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
226 | df_raw = df_raw[['date']+cols+[self.target]]
227 |
228 | num_train = int(len(df_raw)*0.7)
229 | num_test = int(len(df_raw)*0.2)
230 | num_vali = len(df_raw) - num_train - num_test
231 | border1s = [0, num_train-self.seq_len, len(df_raw)-num_test-self.seq_len]
232 | border2s = [num_train, num_train+num_vali, len(df_raw)]
233 | border1 = border1s[self.set_type]
234 | border2 = border2s[self.set_type]
235 |
236 | if self.features=='M' or self.features=='MS':
237 | cols_data = df_raw.columns[1:]
238 | df_data = df_raw[cols_data]
239 | elif self.features=='S':
240 | df_data = df_raw[[self.target]]
241 |
242 | if self.scale:
243 | train_data = df_data[border1s[0]:border2s[0]]
244 | self.scaler.fit(train_data.values)
245 | data = self.scaler.transform(df_data.values)
246 | else:
247 | data = df_data.values
248 |
249 | df_stamp = df_raw[['date']][border1:border2]
250 | df_stamp['date'] = pd.to_datetime(df_stamp.date)
251 | data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
252 |
253 | self.data_x = data[border1:border2]
254 | if self.inverse:
255 | self.data_y = df_data.values[border1:border2]
256 | else:
257 | self.data_y = data[border1:border2]
258 | self.data_stamp = data_stamp
259 |
260 | def __getitem__(self, index):
261 | s_begin = index
262 | s_end = s_begin + self.seq_len
263 | r_begin = s_end - self.label_len
264 | r_end = r_begin + self.label_len + self.pred_len
265 |
266 | seq_x = self.data_x[s_begin:s_end]
267 | seq_y = self.data_y[r_begin:r_end]
268 | seq_x_mark = self.data_stamp[s_begin:s_end]
269 | seq_y_mark = self.data_stamp[r_begin:r_end]
270 |
271 | return seq_x, seq_y, seq_x_mark, seq_y_mark
272 |
273 | def __len__(self):
274 |         return len(self.data_x) - self.seq_len - self.pred_len + 1
275 |
276 | def inverse_transform(self, data):
277 | return self.scaler.inverse_transform(data)
278 |
279 | class Dataset_Pred(Dataset):
280 | def __init__(self, root_path, flag='pred', size=None,
281 | features='S', data_path='ETTh1.csv',
282 | target='OT', scale=True, inverse=False, timeenc=0, freq='15min', cols=None):
283 | # size [seq_len, label_len, pred_len]
284 | # info
285 |         if size is None:
286 | self.seq_len = 24*4*4
287 | self.label_len = 24*4
288 | self.pred_len = 24*4
289 | else:
290 | self.seq_len = size[0]
291 | self.label_len = size[1]
292 | self.pred_len = size[2]
293 | # init
294 | assert flag in ['pred']
295 |
296 | self.features = features
297 | self.target = target
298 | self.scale = scale
299 | self.inverse = inverse
300 | self.timeenc = timeenc
301 | self.freq = freq
302 | self.cols=cols
303 | self.root_path = root_path
304 | self.data_path = data_path
305 | self.__read_data__()
306 |
307 | def __read_data__(self):
308 | self.scaler = StandardScaler()
309 | df_raw = pd.read_csv(os.path.join(self.root_path,
310 | self.data_path))
311 | '''
312 | df_raw.columns: ['date', ...(other features), target feature]
313 | '''
314 | if self.cols:
315 | cols=self.cols.copy()
316 | cols.remove(self.target)
317 | else:
318 | cols = list(df_raw.columns); cols.remove(self.target); cols.remove('date')
319 | df_raw = df_raw[['date']+cols+[self.target]]
320 |
321 | border1 = len(df_raw)-self.seq_len
322 | border2 = len(df_raw)
323 |
324 | if self.features=='M' or self.features=='MS':
325 | cols_data = df_raw.columns[1:]
326 | df_data = df_raw[cols_data]
327 | elif self.features=='S':
328 | df_data = df_raw[[self.target]]
329 |
330 | if self.scale:
331 | self.scaler.fit(df_data.values)
332 | data = self.scaler.transform(df_data.values)
333 | else:
334 | data = df_data.values
335 |
336 | tmp_stamp = df_raw[['date']][border1:border2]
337 | tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)
338 | pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len+1, freq=self.freq)
339 |
340 | df_stamp = pd.DataFrame(columns = ['date'])
341 | df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])
342 | data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq[-1:])
343 |
344 | self.data_x = data[border1:border2]
345 | if self.inverse:
346 | self.data_y = df_data.values[border1:border2]
347 | else:
348 | self.data_y = data[border1:border2]
349 | self.data_stamp = data_stamp
350 |
351 | def __getitem__(self, index):
352 | s_begin = index
353 | s_end = s_begin + self.seq_len
354 | r_begin = s_end - self.label_len
355 | r_end = r_begin + self.label_len + self.pred_len
356 |
357 | seq_x = self.data_x[s_begin:s_end]
358 | seq_y = self.data_y[r_begin:r_begin+self.label_len]
359 | seq_x_mark = self.data_stamp[s_begin:s_end]
360 | seq_y_mark = self.data_stamp[r_begin:r_end]
361 |
362 | return seq_x, seq_y, seq_x_mark, seq_y_mark
363 |
364 | def __len__(self):
365 | return len(self.data_x) - self.seq_len + 1
366 |
367 | def inverse_transform(self, data):
368 | return self.scaler.inverse_transform(data)
369 |
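if __name__ == '__main__':
    # Usage sketch (illustrative only; the path, sizes, and batch size below are
    # assumptions, not values mandated by this repository):
    dataset = Dataset_ETT_hour(root_path='./datasets/ETT-data', data_path='ETTh1.csv',
                               flag='train', size=[96, 48, 48], features='M')
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(loader))
    # for ETTh1 with features='M': seq_x is [32, 96, 7], seq_y is [32, 96, 7]
    # (label_len + pred_len = 96 steps)
    print(seq_x.shape, seq_y.shape)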
--------------------------------------------------------------------------------
/data_process/financial_dataloader.py:
--------------------------------------------------------------------------------
1 | from torch.utils.data import DataLoader
2 | from torch.autograd import Variable
3 | import torch
4 |
5 | import numpy as np
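# population standard deviation (normalizes by N rather than N - 1); used for the RSE metric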
6 | def normal_std(x):
7 | return x.std() * np.sqrt((len(x) - 1.) / (len(x)))
8 |
9 | class DataLoaderH(object):
10 |     # `train` and `valid` give the fractions of the data used for the training and validation sets; test = 1 - train - valid
11 | def __init__(self, file_name, train, valid, horizon, window, normalize=2):
12 | self.P = window
13 | self.h = horizon
14 | fin = open(file_name)
15 | self.rawdat = np.loadtxt(fin, delimiter=',')
16 | self.dat = np.zeros(self.rawdat.shape)
17 |
18 | self.n, self.m = self.dat.shape
19 |         self.normalize = normalize
20 | self.scale = np.ones(self.m)
21 | self.bias = np.zeros(self.m)
22 | self._normalized(normalize)
23 | self._split(int(train * self.n), int((train + valid) * self.n), self.n)
24 |
25 | self.scale = torch.from_numpy(self.scale).float()
26 | self.bias = torch.from_numpy(self.bias).float()
27 | tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.h, self.m)
28 |
29 | self.scale = self.scale.cuda()
30 | self.scale = Variable(self.scale)
31 | self.bias = self.bias.cuda()
32 | self.bias = Variable(self.bias)
33 |
34 | tmp = tmp[:, -1, :].squeeze()
35 | self.rse = normal_std(tmp)
36 | self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)))
37 |
38 | def _normalized(self, normalize):
39 |
40 | if (normalize == 0):
41 | self.dat = self.rawdat
42 |
43 | if (normalize == 1):
44 | # normalized by the maximum value of entire matrix.
45 | self.dat = self.rawdat / np.max(self.rawdat)
46 |
47 | if (normalize == 2):
48 |             # normalized by the maximum absolute value of each column (sensor).
49 | for i in range(self.m):
50 | self.scale[i] = np.max(np.abs(self.rawdat[:, i]))
51 | self.dat[:, i] = self.rawdat[:, i] / np.max(np.abs(self.rawdat[:, i]))
52 |
53 | if (normalize == 3):
54 |             # normalized by the mean/std of each column (sensor).
55 | for i in range(self.m):
56 | self.scale[i] = np.std(self.rawdat[:, i]) #std
57 | self.bias[i] = np.mean(self.rawdat[:, i])
58 | self.dat[:, i] = (self.rawdat[:, i] - self.bias[i]) / self.scale[i]
59 |
60 | if (normalize == 4):
61 |             # normalized by the mean/std of the training portion (first 70%) of each column (sensor).
62 | for i in range(self.m):
63 | self.scale[i] = np.std(self.rawdat[:int(self.dat.shape[0]*0.7), i]) #std
64 | self.bias[i] = np.mean(self.rawdat[:int(self.dat.shape[0]*0.7), i])
65 | self.dat[:, i] = (self.rawdat[:, i] - self.bias[i]) / self.scale[i]
66 |
67 | def _split(self, train, valid, test):
68 |
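        # each index set stores the *end* positions of (window, horizon) samples;
        # training starts at P + h - 1 so the first sample has a full look-back window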
69 | train_set = range(self.P + self.h - 1, train)
70 | valid_set = range(train, valid)
71 | test_set = range(valid, self.n)
72 | self.train = self._batchify(train_set, self.h)
73 | self.valid = self._batchify(valid_set, self.h)
74 | self.test = self._batchify(test_set, self.h)
75 |
76 | def _batchify(self, idx_set, horizon):
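        # X holds the P look-back steps immediately preceding the h target steps,
        # and Y holds the h steps ending at index idx_set[i] (inclusive), so the
        # input window and the target window are contiguous and non-overlapping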
77 | n = len(idx_set)
78 | X = torch.zeros((n, self.P, self.m))
79 | Y = torch.zeros((n, self.h, self.m))
80 | for i in range(n):
81 | end = idx_set[i] - self.h + 1
82 | start = end - self.P
83 | X[i, :, :] = torch.from_numpy(self.dat[start:end, :])
84 | # Y[i, :, :] = torch.from_numpy(self.dat[idx_set[i] - self.h:idx_set[i], :])
85 | Y[i, :, :] = torch.from_numpy(self.dat[end:(idx_set[i]+1), :])
86 |
87 |
88 | return [X, Y]
89 |
90 | def get_batches(self, inputs, targets, batch_size, shuffle=True):
91 | length = len(inputs)
92 | if shuffle:
93 | index = torch.randperm(length)
94 | else:
95 | index = torch.LongTensor(range(length))
96 | start_idx = 0
97 | while (start_idx < length):
98 | end_idx = min(length, start_idx + batch_size)
99 | excerpt = index[start_idx:end_idx]
100 | X = inputs[excerpt]
101 | Y = targets[excerpt]
102 | X = X.cuda()
103 | Y = Y.cuda()
104 | yield Variable(X), Variable(Y)
105 | start_idx += batch_size
106 |
107 |
--------------------------------------------------------------------------------
/data_process/forecast_dataloader.py:
--------------------------------------------------------------------------------
1 | import torch.utils.data as torch_data
2 | import numpy as np
3 | import torch
4 | import pandas as pd
5 |
6 |
7 | def normalized(data, normalize_method, norm_statistic=None):
8 | if normalize_method == 'min_max':
9 | if not norm_statistic:
10 | norm_statistic = dict(max=np.max(data, axis=0), min=np.min(data, axis=0))
11 | scale = norm_statistic['max'] - norm_statistic['min'] + 1e-5
12 | data = (data - norm_statistic['min']) / scale
13 | data = np.clip(data, 0.0, 1.0)
14 | elif normalize_method == 'z_score':
15 | if not norm_statistic:
16 | norm_statistic = dict(mean=np.mean(data, axis=0), std=np.std(data, axis=0))
17 | mean = norm_statistic['mean']
18 | std = norm_statistic['std']
19 | std = [1 if i == 0 else i for i in std]
20 | data = (data - mean) / std
21 | norm_statistic['std'] = std
22 | return data, norm_statistic
23 |
24 |
25 | def de_normalized(data, normalize_method, norm_statistic):
26 | if normalize_method == 'min_max':
27 | if not norm_statistic:
28 | norm_statistic = dict(max=np.max(data, axis=0), min=np.min(data, axis=0))
29 |         scale = norm_statistic['max'] - norm_statistic['min'] + 1e-5  # match the epsilon used in normalized()
30 | data = data * scale + norm_statistic['min']
31 | elif normalize_method == 'z_score':
32 | if not norm_statistic:
33 | norm_statistic = dict(mean=np.mean(data, axis=0), std=np.std(data, axis=0))
34 | mean = norm_statistic['mean']
35 | std = norm_statistic['std']
36 | std = [1 if i == 0 else i for i in std]
37 | data = data * std + mean
38 | return data
39 |
40 |
41 | class ForecastDataset(torch_data.Dataset):
42 | def __init__(self, df, window_size, horizon, normalize_method=None, norm_statistic=None, interval=1):
43 | self.window_size = window_size # 12
44 | self.interval = interval #1
45 | self.horizon = horizon
46 | self.normalize_method = normalize_method
47 | self.norm_statistic = norm_statistic
48 | df = pd.DataFrame(df)
49 | df = df.ffill(limit=len(df)).bfill(limit=len(df)).values  # fillna(method=...) is deprecated in recent pandas
50 | self.data = df
51 | self.df_length = len(df)
52 | self.x_end_idx = self.get_x_end_idx()
53 | if normalize_method:
54 | self.data, _ = normalized(self.data, normalize_method, norm_statistic)
55 |
56 | def __getitem__(self, index):
57 | hi = self.x_end_idx[index] #12
58 | lo = hi - self.window_size #0
59 | train_data = self.data[lo: hi] #0:12
60 | target_data = self.data[hi:hi + self.horizon] #12:24
61 | x = torch.from_numpy(train_data).type(torch.float)
62 | y = torch.from_numpy(target_data).type(torch.float)
63 | return x, y
64 |
65 | def __len__(self):
66 | return len(self.x_end_idx)
67 |
68 | def get_x_end_idx(self):
69 | # each element `hi` in `x_index_set` is the exclusive upper bound of one input window
70 | # training data range: [lo, hi), lo = hi - window_size
71 | x_index_set = range(self.window_size, self.df_length - self.horizon + 1)
72 | x_end_idx = [x_index_set[j * self.interval] for j in range((len(x_index_set)) // self.interval)]
73 | return x_end_idx
74 |
75 | class ForecastTestDataset(torch_data.Dataset):
76 | def __init__(self, df, window_size, horizon, normalize_method=None, norm_statistic=None, interval=1):
77 | self.window_size = window_size # 12
78 | self.interval = interval #1
79 | self.horizon = horizon
80 | self.normalize_method = normalize_method
81 | self.norm_statistic = norm_statistic
82 | df = pd.DataFrame(df)
83 | df = df.ffill(limit=len(df)).bfill(limit=len(df)).values  # fillna(method=...) is deprecated in recent pandas
84 | self.data = df
85 | self.df_length = len(df)
86 | self.x_end_idx = self.get_x_end_idx()
87 | if normalize_method:
88 | self.data, _ = normalized(self.data, normalize_method, norm_statistic)
89 |
90 | def __getitem__(self, index):
91 | hi = self.x_end_idx[index] #12
92 | lo = hi - self.window_size #0
93 | train_data = self.data[lo: hi] #0:12
94 | target_data = self.data[hi:hi + self.horizon] #12:24
95 | x = torch.from_numpy(train_data).type(torch.float)
96 | y = torch.from_numpy(target_data).type(torch.float)
97 | return x, y
98 |
99 | def __len__(self):
100 | return len(self.x_end_idx)
101 |
102 | def get_x_end_idx(self):
103 | # each element `hi` in `x_index_set` is the exclusive upper bound of one input window
104 | # training data range: [lo, hi), lo = hi - window_size
105 | x_index_set = range(self.window_size, self.df_length - self.horizon + 1)
106 | x_end_idx = [x_index_set[j * 12] for j in range((len(x_index_set)) // 12)]  # stride hardcoded to 12 (not self.interval), yielding non-overlapping test windows
107 | return x_end_idx
108 |
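
As a sanity check, the z-score branch of `normalized`/`de_normalized` above round-trips exactly (the min-max branch clips to [0, 1], so it only inverts for data inside the training range). A small sketch with synthetic data:

    import numpy as np
    from data_process.forecast_dataloader import normalized, de_normalized

    raw = 10 * np.random.rand(100, 5)                  # (time steps, sensors)
    normed, stats = normalized(raw, 'z_score')         # stats: per-column mean/std
    restored = de_normalized(normed, 'z_score', stats)
    assert np.allclose(restored, raw)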
--------------------------------------------------------------------------------
/docs/RevIN.md:
--------------------------------------------------------------------------------
1 | # RevIN addon
2 |
3 | Reversible Instance Normalization (RevIN) is a lightweight normalization method published at ICLR 2022. It is designed specifically as a countermeasure to the distribution-shift problem in time series forecasting. We have verified that, as the authors claim, the method can largely boost the performance of SCINet on the ETT datasets.
4 |
5 | Thus, we decided to merge their marvelous solution into our repository! Many thanks to the authors.
6 |
7 | [RevIN on OpenReview](https://openreview.net/forum?id=cGDAkQo1C0p)
8 |
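For readers unfamiliar with the method: RevIN normalizes each input instance by its own mean and standard deviation (optionally with a learnable affine transform) and inverts that transform on the model output. A minimal sketch of the idea, as an illustrative re-implementation rather than the authors' official code:

```python
import torch
import torch.nn as nn

class RevINSketch(nn.Module):
    """Illustrative RevIN: per-instance statistics plus a learnable affine."""
    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))

    def forward(self, x, mode):  # x: (batch, time, features)
        if mode == 'norm':
            self.mean = x.mean(dim=1, keepdim=True).detach()
            self.std = torch.sqrt(x.var(dim=1, keepdim=True, unbiased=False) + self.eps).detach()
            return (x - self.mean) / self.std * self.weight + self.bias
        # 'denorm': invert with the statistics stored during 'norm'
        return (x - self.bias) / (self.weight + self.eps) * self.std + self.mean
```
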
9 | ## Experiment result on ETT
10 |
11 | TODO:
12 |
13 | ## When and How to Enable RevIN
14 |
15 | TODO:
16 |
17 | ## Simple Tool to Check Your Dataset
18 |
19 | TODO:
--------------------------------------------------------------------------------
/experiments/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cure-lab/SCINet/02e6b0af2d58243de09aaa1eac3840237b659847/experiments/__init__.py
--------------------------------------------------------------------------------
/experiments/exp_ETTh.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | import numpy as np
5 |
6 | import torch
7 | import torch.nn as nn
8 | from torch import optim
9 | from torch.utils.data import DataLoader
10 | from torch.utils.tensorboard import SummaryWriter
11 | import warnings
12 | warnings.filterwarnings('ignore')
13 | from data_process.etth_data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred
14 | from experiments.exp_basic import Exp_Basic
15 | from utils.tools import EarlyStopping, adjust_learning_rate, save_model, load_model
16 | from metrics.ETTh_metrics import metric
17 | from models.SCINet import SCINet
18 | from models.SCINet_decompose import SCINet_decompose
19 |
20 | class Exp_ETTh(Exp_Basic):
21 | def __init__(self, args):
22 | super(Exp_ETTh, self).__init__(args)
23 |
24 | def _build_model(self):
25 |
26 | if self.args.features == 'S':
27 | in_dim = 1
28 | elif self.args.features == 'M':
29 | in_dim = 7
30 | else:
31 | raise ValueError('features must be S or M, got {}'.format(self.args.features))
32 |
33 | if self.args.decompose:
34 | model = SCINet_decompose(
35 | output_len=self.args.pred_len,
36 | input_len=self.args.seq_len,
37 | input_dim= in_dim,
38 | hid_size = self.args.hidden_size,
39 | num_stacks=self.args.stacks,
40 | num_levels=self.args.levels,
41 | num_decoder_layer=self.args.num_decoder_layer,
42 | concat_len = self.args.concat_len,
43 | groups = self.args.groups,
44 | kernel = self.args.kernel,
45 | dropout = self.args.dropout,
46 | single_step_output_One = self.args.single_step_output_One,
47 | positionalE = self.args.positionalEcoding,
48 | modified = True,
49 | RIN=self.args.RIN)
50 | else:
51 | model = SCINet(
52 | output_len=self.args.pred_len,
53 | input_len=self.args.seq_len,
54 | input_dim= in_dim,
55 | hid_size = self.args.hidden_size,
56 | num_stacks=self.args.stacks,
57 | num_levels=self.args.levels,
58 | num_decoder_layer=self.args.num_decoder_layer,
59 | concat_len = self.args.concat_len,
60 | groups = self.args.groups,
61 | kernel = self.args.kernel,
62 | dropout = self.args.dropout,
63 | single_step_output_One = self.args.single_step_output_One,
64 | positionalE = self.args.positionalEcoding,
65 | modified = True,
66 | RIN=self.args.RIN)
67 | print(model)
68 | return model.double()
69 |
70 | def _get_data(self, flag):
71 | args = self.args
72 |
73 | data_dict = {
74 | 'ETTh1':Dataset_ETT_hour,
75 | 'ETTh2':Dataset_ETT_hour,
76 | 'ETTm1':Dataset_ETT_minute,
77 | 'ETTm2':Dataset_ETT_minute,
78 | 'WTH':Dataset_Custom,
79 | 'ECL':Dataset_Custom,
80 | 'Solar':Dataset_Custom,
81 | 'custom':Dataset_Custom,
82 | }
83 | Data = data_dict[self.args.data]
84 | timeenc = 0 if args.embed!='timeF' else 1
85 |
86 | if flag == 'test':
87 | shuffle_flag = False; drop_last = True; batch_size = args.batch_size; freq=args.freq
88 | elif flag=='pred':
89 | shuffle_flag = False; drop_last = False; batch_size = 1; freq=args.detail_freq
90 | Data = Dataset_Pred
91 | else:
92 | shuffle_flag = True; drop_last = True; batch_size = args.batch_size; freq=args.freq
93 | data_set = Data(
94 | root_path=args.root_path,
95 | data_path=args.data_path,
96 | flag=flag,
97 | size=[args.seq_len, args.label_len, args.pred_len],
98 | features=args.features,
99 | target=args.target,
100 | inverse=args.inverse,
101 | timeenc=timeenc,
102 | freq=freq,
103 | cols=args.cols
104 | )
105 | print(flag, len(data_set))
106 | data_loader = DataLoader(
107 | data_set,
108 | batch_size=batch_size,
109 | shuffle=shuffle_flag,
110 | num_workers=args.num_workers,
111 | drop_last=drop_last)
112 |
113 | return data_set, data_loader
114 |
115 | def _select_optimizer(self):
116 | model_optim = optim.Adam(self.model.parameters(), lr=self.args.lr)
117 | return model_optim
118 |
119 | def _select_criterion(self, losstype):
120 | if losstype == "mse":
121 | criterion = nn.MSELoss()
122 | elif losstype == "mae":
123 | criterion = nn.L1Loss()
124 | else:
125 | criterion = nn.L1Loss()
126 | return criterion
127 |
128 | def valid(self, valid_data, valid_loader, criterion):
129 | self.model.eval()
130 | total_loss = []
131 |
132 | preds = []
133 | trues = []
134 | mids = []
135 | pred_scales = []
136 | true_scales = []
137 | mid_scales = []
138 |
139 | for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(valid_loader):
140 | pred, pred_scale, mid, mid_scale, true, true_scale = self._process_one_batch_SCINet(
141 | valid_data, batch_x, batch_y)
142 |
143 | if self.args.stacks == 1:
144 | loss = criterion(pred.detach().cpu(), true.detach().cpu())
145 |
146 | preds.append(pred.detach().cpu().numpy())
147 | trues.append(true.detach().cpu().numpy())
148 | pred_scales.append(pred_scale.detach().cpu().numpy())
149 | true_scales.append(true_scale.detach().cpu().numpy())
150 |
151 | elif self.args.stacks == 2:
152 | loss = criterion(pred.detach().cpu(), true.detach().cpu()) + criterion(mid.detach().cpu(), true.detach().cpu())
153 |
154 | preds.append(pred.detach().cpu().numpy())
155 | trues.append(true.detach().cpu().numpy())
156 | mids.append(mid.detach().cpu().numpy())
157 | pred_scales.append(pred_scale.detach().cpu().numpy())
158 | mid_scales.append(mid_scale.detach().cpu().numpy())
159 | true_scales.append(true_scale.detach().cpu().numpy())
160 |
161 | else:
162 | print('Error!')
163 |
164 | total_loss.append(loss)
165 | total_loss = np.average(total_loss)
166 |
167 | if self.args.stacks == 1:
168 | preds = np.array(preds)
169 | trues = np.array(trues)
170 | pred_scales = np.array(pred_scales)
171 | true_scales = np.array(true_scales)
172 |
173 | preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
174 | trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
175 | true_scales = true_scales.reshape(-1, true_scales.shape[-2], true_scales.shape[-1])
176 | pred_scales = pred_scales.reshape(-1, pred_scales.shape[-2], pred_scales.shape[-1])
177 |
178 | mae, mse, rmse, mape, mspe, corr = metric(preds, trues)
179 | maes, mses, rmses, mapes, mspes, corrs = metric(pred_scales, true_scales)
180 | print('normed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mse, mae, rmse, mape, mspe, corr))
181 | print('denormed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mses, maes, rmses, mapes, mspes, corrs))
182 | elif self.args.stacks == 2:
183 | preds = np.array(preds)
184 | trues = np.array(trues)
185 | mids = np.array(mids)
186 | pred_scales = np.array(pred_scales)
187 | true_scales = np.array(true_scales)
188 | mid_scales = np.array(mid_scales)
189 |
190 | preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
191 | trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
192 | mids = mids.reshape(-1, mids.shape[-2], mids.shape[-1])
193 | true_scales = true_scales.reshape(-1, true_scales.shape[-2], true_scales.shape[-1])
194 | pred_scales = pred_scales.reshape(-1, pred_scales.shape[-2], pred_scales.shape[-1])
195 | mid_scales = mid_scales.reshape(-1, mid_scales.shape[-2], mid_scales.shape[-1])
196 | # print('test shape:', preds.shape, mids.shape, trues.shape)
197 |
198 | mae, mse, rmse, mape, mspe, corr = metric(mids, trues)
199 | maes, mses, rmses, mapes, mspes, corrs = metric(mid_scales, true_scales)
200 | print('mid --> normed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mse, mae, rmse, mape, mspe, corr))
201 | print('mid --> denormed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mses, maes, rmses, mapes, mspes, corrs))
202 |
203 | mae, mse, rmse, mape, mspe, corr = metric(preds, trues)
204 | maes, mses, rmses, mapes, mspes, corrs = metric(pred_scales, true_scales)
205 | print('final --> normed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mse, mae, rmse, mape, mspe, corr))
206 | print('final --> denormed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mses, maes, rmses, mapes, mspes, corrs))
207 | else:
208 | print('Error!')
209 |
210 | return total_loss
211 |
212 | def train(self, setting):
213 | train_data, train_loader = self._get_data(flag = 'train')
214 | valid_data, valid_loader = self._get_data(flag = 'val')
215 | test_data, test_loader = self._get_data(flag = 'test')
216 | path = os.path.join(self.args.checkpoints, setting)
217 | print(path)
218 | if not os.path.exists(path):
219 | os.makedirs(path)
220 | writer = SummaryWriter('event/run_ETTh/{}'.format(self.args.model_name))
221 |
222 | time_now = time.time()
223 |
224 | train_steps = len(train_loader)
225 | early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
226 |
227 | model_optim = self._select_optimizer()
228 | criterion = self._select_criterion(self.args.loss)
229 |
230 | if self.args.use_amp:
231 | scaler = torch.cuda.amp.GradScaler()
232 |
233 | if self.args.resume:
234 | self.model, lr, epoch_start = load_model(self.model, path, model_name=self.args.data, horizon=self.args.pred_len)
235 | else:
236 | epoch_start = 0
237 |
238 | for epoch in range(epoch_start, self.args.train_epochs):
239 | iter_count = 0
240 | train_loss = []
241 |
242 | self.model.train()
243 | epoch_time = time.time()
244 | for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):
245 | iter_count += 1
246 |
247 | model_optim.zero_grad()
248 | pred, pred_scale, mid, mid_scale, true, true_scale = self._process_one_batch_SCINet(
249 | train_data, batch_x, batch_y)
250 |
251 | if self.args.stacks == 1:
252 | loss = criterion(pred, true)
253 | elif self.args.stacks == 2:
254 | loss = criterion(pred, true) + criterion(mid, true)
255 | else:
256 | print('Error!')
257 |
258 | train_loss.append(loss.item())
259 |
260 | if (i+1) % 100==0:
261 | print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
262 | speed = (time.time()-time_now)/iter_count
263 | left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)
264 | print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
265 | iter_count = 0
266 | time_now = time.time()
267 |
268 | if self.args.use_amp:
269 | print('use amp')
270 | scaler.scale(loss).backward()
271 | scaler.step(model_optim)
272 | scaler.update()
273 | else:
274 | loss.backward()
275 | model_optim.step()
276 |
277 | print("Epoch: {} cost time: {}".format(epoch+1, time.time()-epoch_time))
278 | train_loss = np.average(train_loss)
279 | print('--------start to validate-----------')
280 | valid_loss = self.valid(valid_data, valid_loader, criterion)
281 | print('--------start to test-----------')
282 | test_loss = self.valid(test_data, test_loader, criterion)
283 |
284 | print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} valid Loss: {3:.7f} Test Loss: {4:.7f}".format(
285 | epoch + 1, train_steps, train_loss, valid_loss, test_loss))
286 |
287 | writer.add_scalar('train_loss', train_loss, global_step=epoch)
288 | writer.add_scalar('valid_loss', valid_loss, global_step=epoch)
289 | writer.add_scalar('test_loss', test_loss, global_step=epoch)
290 |
291 | early_stopping(valid_loss, self.model, path)
292 | if early_stopping.early_stop:
293 | print("Early stopping")
294 | break
295 |
296 | lr = adjust_learning_rate(model_optim, epoch+1, self.args)
297 |
298 | save_model(epoch, lr, self.model, path, model_name=self.args.data, horizon=self.args.pred_len)
299 | best_model_path = path+'/'+'checkpoint.pth'
300 | self.model.load_state_dict(torch.load(best_model_path))
301 | return self.model
302 |
303 | def test(self, setting, evaluate=False):
304 | test_data, test_loader = self._get_data(flag='test')
305 |
306 | self.model.eval()
307 |
308 | preds = []
309 | trues = []
310 | mids = []
311 | pred_scales = []
312 | true_scales = []
313 | mid_scales = []
314 |
315 | if evaluate:
316 | path = os.path.join(self.args.checkpoints, setting)
317 | best_model_path = path+'/'+'checkpoint.pth'
318 | self.model.load_state_dict(torch.load(best_model_path))
319 |
320 | for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(test_loader):
321 | pred, pred_scale, mid, mid_scale, true, true_scale = self._process_one_batch_SCINet(
322 | test_data, batch_x, batch_y)
323 |
324 | if self.args.stacks == 1:
325 | preds.append(pred.detach().cpu().numpy())
326 | trues.append(true.detach().cpu().numpy())
327 | pred_scales.append(pred_scale.detach().cpu().numpy())
328 | true_scales.append(true_scale.detach().cpu().numpy())
329 | elif self.args.stacks == 2:
330 | preds.append(pred.detach().cpu().numpy())
331 | trues.append(true.detach().cpu().numpy())
332 | mids.append(mid.detach().cpu().numpy())
333 | pred_scales.append(pred_scale.detach().cpu().numpy())
334 | mid_scales.append(mid_scale.detach().cpu().numpy())
335 | true_scales.append(true_scale.detach().cpu().numpy())
336 |
337 | else:
338 | print('Error!')
339 |
340 | if self.args.stacks == 1:
341 | preds = np.array(preds)
342 | trues = np.array(trues)
343 |
344 | pred_scales = np.array(pred_scales)
345 | true_scales = np.array(true_scales)
346 |
347 | preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
348 | trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
349 | true_scales = true_scales.reshape(-1, true_scales.shape[-2], true_scales.shape[-1])
350 | pred_scales = pred_scales.reshape(-1, pred_scales.shape[-2], pred_scales.shape[-1])
351 |
352 | mae, mse, rmse, mape, mspe, corr = metric(preds, trues)
353 | maes, mses, rmses, mapes, mspes, corrs = metric(pred_scales, true_scales)
354 | print('normed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mse, mae, rmse, mape, mspe, corr))
355 | print('TTTT denormed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mses, maes, rmses, mapes, mspes, corrs))
356 |
357 | elif self.args.stacks == 2:
358 | preds = np.array(preds)
359 | trues = np.array(trues)
360 | mids = np.array(mids)
361 |
362 | pred_scales = np.array(pred_scales)
363 | true_scales = np.array(true_scales)
364 | mid_scales = np.array(mid_scales)
365 |
366 | preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
367 | trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
368 | mids = mids.reshape(-1, mids.shape[-2], mids.shape[-1])
369 | true_scales = true_scales.reshape(-1, true_scales.shape[-2], true_scales.shape[-1])
370 | pred_scales = pred_scales.reshape(-1, pred_scales.shape[-2], pred_scales.shape[-1])
371 | mid_scales = mid_scales.reshape(-1, mid_scales.shape[-2], mid_scales.shape[-1])
372 | # print('test shape:', preds.shape, mids.shape, trues.shape)
373 |
374 | mae, mse, rmse, mape, mspe, corr = metric(mids, trues)
375 | maes, mses, rmses, mapes, mspes, corrs = metric(mid_scales, true_scales)
376 | print('Mid --> normed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mse, mae, rmse, mape, mspe, corr))
377 |
378 | mae, mse, rmse, mape, mspe, corr = metric(preds, trues)
379 | maes, mses, rmses, mapes, mspes, corrs = metric(pred_scales, true_scales)
380 | print('TTTT Final --> denormed mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mse, mae, rmse, mape, mspe, corr))
381 |
382 | else:
383 | print('Error!')
384 |
385 | # result save
386 | if self.args.save:
387 | folder_path = 'exp/ett_results/' + setting + '/'
388 | if not os.path.exists(folder_path):
389 | os.makedirs(folder_path)
390 |
391 | mae, mse, rmse, mape, mspe, corr = metric(preds, trues)
392 | print('Test:mse:{:.4f}, mae:{:.4f}, rmse:{:.4f}, mape:{:.4f}, mspe:{:.4f}, corr:{:.4f}'.format(mse, mae, rmse, mape, mspe, corr))
393 |
394 | np.save(folder_path + 'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
395 | np.save(folder_path + 'pred.npy', preds)
396 | np.save(folder_path + 'true.npy', trues)
397 | np.save(folder_path + 'pred_scales.npy', pred_scales)
398 | np.save(folder_path + 'true_scales.npy', true_scales)
399 |
400 | return mae, maes, mse, mses
401 |
402 | def _process_one_batch_SCINet(self, dataset_object, batch_x, batch_y):
403 | batch_x = batch_x.double().cuda()
404 | batch_y = batch_y.double()
405 |
406 | if self.args.stacks == 1:
407 | outputs = self.model(batch_x)
408 | elif self.args.stacks == 2:
409 | outputs, mid = self.model(batch_x)
410 | else:
411 | raise ValueError('stacks must be 1 or 2, got {}'.format(self.args.stacks))
412 |
413 | #if self.args.inverse:
414 | outputs_scaled = dataset_object.inverse_transform(outputs)
415 | if self.args.stacks == 2:
416 | mid_scaled = dataset_object.inverse_transform(mid)
417 | f_dim = -1 if self.args.features=='MS' else 0
418 | batch_y = batch_y[:,-self.args.pred_len:,f_dim:].cuda()
419 | batch_y_scaled = dataset_object.inverse_transform(batch_y)
420 |
421 | if self.args.stacks == 1:
422 | return outputs, outputs_scaled, 0, 0, batch_y, batch_y_scaled
423 | elif self.args.stacks == 2:
424 | return outputs, outputs_scaled, mid, mid_scaled, batch_y, batch_y_scaled
425 | else:
426 | raise ValueError('stacks must be 1 or 2, got {}'.format(self.args.stacks))
427 |
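
The training objective above supervises both SCINet stacks against the same ground truth. Stripped of bookkeeping, the two-stack case reduces to the following sketch (`model`, `x`, `y` are placeholders):

    import torch.nn as nn

    criterion = nn.L1Loss()

    def two_stack_loss(model, x, y):
        # The final forecast and the intermediate (stack-1) forecast are both
        # penalized against the same target, so the mid output gets direct supervision.
        pred, mid = model(x)
        return criterion(pred, y) + criterion(mid, y)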
--------------------------------------------------------------------------------
/experiments/exp_basic.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import numpy as np
4 |
5 | class Exp_Basic(object):
6 | def __init__(self, args):
7 | self.args = args
8 | # self.device = self._acquire_device()
9 | self.model = self._build_model().cuda()
10 |
11 | def _build_model(self):
12 | raise NotImplementedError
13 | return None
14 |
15 | def _acquire_device(self):
16 | if self.args.use_gpu:
17 | os.environ["CUDA_VISIBLE_DEVICES"] = str(self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
18 | device = torch.device('cuda:{}'.format(self.args.gpu))
19 | print('Use GPU: cuda:{}'.format(self.args.gpu))
20 | else:
21 | device = torch.device('cpu')
22 | print('Use CPU')
23 | return device
24 |
25 | def _get_data(self):
26 | pass
27 |
28 | def valid(self):
29 | pass
30 |
31 | def train(self):
32 | pass
33 |
34 | def test(self):
35 | pass
36 |
--------------------------------------------------------------------------------
/experiments/exp_financial.py:
--------------------------------------------------------------------------------
1 | import os
2 | import math
3 | import time
4 | import warnings
5 | warnings.filterwarnings('ignore')
6 |
7 | import numpy as np
8 |
9 | import torch
10 | import torch.nn as nn
11 | from torch import optim
12 | from torch.utils.data import DataLoader
13 | from torch.autograd import Variable
14 | from torch.utils.tensorboard import SummaryWriter
15 | from metrics.Finantial_metics import MSE, MAE
16 | from experiments.exp_basic import Exp_Basic
17 | from data_process.financial_dataloader import DataLoaderH
18 | from utils.tools import EarlyStopping, adjust_learning_rate, save_model, load_model
19 | from metrics.ETTh_metrics import metric
20 | from utils.math_utils import smooth_l1_loss
21 | from models.SCINet import SCINet
22 | from models.SCINet_decompose import SCINet_decompose
23 |
24 | class Exp_financial(Exp_Basic):
25 | def __init__(self, args):
26 | super(Exp_financial, self).__init__(args)
27 | if self.args.L1Loss:
28 | self.criterion = smooth_l1_loss
29 | else:
30 | self.criterion = nn.MSELoss(reduction='sum').cuda()
31 | self.evaluateL2 = nn.MSELoss(reduction='sum').cuda()
32 | self.evaluateL1 = nn.L1Loss(reduction='sum').cuda()
33 | self.writer = SummaryWriter('.exp/run_financial/{}'.format(args.model_name))
34 |
35 | def _build_model(self):
36 | if self.args.dataset_name == 'electricity':
37 | self.input_dim = 321
38 |
39 | if self.args.dataset_name == 'solar_AL':
40 | self.input_dim = 137
41 |
42 | if self.args.dataset_name == 'exchange_rate':
43 | self.input_dim = 8
44 |
45 | if self.args.dataset_name == 'traffic':
46 | self.input_dim = 862
47 |
48 | if self.args.decompose:
49 | model = SCINet_decompose(
50 | output_len=self.args.horizon,
51 | input_len=self.args.window_size,
52 | input_dim=self.input_dim,
53 | hid_size=self.args.hidden_size,
54 | num_stacks=self.args.stacks,
55 | num_levels=self.args.levels,
56 | num_decoder_layer=self.args.num_decoder_layer,
57 | concat_len=self.args.concat_len,
58 | groups=self.args.groups,
59 | kernel=self.args.kernel,
60 | dropout=self.args.dropout,
61 | single_step_output_One=self.args.single_step_output_One,
62 | positionalE=self.args.positionalEcoding,
63 | modified=True,
64 | RIN=self.args.RIN
65 | )
66 |
67 | else:
68 |
69 | model = SCINet(
70 | output_len=self.args.horizon,
71 | input_len=self.args.window_size,
72 | input_dim=self.input_dim,
73 | hid_size=self.args.hidden_size,
74 | num_stacks=self.args.stacks,
75 | num_levels=self.args.levels,
76 | num_decoder_layer=self.args.num_decoder_layer,
77 | concat_len=self.args.concat_len,
78 | groups=self.args.groups,
79 | kernel=self.args.kernel,
80 | dropout=self.args.dropout,
81 | single_step_output_One=self.args.single_step_output_One,
82 | positionalE=self.args.positionalEcoding,
83 | modified=True,
84 | RIN=self.args.RIN
85 | )
86 | print(model)
87 | return model
88 |
89 | def _get_data(self):
90 | if self.args.dataset_name == 'electricity':
91 | self.args.data = './datasets/financial/electricity.txt'
92 |
93 | if self.args.dataset_name == 'solar_AL':
94 | self.args.data = './datasets/financial/solar_AL.txt'
95 |
96 | if self.args.dataset_name == 'exchange_rate':
97 | self.args.data = './datasets/financial/exchange_rate.txt'
98 |
99 | if self.args.dataset_name == 'traffic':
100 | self.args.data = './datasets/financial/traffic.txt'
101 |
102 | if self.args.long_term_forecast:
103 | return DataLoaderH(self.args.data, 0.7, 0.1, self.args.horizon, self.args.window_size, 4)
104 | else:
105 | return DataLoaderH(self.args.data, 0.6, 0.2, self.args.horizon, self.args.window_size, self.args.normalize)
106 |
107 | def _select_optimizer(self):
108 | return torch.optim.Adam(params=self.model.parameters(), lr=self.args.lr, betas=(0.9, 0.999), weight_decay=1e-5)
109 |
110 |
111 | def train(self):
112 |
113 | best_val = 10000000
114 |
115 | optim = self._select_optimizer()
116 |
117 | data = self._get_data()
118 | X = data.train[0]
119 | Y = data.train[1]
120 | save_path = os.path.join(self.args.save_path, self.args.model_name)
121 | if not os.path.exists(save_path):
122 | os.makedirs(save_path)
123 |
124 | if self.args.resume:
125 | self.model, lr, epoch_start = load_model(self.model, save_path, model_name=self.args.dataset_name, horizon=self.args.horizon)
126 | else:
127 | epoch_start = 0
128 |
129 | for epoch in range(epoch_start, self.args.epochs):
130 | epoch_start_time = time.time()
131 | iter = 0
132 | self.model.train()
133 | total_loss = 0
134 | n_samples = 0
135 | final_loss = 0
136 | min_loss = 0  # accumulates the mid-stack (intermediate supervision) loss
137 | lr = adjust_learning_rate(optim, epoch, self.args)
138 |
139 | for tx, ty in data.get_batches(X, Y, self.args.batch_size, True):
140 | self.model.zero_grad() #torch.Size([32, 168, 137])
141 | if self.args.stacks == 1:
142 | forecast = self.model(tx)
143 | elif self.args.stacks == 2:
144 | forecast, res = self.model(tx)
145 | scale = data.scale.expand(forecast.size(0), self.args.horizon, data.m)
146 | bias = data.bias.expand(forecast.size(0), self.args.horizon, data.m)
147 | weight = torch.tensor(self.args.lastWeight).cuda() #used with multi-step
148 |
149 | if self.args.single_step: #single step
150 | ty_last = ty[:, -1, :]
151 | scale_last = data.scale.expand(forecast.size(0), data.m)
152 | bias_last = data.bias.expand(forecast.size(0), data.m)
153 | if self.args.normalize == 3:
154 | loss_f = self.criterion(forecast[:, -1], ty_last)
155 | if self.args.stacks == 2:
156 | loss_m = self.criterion(res, ty)/res.shape[1] #average results
157 |
158 | else:
159 | loss_f = self.criterion(forecast[:, -1] * scale_last + bias_last, ty_last * scale_last + bias_last)
160 | if self.args.stacks == 2:
161 | loss_m = self.criterion(res * scale + bias, ty * scale + bias)/res.shape[1] #average results
162 |
163 | else:
164 | if self.args.normalize == 3:
165 | if self.args.lastWeight == 1.0:
166 | loss_f = self.criterion(forecast, ty)
167 | if self.args.stacks == 2:
168 | loss_m = self.criterion(res, ty)
169 | else:
170 | loss_f = self.criterion(forecast[:, :-1, :], ty[:, :-1, :] ) \
171 | + weight * self.criterion(forecast[:, -1:, :], ty[:, -1:, :] )
172 | if self.args.stacks == 2:
173 | loss_m = self.criterion(res[:, :-1, :] , ty[:, :-1, :] ) \
174 | + weight * self.criterion(res[:, -1:, :], ty[:, -1:, :] )
175 | else:
176 | if self.args.lastWeight == 1.0:
177 | loss_f = self.criterion(forecast * scale + bias, ty * scale + bias)
178 | if self.args.stacks == 2:
179 | loss_m = self.criterion(res * scale + bias, ty * scale + bias)
180 | else:
181 | loss_f = self.criterion(forecast[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :],
182 | ty[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :]) \
183 | + weight * self.criterion(forecast[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :],
184 | ty[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :])
185 | if self.args.stacks == 2:
186 | loss_m = self.criterion(res[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :],
187 | ty[:, :-1, :] * scale[:, :-1, :] + bias[:, :-1, :]) \
188 | + weight * self.criterion(res[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :],
189 | ty[:, -1:, :] * scale[:, -1:, :] + bias[:, -1:, :])
190 | loss = loss_f
191 | if self.args.stacks == 2:
192 | loss += loss_m
193 |
194 | loss.backward()
195 | total_loss += loss.item()
196 |
197 | final_loss += loss_f.item()
198 | if self.args.stacks == 2:
199 | min_loss += loss_m.item()
200 | n_samples += (forecast.size(0) * data.m)
201 | optim.step()  # Adam's step() returns None; no gradient norm is produced here
202 |
203 | if iter%100==0:
204 | if self.args.stacks == 1:
205 | print('iter:{:3d} | loss: {:.7f}'.format(iter, loss.item()/(forecast.size(0) * data.m)))
206 | elif self.args.stacks == 2:
207 | print('iter:{:3d} | loss: {:.7f}, loss_final: {:.7f}, loss_mid: {:.7f}'.format(iter, loss.item()/(forecast.size(0) * data.m),
208 | loss_f.item()/(forecast.size(0) * data.m),loss_m.item()/(forecast.size(0) * data.m)))
209 | iter += 1
210 | if self.args.stacks == 1:
211 | val_loss, val_rae, val_corr, val_mse, val_mae = self.validate(data, data.valid[0],data.valid[1])
212 | test_loss, test_rae, test_corr, test_mse, test_mae = self.validate(data, data.test[0],data.test[1])
213 | elif self.args.stacks == 2:
214 | val_loss, val_rae, val_corr, val_rse_mid, val_rae_mid, val_correlation_mid, val_mse, val_mae =self.validate(data, data.valid[0],data.valid[1])
215 | test_loss, test_rae, test_corr, test_rse_mid, test_rae_mid, test_correlation_mid, test_mse, test_mae = self.validate(data, data.test[0],data.test[1])
216 |
217 | self.writer.add_scalar('Train_loss_total', total_loss / n_samples, global_step=epoch)
218 | self.writer.add_scalar('Train_loss_Final', final_loss / n_samples, global_step=epoch)
219 | self.writer.add_scalar('Validation_final_rse', val_loss, global_step=epoch)
220 | self.writer.add_scalar('Validation_final_rae', val_rae, global_step=epoch)
221 | self.writer.add_scalar('Validation_final_corr', val_corr, global_step=epoch)
222 | self.writer.add_scalar('Test_final_rse', test_loss, global_step=epoch)
223 | self.writer.add_scalar('Test_final_rae', test_rae, global_step=epoch)
224 | self.writer.add_scalar('Test_final_corr', test_corr, global_step=epoch)
225 | if self.args.stacks == 2:
226 | self.writer.add_scalar('Train_loss_Mid', min_loss / n_samples, global_step=epoch)
227 | self.writer.add_scalar('Validation_mid_rse', val_rse_mid, global_step=epoch)
228 | self.writer.add_scalar('Validation_mid_rae', val_rae_mid, global_step=epoch)
229 | self.writer.add_scalar('Validation_mid_corr', val_correlation_mid, global_step=epoch)
230 | self.writer.add_scalar('Test_mid_rse', test_rse_mid, global_step=epoch)
231 | self.writer.add_scalar('Test_mid_rae', test_rae_mid, global_step=epoch)
232 | self.writer.add_scalar('Test_mid_corr', test_correlation_mid, global_step=epoch)
233 |
234 | print(
235 | '| EncoDeco: end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae {:5.4f} | valid corr {:5.4f}| valid mse {:5.4f} | valid mae {:5.4f}|'
236 | ' test rse {:5.4f} | test rae {:5.4f} | test corr {:5.4f} | test mse {:5.4f} | test mae {:5.4f}|'.format(
237 | epoch, (time.time() - epoch_start_time), total_loss / n_samples, val_loss, val_rae, val_corr, val_mse, val_mae, test_loss, test_rae, test_corr, test_mse, test_mae), flush=True)
238 |
239 | if val_mse < best_val and self.args.long_term_forecast:
240 | save_model(epoch, lr, self.model, save_path, model_name=self.args.dataset_name, horizon=self.args.horizon)
241 | print('--------------| Best Val loss |--------------')
242 | best_val = val_mse
243 | elif val_loss < best_val and not self.args.long_term_forecast:
244 | save_model(epoch, lr, self.model, save_path, model_name=self.args.dataset_name, horizon=self.args.horizon)
245 | print('--------------| Best Val loss |--------------')
246 | best_val = val_loss
247 |
248 | return total_loss / n_samples
249 |
250 | def validate(self, data, X, Y, evaluate=False):
251 | self.model.eval()
252 | total_loss = 0
253 | total_loss_l1 = 0
254 |
255 | total_loss_mid = 0
256 | total_loss_l1_mid = 0
257 | n_samples = 0
258 | predict = None
259 | res_mid = None
260 | test = None
261 |
262 | forecast_set = []
263 | Mid_set = []
264 | target_set = []
265 |
266 | if evaluate:
267 | save_path = os.path.join(self.args.save_path, self.args.model_name)
268 | self.model = load_model(self.model, save_path, model_name=self.args.dataset_name, horizon=self.args.horizon)[0]
269 |
270 | for X, Y in data.get_batches(X, Y, self.args.batch_size, False):
271 | with torch.no_grad():
272 | if self.args.stacks == 1:
273 | forecast = self.model(X)
274 | elif self.args.stacks == 2:
275 | forecast, res = self.model(X) #torch.Size([32, 3, 137])
276 | # only predict the last step
277 | true = Y[:, -1, :].squeeze()
278 | output = forecast[:,-1,:].squeeze()
279 |
280 | forecast_set.append(forecast)
281 | target_set.append(Y)
282 | if self.args.stacks == 2:
283 | Mid_set.append(res)
284 |
285 | if len(forecast.shape)==1:
286 | forecast = forecast.unsqueeze(dim=0)
287 | if self.args.stacks == 2:
288 | res = res.unsqueeze(dim=0)
289 | if predict is None:
290 | predict = forecast[:,-1,:].squeeze()
291 | test = Y[:,-1,:].squeeze() #torch.Size([32, 3, 137])
292 | if self.args.stacks == 2:
293 | res_mid = res[:,-1,:].squeeze()
294 |
295 | else:
296 | predict = torch.cat((predict, forecast[:,-1,:].squeeze()))
297 | test = torch.cat((test, Y[:, -1, :].squeeze()))
298 | if self.args.stacks == 2:
299 | res_mid = torch.cat((res_mid, res[:,-1,:].squeeze()))
300 |
301 | scale = data.scale.expand(output.size(0),data.m)
302 | bias = data.bias.expand(output.size(0), data.m)
303 | if self.args.stacks == 2:
304 | output_res = res[:,-1,:].squeeze()
305 |
306 | total_loss += self.evaluateL2(output * scale + bias, true * scale+ bias).item()
307 | total_loss_l1 += self.evaluateL1(output * scale+ bias, true * scale+ bias).item()
308 | if self.args.stacks == 2:
309 | total_loss_mid += self.evaluateL2(output_res * scale+ bias, true * scale+ bias).item()
310 | total_loss_l1_mid += self.evaluateL1(output_res * scale+ bias, true * scale+ bias).item()
311 |
312 | n_samples += (output.size(0) * data.m)
313 |
314 | forecast_Norm = torch.cat(forecast_set, axis=0)
315 | target_Norm = torch.cat(target_set, axis=0)
316 | mse = MSE(forecast_Norm.cpu().numpy(), target_Norm.cpu().numpy())
317 | mae = MAE(forecast_Norm.cpu().numpy(), target_Norm.cpu().numpy())
318 |
319 | if self.args.stacks == 2:
320 | Mid_Norm = torch.cat(Mid_set, axis=0)
321 |
322 | rse_final_each = []
323 | rae_final_each = []
324 | corr_final_each = []
325 | Scale = data.scale.expand(forecast_Norm.size(0),data.m)
326 | bias = data.bias.expand(forecast_Norm.size(0),data.m)
327 | if not self.args.single_step: # multi-step: report metrics for every horizon step
328 | for i in range(forecast_Norm.shape[1]): #get results of each step
329 | lossL2_F = self.evaluateL2(forecast_Norm[:,i,:] * Scale + bias, target_Norm[:,i,:] * Scale+ bias).item()
330 | lossL1_F = self.evaluateL1(forecast_Norm[:,i,:] * Scale+ bias, target_Norm[:,i,:] * Scale+ bias).item()
331 | if self.args.stacks == 2:
332 | lossL2_M = self.evaluateL2(Mid_Norm[:, i, :] * Scale+ bias, target_Norm[:, i, :] * Scale+ bias).item()
333 | lossL1_M = self.evaluateL1(Mid_Norm[:, i, :] * Scale+ bias, target_Norm[:, i, :] * Scale+ bias).item()
334 | rse_F = math.sqrt(lossL2_F / forecast_Norm.shape[0]/ data.m) / data.rse
335 | rae_F = (lossL1_F / forecast_Norm.shape[0]/ data.m) / data.rae
336 | rse_final_each.append(rse_F.item())
337 | rae_final_each.append(rae_F.item())
338 |
339 | pred = forecast_Norm[:,i,:].data.cpu().numpy()
340 | y_true = target_Norm[:,i,:].data.cpu().numpy()
341 |
342 | sig_p = pred.std(axis=0)
343 | sig_g = y_true.std(axis=0)
344 | m_p = pred.mean(axis=0)
345 | m_g = y_true.mean(axis=0)
346 | ind = (sig_p * sig_g != 0)
347 | corr = ((pred - m_p) * (y_true - m_g)).mean(axis=0) / (sig_p * sig_g)
348 | corr = (corr[ind]).mean()
349 | corr_final_each.append(corr)
350 |
351 | rse = math.sqrt(total_loss / n_samples) / data.rse
352 | rae = (total_loss_l1 / n_samples) / data.rae
353 | if self.args.stacks == 2:
354 | rse_mid = math.sqrt(total_loss_mid / n_samples) / data.rse
355 | rae_mid = (total_loss_l1_mid / n_samples) / data.rae
356 |
357 | # only calculate the last step for financial datasets.
358 | predict = forecast_Norm.cpu().numpy()[:,-1,:]
359 | Ytest = target_Norm.cpu().numpy()[:,-1,:]
360 |
361 | sigma_p = predict.std(axis=0)
362 | sigma_g = Ytest.std(axis=0)
363 | mean_p = predict.mean(axis=0)
364 | mean_g = Ytest.mean(axis=0)
365 | index = (sigma_p * sigma_g != 0)
366 | correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g)
367 | correlation = (correlation[index]).mean()
368 | if self.args.stacks == 2:
369 | mid_pred = Mid_Norm.cpu().numpy()[:,-1,:]
370 | sigma_mid = mid_pred.std(axis=0)
371 | mean_mid = mid_pred.mean(axis=0)
372 | index_mid = (sigma_mid * sigma_g != 0)
373 | correlation_mid = ((mid_pred - mean_mid) * (Ytest - mean_g)).mean(axis=0) / (sigma_mid * sigma_g)
374 | correlation_mid = (correlation_mid[index_mid]).mean()
375 |
376 | print(
377 | '|valid_final mse {:5.4f} |valid_final mae {:5.4f} |valid_final rse {:5.4f} | valid_final rae {:5.4f} | valid_final corr {:5.4f}'.format(mse,mae,
378 | rse, rae, correlation), flush=True)
379 | if self.args.stacks == 2:
380 | print(
381 | '|valid_final mse {:5.4f} |valid_final mae {:5.4f} |valid_mid rse {:5.4f} | valid_mid rae {:5.4f} | valid_mid corr {:5.4f}'.format(mse,mae,
382 | rse_mid, rae_mid, correlation_mid), flush=True)
383 |
384 | if self.args.stacks == 1:
385 | return rse, rae, correlation, mse, mae
386 | if self.args.stacks == 2:
387 | return rse, rae, correlation, rse_mid, rae_mid, correlation_mid, mse, mae
388 |
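
The correlation computed inline above is the mean per-variable Pearson correlation, skipping constant series. As a standalone helper it is simply (a sketch, not part of the repository):

    import numpy as np

    def mean_pearson_corr(pred, true):
        # pred, true: (samples, variables). Mean Pearson correlation across
        # variables, ignoring variables whose std is zero in either array.
        sig_p, sig_g = pred.std(axis=0), true.std(axis=0)
        cov = ((pred - pred.mean(axis=0)) * (true - true.mean(axis=0))).mean(axis=0)
        valid = (sig_p * sig_g) != 0
        return (cov[valid] / (sig_p[valid] * sig_g[valid])).mean()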
--------------------------------------------------------------------------------
/experiments/exp_pems.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 |
4 | import warnings
5 | warnings.filterwarnings('ignore')
6 |
7 | import numpy as np
8 | import torch
9 | import torch.nn as nn
10 | from torch import optim
11 | from torch.utils.data import DataLoader
12 |
13 | from experiments.exp_basic import Exp_Basic
14 | from data_process.forecast_dataloader import ForecastDataset,ForecastTestDataset, de_normalized
15 | from utils.tools import EarlyStopping, adjust_learning_rate, save_model, load_model
16 | from metrics.ETTh_metrics import metric
17 | from torch.utils.tensorboard import SummaryWriter
18 | from utils.math_utils import evaluate, creatMask
19 | from models.SCINet import SCINet
20 | from models.SCINet_decompose import SCINet_decompose
21 |
22 | class Exp_pems(Exp_Basic):
23 | def __init__(self, args):
24 | super(Exp_pems, self).__init__(args)
25 | self.result_file = os.path.join('exp/pems_checkpoint', self.args.dataset, 'checkpoints')
26 | self.result_test_file = os.path.join('exp/pems_checkpoint', args.dataset, 'test')
27 | self.result_train_file = os.path.join('exp/pems_checkpoint', args.dataset, 'train')
28 |
29 | def _build_model(self):
30 | if self.args.dataset == 'PEMS03':
31 | self.input_dim = 358
32 | elif self.args.dataset == 'PEMS04':
33 | self.input_dim = 307
34 | elif self.args.dataset == 'PEMS07':
35 | self.input_dim = 883
36 | elif self.args.dataset == 'PEMS08':
37 | self.input_dim = 170
38 | if self.args.decompose:
39 | model = SCINet_decompose(
40 | output_len=self.args.horizon,
41 | input_len=self.args.window_size,
42 | input_dim=self.input_dim,
43 | hid_size = self.args.hidden_size,
44 | num_stacks=self.args.stacks,
45 | num_levels=self.args.levels,
46 | num_decoder_layer=self.args.num_decoder_layer,
47 | concat_len = self.args.concat_len,
48 | groups = self.args.groups,
49 | kernel = self.args.kernel,
50 | dropout = self.args.dropout,
51 | single_step_output_One = self.args.single_step_output_One,
52 | positionalE = self.args.positionalEcoding,
53 | modified = True,
54 | RIN=self.args.RIN
55 | )
56 | else:
57 | model = SCINet(
58 | output_len=self.args.horizon,
59 | input_len=self.args.window_size,
60 | input_dim=self.input_dim,
61 | hid_size = self.args.hidden_size,
62 | num_stacks=self.args.stacks,
63 | num_levels=self.args.levels,
64 | num_decoder_layer=self.args.num_decoder_layer,
65 | concat_len = self.args.concat_len,
66 | groups = self.args.groups,
67 | kernel = self.args.kernel,
68 | dropout = self.args.dropout,
69 | single_step_output_One = self.args.single_step_output_One,
70 | positionalE = self.args.positionalEcoding,
71 | modified = True,
72 | RIN=self.args.RIN
73 | )
74 |
75 | print(model)
76 | return model
77 |
78 | def _get_data(self):
79 | data_file = os.path.join('./datasets/PEMS', self.args.dataset + '.npz')
80 | print('data file:',data_file)
81 | data = np.load(data_file,allow_pickle=True)
82 | data = data['data'][:,:,0]
83 | train_ratio = self.args.train_length / (self.args.train_length + self.args.valid_length + self.args.test_length)
84 | valid_ratio = self.args.valid_length / (self.args.train_length + self.args.valid_length + self.args.test_length)
85 | test_ratio = 1 - train_ratio - valid_ratio
86 | train_data = data[:int(train_ratio * len(data))]
87 | valid_data = data[int(train_ratio * len(data)):int((train_ratio + valid_ratio) * len(data))]
88 | test_data = data[int((train_ratio + valid_ratio) * len(data)):]
89 | if len(train_data) == 0:
90 | raise Exception('Cannot organize enough training data')
91 | if len(valid_data) == 0:
92 | raise Exception('Cannot organize enough validation data')
93 | if len(test_data) == 0:
94 | raise Exception('Cannot organize enough test data')
95 | if self.args.normtype == 0: # we follow StemGNN and other related works here for a somewhat fair comparison, but we strongly suggest using self.args.normtype == 2!
96 | train_mean = np.mean(train_data, axis=0)
97 | train_std = np.std(train_data, axis=0)
98 | train_normalize_statistic = {"mean": train_mean.tolist(), "std": train_std.tolist()}
99 | val_mean = np.mean(valid_data, axis=0)
100 | val_std = np.std(valid_data, axis=0)
101 | val_normalize_statistic = {"mean": val_mean.tolist(), "std": val_std.tolist()}
102 | test_mean = np.mean(test_data, axis=0)
103 | test_std = np.std(test_data, axis=0)
104 | test_normalize_statistic = {"mean": test_mean.tolist(), "std": test_std.tolist()}
105 | elif self.args.normtype == 1:
106 | data_mean = np.mean(data, axis=0)
107 | data_std = np.std(data, axis=0)
108 | train_normalize_statistic = {"mean": data_mean.tolist(), "std": data_std.tolist()}
109 | val_normalize_statistic = {"mean": data_mean.tolist(), "std": data_std.tolist()}
110 | test_normalize_statistic = {"mean": data_mean.tolist(), "std": data_std.tolist()}
111 | else:
112 | train_mean = np.mean(train_data, axis=0)
113 | train_std = np.std(train_data, axis=0)
114 | train_normalize_statistic = {"mean": train_mean.tolist(), "std": train_std.tolist()}
115 | val_normalize_statistic = {"mean": train_mean.tolist(), "std": train_std.tolist()}
116 | test_normalize_statistic = {"mean": train_mean.tolist(), "std": train_std.tolist()}
117 | train_set = ForecastDataset(train_data, window_size=self.args.window_size, horizon=self.args.horizon,
118 | normalize_method=self.args.norm_method, norm_statistic=train_normalize_statistic)
119 | valid_set = ForecastDataset(valid_data, window_size=self.args.window_size, horizon=self.args.horizon,
120 | normalize_method=self.args.norm_method, norm_statistic=val_normalize_statistic)
121 | test_set = ForecastTestDataset(test_data, window_size=self.args.window_size, horizon=self.args.horizon,
122 | normalize_method=self.args.norm_method, norm_statistic=test_normalize_statistic)
123 | train_loader = DataLoader(train_set, batch_size=self.args.batch_size, drop_last=False, shuffle=True,
124 | num_workers=1)
125 | valid_loader = DataLoader(valid_set, batch_size=self.args.batch_size, shuffle=False, num_workers=1)
126 | test_loader = DataLoader(test_set, batch_size=self.args.batch_size, shuffle=False, num_workers=1)
127 | node_cnt = train_data.shape[1]
128 | return test_loader, train_loader, valid_loader,node_cnt,test_normalize_statistic,val_normalize_statistic
129 |
130 | def _select_optimizer(self):
131 | if self.args.optimizer == 'RMSProp':
132 | my_optim = torch.optim.RMSprop(params=self.model.parameters(), lr=self.args.lr, eps=1e-08)
133 | else:
134 | my_optim = torch.optim.Adam(params=self.model.parameters(), lr=self.args.lr, betas=(0.9, 0.999), weight_decay=self.args.weight_decay)
135 | return my_optim
136 |
137 | def inference(self, model, dataloader, node_cnt, window_size, horizon):
138 | forecast_set = []
139 | Mid_set = []
140 | target_set = []
141 | input_set = []
142 | self.model.eval()
143 | with torch.no_grad():
144 | for i, (inputs, target) in enumerate(dataloader):
145 | inputs = inputs.cuda()
146 | target = target.cuda()
147 | input_set.append(inputs.detach().cpu().numpy())
148 | step = 0
149 | forecast_steps = np.zeros([inputs.size()[0], horizon, node_cnt], dtype=np.float64)
150 | Mid_steps = np.zeros([inputs.size()[0], horizon, node_cnt], dtype=np.float64)
151 | while step < horizon:
152 | if self.args.stacks == 1:
153 | forecast_result = self.model(inputs)
154 | elif self.args.stacks == 2:
155 | forecast_result, Mid_result = self.model(inputs)
156 |
157 | len_model_output = forecast_result.size()[1]
158 | if len_model_output == 0:
159 | raise Exception('Got a blank inference result')
160 | inputs[:, :window_size - len_model_output, :] = inputs[:, len_model_output:window_size,
161 | :].clone()
162 | inputs[:, window_size - len_model_output:, :] = forecast_result.clone()
163 | forecast_steps[:, step:min(horizon - step, len_model_output) + step, :] = \
164 | forecast_result[:, :min(horizon - step, len_model_output), :].detach().cpu().numpy()
165 | if self.args.stacks == 2:
166 | Mid_steps[:, step:min(horizon - step, len_model_output) + step, :] = \
167 | Mid_result[:, :min(horizon - step, len_model_output), :].detach().cpu().numpy()
168 |
169 | step += min(horizon - step, len_model_output)
170 | forecast_set.append(forecast_steps)
171 | target_set.append(target.detach().cpu().numpy())
172 | if self.args.stacks == 2:
173 | Mid_set.append(Mid_steps)
174 |
175 | result_save = np.concatenate(forecast_set, axis=0)
176 | target_save = np.concatenate(target_set, axis=0)
177 |
178 | if self.args.stacks == 1:
179 | return np.concatenate(forecast_set, axis=0), np.concatenate(target_set, axis=0), np.concatenate(input_set, axis=0)
180 |
181 | elif self.args.stacks == 2:
182 | return np.concatenate(forecast_set, axis=0), np.concatenate(target_set, axis=0),np.concatenate(Mid_set, axis=0), np.concatenate(input_set, axis=0)
183 |
184 | def validate(self, model, epoch, forecast_loss, dataloader, normalize_method, statistic,
185 | node_cnt, window_size, horizon, writer,
186 | result_file=None,test=False):
187 | #start = datetime.now()
188 | print("===================Validate Normal=========================")
189 | if self.args.stacks == 1:
190 | forecast_norm, target_norm, input_norm = self.inference(model, dataloader,
191 | node_cnt, window_size, horizon)
192 | elif self.args.stacks == 2:
193 | forecast_norm, target_norm, mid_norm, input_norm = self.inference(model, dataloader,
194 | node_cnt, window_size, horizon)
195 | if normalize_method and statistic:
196 | forecast = de_normalized(forecast_norm, normalize_method, statistic)
197 | target = de_normalized(target_norm, normalize_method, statistic)
198 | input = de_normalized(input_norm, normalize_method, statistic)
199 | if self.args.stacks == 2:
200 | mid = de_normalized(mid_norm, normalize_method, statistic)
201 | else:
202 | forecast, target, input = forecast_norm, target_norm, input_norm
203 | if self.args.stacks == 2:
204 | mid = mid_norm
205 |
206 | beta = 0.1
207 | forecast_norm = torch.from_numpy(forecast_norm).float()
208 | target_norm = torch.from_numpy(target_norm).float()
209 | if self.args.stacks == 1:
210 | loss = forecast_loss(forecast_norm, target_norm)
211 |
212 | elif self.args.stacks == 2:
213 | mid_norm = torch.from_numpy(mid_norm).float()
214 |
215 | loss = forecast_loss(forecast_norm, target_norm) + forecast_loss(mid_norm, target_norm)
216 | loss_F = forecast_loss(forecast_norm, target_norm)
217 | loss_M = forecast_loss(mid_norm, target_norm)
218 |
219 | score = evaluate(target, forecast)
220 | score_final_detail = evaluate(target, forecast,by_step=True)
221 | print('by each step: MAPE & MAE & RMSE',score_final_detail)
222 | if self.args.stacks == 2:
223 | score1 = evaluate(target, mid)
224 | #end = datetime.now()
225 |
226 | if writer:
227 | if test:
228 | print(f'TEST: RAW : MAE {score[1]:7.2f}; MAPE {score[0]:7.2f}; RMSE {score[2]:7.2f}.')
229 | writer.add_scalar('Test MAE_final', score[1], global_step=epoch)
230 | writer.add_scalar('Test RMSE_final', score[2], global_step=epoch)
231 | if self.args.stacks == 2:
232 | print(f'TEST: RAW-Mid : MAE {score1[1]:7.2f}; MAPE {score1[0]:7.2f}; RMSE {score1[2]:7.2f}.')
233 | writer.add_scalar('Test MAE_Mid', score1[1], global_step=epoch)
234 | writer.add_scalar('Test RMSE_Mid', score1[2], global_step=epoch)
235 | writer.add_scalar('Test Loss_final', loss_F, global_step=epoch)
236 | writer.add_scalar('Test Loss_Mid', loss_M, global_step=epoch)
237 |
238 | else:
239 | print(f'VAL: RAW : MAE {score[1]:7.2f}; RMSE {score[2]:7.2f}.')
240 | writer.add_scalar('VAL MAE_final', score[1], global_step=epoch)
241 | writer.add_scalar('VAL RMSE_final', score[2], global_step=epoch)
242 |
243 | if self.args.stacks == 2:
244 | print(f'VAL: RAW-Mid : MAE {score1[1]:7.2f}; RMSE {score1[2]:7.2f}.')
245 | writer.add_scalar('VAL MAE_Mid', score1[1], global_step=epoch)
246 | writer.add_scalar('VAL RMSE_Mid', score1[2], global_step=epoch)
247 | writer.add_scalar('VAL Loss_final', loss_F, global_step=epoch)
248 | writer.add_scalar('VAL Loss_Mid', loss_M, global_step=epoch)
249 |
250 | if result_file:
251 | if not os.path.exists(result_file):
252 | os.makedirs(result_file)
253 | step_to_print = 0
254 | forcasting_2d = forecast[:, step_to_print, :]
255 | forcasting_2d_target = target[:, step_to_print, :]
256 |
257 | np.savetxt(f'{result_file}/target.csv', forcasting_2d_target, delimiter=",")
258 | np.savetxt(f'{result_file}/predict.csv', forcasting_2d, delimiter=",")
259 | np.savetxt(f'{result_file}/predict_abs_error.csv',
260 | np.abs(forcasting_2d - forcasting_2d_target), delimiter=",")
261 | np.savetxt(f'{result_file}/predict_ape.csv',
262 | np.abs((forcasting_2d - forcasting_2d_target) / forcasting_2d_target), delimiter=",")
263 |
264 | return dict(mae=score[1], mape=score[0], rmse=score[2])
265 |
266 |
267 | def train(self):
268 | my_optim=self._select_optimizer()
269 | my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=my_optim, gamma=self.args.decay_rate)
270 | test_loader, train_loader, valid_loader,node_cnt,test_normalize_statistic,val_normalize_statistic=self._get_data()
271 | forecast_loss = nn.L1Loss().cuda()
272 | best_validate_mae = np.inf
273 | best_test_mae = np.inf
274 | validate_score_non_decrease_count = 0
275 | writer = SummaryWriter('exp/run_PEMS/{}_scinet'.format(self.args.model_name))
276 |
277 | performance_metrics = {}
278 |
279 | if self.args.resume:
280 | self.model, lr, epoch_start = load_model(self.model, self.result_file, model_name=self.args.dataset, horizon=self.args.horizon)
281 | else:
282 | epoch_start = 0
283 |
284 | for epoch in range(epoch_start, self.args.epoch):
285 | lr = adjust_learning_rate(my_optim, epoch, self.args)
286 | epoch_start_time = time.time()
287 | self.model.train()
288 | loss_total = 0
289 | loss_total_F = 0
290 | loss_total_M = 0
291 | cnt = 0
292 | for i, (inputs, target) in enumerate(train_loader):
293 | inputs = inputs.cuda() # torch.Size([32, 12, 228])
294 | target = target.cuda() # torch.Size([32, 3, 228])
295 | self.model.zero_grad()
296 | if self.args.stacks == 1:
297 | forecast = self.model(inputs)
298 | loss = forecast_loss(forecast, target)
299 | elif self.args.stacks == 2:
300 | forecast, res = self.model(inputs)
301 | loss = forecast_loss(forecast, target) + forecast_loss(res, target)
302 | loss_M = forecast_loss(res, target)
303 | loss_F = forecast_loss(forecast, target)
304 |
305 | cnt += 1
306 | loss.backward()
307 | my_optim.step()
308 | loss_total += float(loss)
309 | if self.args.stacks == 2:
310 | loss_total_F += float(loss_F)
311 | loss_total_M += float(loss_M)
312 | if self.args.stacks == 1:
313 | print('| end of epoch {:3d} | time: {:5.2f}s | train_total_loss {:5.4f} '.format(epoch, (
314 | time.time() - epoch_start_time), loss_total / cnt))
315 | elif self.args.stacks == 2:
316 | print('| end of epoch {:3d} | time: {:5.2f}s | train_total_loss {:5.4f}, loss_F {:5.4f}, loss_M {:5.4f} '.format(epoch, (
317 | time.time() - epoch_start_time), loss_total / cnt, loss_total_F / cnt, loss_total_M / cnt))
318 |
319 | writer.add_scalar('Train_loss_total', loss_total / cnt, global_step=epoch)
320 | if self.args.stacks == 2:
321 | writer.add_scalar('Train_loss_Final', loss_total_F / cnt, global_step=epoch)
322 | writer.add_scalar('Train_loss_Mid', loss_total_M / cnt, global_step=epoch)
323 |
324 | if (epoch+1) % self.args.exponential_decay_step == 0:
325 | my_lr_scheduler.step()
326 | if (epoch + 1) % self.args.validate_freq == 0:
327 | is_best_for_now = False
328 | print('------ validate on data: VALIDATE ------')
329 | performance_metrics = self.validate(self.model, epoch, forecast_loss, valid_loader, self.args.norm_method, val_normalize_statistic,
330 | node_cnt, self.args.window_size, self.args.horizon,
331 | writer, result_file=None, test=False)
332 | test_metrics = self.validate(self.model, epoch, forecast_loss, test_loader, self.args.norm_method, test_normalize_statistic,
333 | node_cnt, self.args.window_size, self.args.horizon,
334 | writer, result_file=None, test=True)
335 | if best_validate_mae > performance_metrics['mae']:
336 | best_validate_mae = performance_metrics['mae']
337 | is_best_for_now = True
338 | validate_score_non_decrease_count = 0
339 | print('got best validation result:',performance_metrics, test_metrics)
340 | else:
341 | validate_score_non_decrease_count += 1
342 | if best_test_mae > test_metrics['mae']:
343 | best_test_mae = test_metrics['mae']
344 | print('got best test result:', test_metrics)
345 |
346 | # save model
347 | if is_best_for_now:
348 | save_model(epoch, lr, model=self.model, model_dir=self.result_file, model_name=self.args.dataset, horizon=self.args.horizon)
349 | print('saved model!')
350 | # early stop
351 | if self.args.early_stop and validate_score_non_decrease_count >= self.args.early_stop_step:
352 | break
353 | return performance_metrics, test_normalize_statistic
354 |
355 | def test(self, epoch=None):
356 | data_file = os.path.join('./datasets/PEMS', self.args.dataset + '.npz')
357 | data = np.load(data_file,allow_pickle=True)
358 | data = data['data'][:,:,0]
359 | train_ratio = self.args.train_length / (self.args.train_length + self.args.valid_length + self.args.test_length)
360 | valid_ratio = self.args.valid_length / (self.args.train_length + self.args.valid_length + self.args.test_length)
361 | test_data = data[int((train_ratio + valid_ratio) * len(data)):]
362 | result_train_file=self.result_train_file
363 | result_test_file=self.result_test_file
364 |
365 | test_mean = np.mean(test_data, axis=0)
366 | test_std = np.std(test_data, axis=0)
367 | normalize_statistic = {"mean": test_mean.tolist(), "std": test_std.tolist()}
368 |
369 | forecast_loss = nn.L1Loss().cuda() #smooth_l1_loss #nn.MSELoss(reduction='mean').cuda()
370 | model = load_model(self.model, self.result_file, model_name=self.args.dataset, horizon=self.args.horizon)[0]  # load_model returns (model, lr, epoch)
371 | node_cnt = test_data.shape[1]
372 | test_set = ForecastTestDataset(test_data, window_size=self.args.window_size, horizon=self.args.horizon,
373 | normalize_method=self.args.norm_method, norm_statistic=normalize_statistic)
374 | test_loader = DataLoader(test_set, batch_size=self.args.batch_size*10, drop_last=False,
375 | shuffle=False, num_workers=0)
376 |         performance_metrics = self.validate(model=model, epoch=100, forecast_loss=forecast_loss, dataloader=test_loader,
377 |                                             normalize_method=self.args.norm_method, statistic=normalize_statistic,
378 |                                             node_cnt=node_cnt, window_size=self.args.window_size, horizon=self.args.horizon, result_file=result_test_file, writer=None, test=True)
379 | mae, rmse, mape = performance_metrics['mae'], performance_metrics['rmse'], performance_metrics['mape']
380 |         print('Performance on test set: | MAE: {:5.2f} | MAPE: {:5.4f} | RMSE: {:5.4f}'.format(mae, mape, rmse))
381 |
--------------------------------------------------------------------------------
/metrics/ETTh_metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def RSE(pred, true):
4 | return np.sqrt(np.sum((true-pred)**2)) / np.sqrt(np.sum((true-true.mean())**2))
5 |
6 | def CORR(pred, true):
7 | u = ((true-true.mean(0))*(pred-pred.mean(0))).sum(0)
8 |     d = np.sqrt(((true-true.mean(0))**2).sum(0) * ((pred-pred.mean(0))**2).sum(0))  # Pearson denominator: sqrt of the product of the per-series sums of squares
9 | return (u/d).mean()
10 |
11 | def Corr(pred, true):
12 | sig_p = np.std(pred, axis=0)
13 | sig_g = np.std(true, axis=0)
14 | m_p = pred.mean(0)
15 | m_g = true.mean(0)
16 | ind = (sig_g != 0)
17 |     corr = ((pred - m_p) * (true - m_g)).mean(0) / (sig_p * sig_g + 1e-8)  # epsilon guards against zero-variance series
18 | corr = (corr[ind]).mean()
19 | return corr
20 |
21 | def MAE(pred, true):
22 | return np.mean(np.abs(pred-true))
23 |
24 | def MSE(pred, true):
25 | return np.mean((pred-true)**2)
26 |
27 | def RMSE(pred, true):
28 | return np.sqrt(MSE(pred, true))
29 |
30 | def MAPE(pred, true):
31 | return np.mean(np.abs((pred - true) / true))
32 |
33 | def MSPE(pred, true):
34 | return np.mean(np.square((pred - true) / true))
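   | # note: MAPE and MSPE divide by the ground truth, so both are unstable
   | # whenever `true` is at or near zero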
35 |
36 | def metric(pred, true):
37 | mae = MAE(pred, true)
38 | mse = MSE(pred, true)
39 | rmse = RMSE(pred, true)
40 | mape = MAPE(pred, true)
41 | mspe = MSPE(pred, true)
42 | #corr1 = CORR(pred, true)
43 | corr = Corr(pred, true)
44 | return mae,mse,rmse,mape,mspe,corr
45 |
--------------------------------------------------------------------------------
/metrics/Finantial_metics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | def MAE(pred, true):
3 | return np.mean(np.abs(pred - true))
4 |
5 |
6 | def MSE(pred, true):
7 | return np.mean((pred - true) ** 2)
--------------------------------------------------------------------------------
/models/SCINet.py:
--------------------------------------------------------------------------------
1 |
2 | import math
3 | import torch.nn.functional as F
4 | from torch.autograd import Variable
5 | from torch import nn
6 | import torch
7 | import argparse
8 | import numpy as np
9 |
10 | class Splitting(nn.Module):
11 | def __init__(self):
12 | super(Splitting, self).__init__()
13 |
14 | def even(self, x):
15 | return x[:, ::2, :]
16 |
17 | def odd(self, x):
18 | return x[:, 1::2, :]
19 |
20 | def forward(self, x):
21 |         '''Return the even- and odd-indexed sub-series, in that order'''
22 | return (self.even(x), self.odd(x))
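   |         # e.g., for x of shape (B, T, D) with T = 8, even(x) keeps time steps
   |         # [0, 2, 4, 6] and odd(x) keeps [1, 3, 5, 7]; both halves are (B, T//2, D)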
23 |
24 |
25 | class Interactor(nn.Module):
26 | def __init__(self, in_planes, splitting=True,
27 | kernel = 5, dropout=0.5, groups = 1, hidden_size = 1, INN = True):
28 | super(Interactor, self).__init__()
29 | self.modified = INN
30 | self.kernel_size = kernel
31 | self.dilation = 1
32 | self.dropout = dropout
33 | self.hidden_size = hidden_size
34 | self.groups = groups
35 | if self.kernel_size % 2 == 0:
36 |             pad_l = self.dilation * (self.kernel_size - 2) // 2 + 1  # by default: stride == 1
37 |             pad_r = self.dilation * (self.kernel_size) // 2 + 1  # the extra +1 on each side compensates for the second conv layer (kernel size 3)
38 | 
39 |         else:
40 |             pad_l = self.dilation * (self.kernel_size - 1) // 2 + 1  # the extra +1 keeps the length unchanged after the second conv layer,
41 |             pad_r = self.dilation * (self.kernel_size - 1) // 2 + 1  # whose kernel size is fixed to 3
42 | self.splitting = splitting
43 | self.split = Splitting()
44 |
45 | modules_P = []
46 | modules_U = []
47 | modules_psi = []
48 | modules_phi = []
49 | prev_size = 1
50 |
51 | size_hidden = self.hidden_size
52 | modules_P += [
53 | nn.ReplicationPad1d((pad_l, pad_r)),
54 |
55 | nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),
56 | kernel_size=self.kernel_size, dilation=self.dilation, stride=1, groups= self.groups),
57 | nn.LeakyReLU(negative_slope=0.01, inplace=True),
58 |
59 | nn.Dropout(self.dropout),
60 | nn.Conv1d(int(in_planes * size_hidden), in_planes,
61 | kernel_size=3, stride=1, groups= self.groups),
62 | nn.Tanh()
63 | ]
64 | modules_U += [
65 | nn.ReplicationPad1d((pad_l, pad_r)),
66 | nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),
67 | kernel_size=self.kernel_size, dilation=self.dilation, stride=1, groups= self.groups),
68 | nn.LeakyReLU(negative_slope=0.01, inplace=True),
69 | nn.Dropout(self.dropout),
70 | nn.Conv1d(int(in_planes * size_hidden), in_planes,
71 | kernel_size=3, stride=1, groups= self.groups),
72 | nn.Tanh()
73 | ]
74 |
75 | modules_phi += [
76 | nn.ReplicationPad1d((pad_l, pad_r)),
77 | nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),
78 | kernel_size=self.kernel_size, dilation=self.dilation, stride=1, groups= self.groups),
79 | nn.LeakyReLU(negative_slope=0.01, inplace=True),
80 | nn.Dropout(self.dropout),
81 | nn.Conv1d(int(in_planes * size_hidden), in_planes,
82 | kernel_size=3, stride=1, groups= self.groups),
83 | nn.Tanh()
84 | ]
85 | modules_psi += [
86 | nn.ReplicationPad1d((pad_l, pad_r)),
87 | nn.Conv1d(in_planes * prev_size, int(in_planes * size_hidden),
88 | kernel_size=self.kernel_size, dilation=self.dilation, stride=1, groups= self.groups),
89 | nn.LeakyReLU(negative_slope=0.01, inplace=True),
90 | nn.Dropout(self.dropout),
91 | nn.Conv1d(int(in_planes * size_hidden), in_planes,
92 | kernel_size=3, stride=1, groups= self.groups),
93 | nn.Tanh()
94 | ]
95 | self.phi = nn.Sequential(*modules_phi)
96 | self.psi = nn.Sequential(*modules_psi)
97 | self.P = nn.Sequential(*modules_P)
98 | self.U = nn.Sequential(*modules_U)
99 |
100 | def forward(self, x):
101 | if self.splitting:
102 | (x_even, x_odd) = self.split(x)
103 | else:
104 | (x_even, x_odd) = x
105 |
106 | if self.modified:
107 | x_even = x_even.permute(0, 2, 1)
108 | x_odd = x_odd.permute(0, 2, 1)
109 |
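   |             # interactive learning between the two halves: each half is rescaled by
   |             # exp() of features extracted from the other half (phi / psi), after
   |             # which the halves exchange additive updates through U and P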
110 | d = x_odd.mul(torch.exp(self.phi(x_even)))
111 | c = x_even.mul(torch.exp(self.psi(x_odd)))
112 |
113 | x_even_update = c + self.U(d)
114 | x_odd_update = d - self.P(c)
115 |
116 | return (x_even_update, x_odd_update)
117 |
118 | else:
119 | x_even = x_even.permute(0, 2, 1)
120 | x_odd = x_odd.permute(0, 2, 1)
121 |
122 | d = x_odd - self.P(x_even)
123 | c = x_even + self.U(d)
124 |
125 | return (c, d)
126 |
127 |
128 | class InteractorLevel(nn.Module):
129 | def __init__(self, in_planes, kernel, dropout, groups , hidden_size, INN):
130 | super(InteractorLevel, self).__init__()
131 | self.level = Interactor(in_planes = in_planes, splitting=True,
132 | kernel = kernel, dropout=dropout, groups = groups, hidden_size = hidden_size, INN = INN)
133 |
134 | def forward(self, x):
135 | (x_even_update, x_odd_update) = self.level(x)
136 | return (x_even_update, x_odd_update)
137 |
138 | class LevelSCINet(nn.Module):
139 | def __init__(self,in_planes, kernel_size, dropout, groups, hidden_size, INN):
140 | super(LevelSCINet, self).__init__()
141 | self.interact = InteractorLevel(in_planes= in_planes, kernel = kernel_size, dropout = dropout, groups =groups , hidden_size = hidden_size, INN = INN)
142 |
143 | def forward(self, x):
144 | (x_even_update, x_odd_update) = self.interact(x)
145 | return x_even_update.permute(0, 2, 1), x_odd_update.permute(0, 2, 1) #even: B, T, D odd: B, T, D
146 |
147 | class SCINet_Tree(nn.Module):
148 | def __init__(self, in_planes, current_level, kernel_size, dropout, groups, hidden_size, INN):
149 | super().__init__()
150 | self.current_level = current_level
151 |
152 |
153 | self.workingblock = LevelSCINet(
154 | in_planes = in_planes,
155 | kernel_size = kernel_size,
156 | dropout = dropout,
157 | groups= groups,
158 | hidden_size = hidden_size,
159 | INN = INN)
160 |
161 |
162 | if current_level!=0:
163 | self.SCINet_Tree_odd=SCINet_Tree(in_planes, current_level-1, kernel_size, dropout, groups, hidden_size, INN)
164 | self.SCINet_Tree_even=SCINet_Tree(in_planes, current_level-1, kernel_size, dropout, groups, hidden_size, INN)
165 |
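   |     # zip_up_the_pants interleaves the processed even/odd halves back into one
   |     # sequence, e.g. even = [e0, e1, e2], odd = [o0, o1] -> [e0, o0, e1, o1, e2],
   |     # inverting the even/odd split performed at this level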
166 | def zip_up_the_pants(self, even, odd):
167 | even = even.permute(1, 0, 2)
168 | odd = odd.permute(1, 0, 2) #L, B, D
169 | even_len = even.shape[0]
170 | odd_len = odd.shape[0]
171 | mlen = min((odd_len, even_len))
172 | _ = []
173 | for i in range(mlen):
174 | _.append(even[i].unsqueeze(0))
175 | _.append(odd[i].unsqueeze(0))
176 | if odd_len < even_len:
177 | _.append(even[-1].unsqueeze(0))
178 | return torch.cat(_,0).permute(1,0,2) #B, L, D
179 |
180 | def forward(self, x):
181 | x_even_update, x_odd_update= self.workingblock(x)
182 |         # The sub-series are recursively split and reordered; run ./utils/recursive_demo.py to emulate this procedure.
183 | if self.current_level ==0:
184 | return self.zip_up_the_pants(x_even_update, x_odd_update)
185 | else:
186 | return self.zip_up_the_pants(self.SCINet_Tree_even(x_even_update), self.SCINet_Tree_odd(x_odd_update))
187 |
188 | class EncoderTree(nn.Module):
189 | def __init__(self, in_planes, num_levels, kernel_size, dropout, groups, hidden_size, INN):
190 | super().__init__()
191 | self.levels=num_levels
192 | self.SCINet_Tree = SCINet_Tree(
193 | in_planes = in_planes,
194 | current_level = num_levels-1,
195 | kernel_size = kernel_size,
196 | dropout =dropout ,
197 | groups = groups,
198 | hidden_size = hidden_size,
199 | INN = INN)
200 |
201 | def forward(self, x):
202 |
203 | x= self.SCINet_Tree(x)
204 |
205 | return x
206 |
207 | class SCINet(nn.Module):
208 | def __init__(self, output_len, input_len, input_dim = 9, hid_size = 1, num_stacks = 1,
209 | num_levels = 3, num_decoder_layer = 1, concat_len = 0, groups = 1, kernel = 5, dropout = 0.5,
210 | single_step_output_One = 0, input_len_seg = 0, positionalE = False, modified = True, RIN=False):
211 | super(SCINet, self).__init__()
212 |
213 | self.input_dim = input_dim
214 | self.input_len = input_len
215 | self.output_len = output_len
216 | self.hidden_size = hid_size
217 | self.num_levels = num_levels
218 | self.groups = groups
219 | self.modified = modified
220 | self.kernel_size = kernel
221 | self.dropout = dropout
222 | self.single_step_output_One = single_step_output_One
223 | self.concat_len = concat_len
224 | self.pe = positionalE
225 | self.RIN=RIN
226 | self.num_decoder_layer = num_decoder_layer
227 |
228 | self.blocks1 = EncoderTree(
229 | in_planes=self.input_dim,
230 | num_levels = self.num_levels,
231 | kernel_size = self.kernel_size,
232 | dropout = self.dropout,
233 | groups = self.groups,
234 | hidden_size = self.hidden_size,
235 | INN = modified)
236 |
237 | if num_stacks == 2: # we only implement two stacks at most.
238 | self.blocks2 = EncoderTree(
239 | in_planes=self.input_dim,
240 | num_levels = self.num_levels,
241 | kernel_size = self.kernel_size,
242 | dropout = self.dropout,
243 | groups = self.groups,
244 | hidden_size = self.hidden_size,
245 | INN = modified)
246 |
247 | self.stacks = num_stacks
248 |
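   |         # note: at this point the module tree only contains Conv1d-based blocks
   |         # (the projection layers are created further below), so none of the
   |         # Conv2d / BatchNorm2d / Linear branches match here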
249 | for m in self.modules():
250 | if isinstance(m, nn.Conv2d):
251 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
252 | m.weight.data.normal_(0, math.sqrt(2. / n))
253 | elif isinstance(m, nn.BatchNorm2d):
254 | m.weight.data.fill_(1)
255 | m.bias.data.zero_()
256 | elif isinstance(m, nn.Linear):
257 | m.bias.data.zero_()
258 | self.projection1 = nn.Conv1d(self.input_len, self.output_len, kernel_size=1, stride=1, bias=False)
259 | self.div_projection = nn.ModuleList()
260 | self.overlap_len = self.input_len//4
261 | self.div_len = self.input_len//6
262 |
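   |         # multi-layer decoder: the time axis is carved into 6 segments of div_len
   |         # steps; segment i reads up to overlap_len consecutive steps starting at
   |         # its own offset (overlapping its neighbours) and projects them back to
   |         # div_len steps through its own Linear layer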
263 | if self.num_decoder_layer > 1:
264 | self.projection1 = nn.Linear(self.input_len, self.output_len)
265 | for layer_idx in range(self.num_decoder_layer-1):
266 | div_projection = nn.ModuleList()
267 | for i in range(6):
268 | lens = min(i*self.div_len+self.overlap_len,self.input_len) - i*self.div_len
269 | div_projection.append(nn.Linear(lens, self.div_len))
270 | self.div_projection.append(div_projection)
271 |
272 | if self.single_step_output_One: # only output the N_th timestep.
273 | if self.stacks == 2:
274 | if self.concat_len:
275 | self.projection2 = nn.Conv1d(self.concat_len + self.output_len, 1,
276 | kernel_size = 1, bias = False)
277 | else:
278 | self.projection2 = nn.Conv1d(self.input_len + self.output_len, 1,
279 | kernel_size = 1, bias = False)
280 | else: # output the N timesteps.
281 | if self.stacks == 2:
282 | if self.concat_len:
283 | self.projection2 = nn.Conv1d(self.concat_len + self.output_len, self.output_len,
284 | kernel_size = 1, bias = False)
285 | else:
286 | self.projection2 = nn.Conv1d(self.input_len + self.output_len, self.output_len,
287 | kernel_size = 1, bias = False)
288 |
289 | # For positional encoding
290 | self.pe_hidden_size = input_dim
291 | if self.pe_hidden_size % 2 == 1:
292 | self.pe_hidden_size += 1
293 |
294 | num_timescales = self.pe_hidden_size // 2
295 | max_timescale = 10000.0
296 | min_timescale = 1.0
297 |
298 | log_timescale_increment = (
299 | math.log(float(max_timescale) / float(min_timescale)) /
300 | max(num_timescales - 1, 1))
301 | temp = torch.arange(num_timescales, dtype=torch.float32)
302 | inv_timescales = min_timescale * torch.exp(
303 | torch.arange(num_timescales, dtype=torch.float32) *
304 | -log_timescale_increment)
305 | self.register_buffer('inv_timescales', inv_timescales)
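   |         # inv_timescales[i] = min_timescale * exp(-i * log(max/min) / (num_timescales - 1)),
   |         # the standard transformer-style geometric frequency schedule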
306 |
307 | ### RIN Parameters ###
308 | if self.RIN:
309 | self.affine_weight = nn.Parameter(torch.ones(1, 1, input_dim))
310 | self.affine_bias = nn.Parameter(torch.zeros(1, 1, input_dim))
311 |
312 | def get_position_encoding(self, x):
313 | max_length = x.size()[1]
314 | position = torch.arange(max_length, dtype=torch.float32, device=x.device) # tensor([0., 1., 2., 3., 4.], device='cuda:0')
315 |         # unsqueeze position to [T, 1] and inv_timescales to [1, C//2] so that
316 |         # their product below broadcasts to [T, C//2]
317 | scaled_time = position.unsqueeze(1) * self.inv_timescales.unsqueeze(0) # 5 256
318 | signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1) #[T, C]
319 | signal = F.pad(signal, (0, 0, 0, self.pe_hidden_size % 2))
320 | signal = signal.view(1, max_length, self.pe_hidden_size)
321 |
322 | return signal
323 |
324 | def forward(self, x):
325 |         assert self.input_len % (np.power(2, self.num_levels)) == 0  # the input length must be divisible by 2^num_levels so every level can split it evenly (e.g., 32 -> 16 -> 8 -> 4 for 3 levels)
326 | if self.pe:
327 | pe = self.get_position_encoding(x)
328 | if pe.shape[2] > x.shape[2]:
329 | x += pe[:, :, :-1]
330 | else:
331 | x += self.get_position_encoding(x)
332 |
333 | ### activated when RIN flag is set ###
334 | if self.RIN:
335 | print('/// RIN ACTIVATED ///\r',end='')
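   |             # RevIN-style per-instance normalization: remove the mean and std of each
   |             # series, then apply a learnable affine transform; the inverse is applied
   |             # to the network outputs before they are returned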
336 | means = x.mean(1, keepdim=True).detach()
337 | #mean
338 | x = x - means
339 | #var
340 | stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False) + 1e-5)
341 | x /= stdev
342 | # affine
343 | # print(x.shape,self.affine_weight.shape,self.affine_bias.shape)
344 | x = x * self.affine_weight + self.affine_bias
345 |
346 | # the first stack
347 | res1 = x
348 | x = self.blocks1(x)
349 | x += res1
350 | if self.num_decoder_layer == 1:
351 | x = self.projection1(x)
352 | else:
353 | x = x.permute(0,2,1)
354 | for div_projection in self.div_projection:
355 |                 output = torch.zeros(x.shape, dtype=x.dtype, device=x.device)  # allocate on the same device as x instead of hard-coding .cuda()
356 | for i, div_layer in enumerate(div_projection):
357 | div_x = x[:,:,i*self.div_len:min(i*self.div_len+self.overlap_len,self.input_len)]
358 | output[:,:,i*self.div_len:(i+1)*self.div_len] = div_layer(div_x)
359 | x = output
360 | x = self.projection1(x)
361 | x = x.permute(0,2,1)
362 |
363 | if self.stacks == 1:
364 | ### reverse RIN ###
365 | if self.RIN:
366 | x = x - self.affine_bias
367 | x = x / (self.affine_weight + 1e-10)
368 | x = x * stdev
369 | x = x + means
370 |
371 | return x
372 |
373 | elif self.stacks == 2:
374 | MidOutPut = x
375 | if self.concat_len:
376 | x = torch.cat((res1[:, -self.concat_len:,:], x), dim=1)
377 | else:
378 | x = torch.cat((res1, x), dim=1)
379 |
380 | # the second stack
381 | res2 = x
382 | x = self.blocks2(x)
383 | x += res2
384 | x = self.projection2(x)
385 |
386 | ### Reverse RIN ###
387 | if self.RIN:
388 | MidOutPut = MidOutPut - self.affine_bias
389 | MidOutPut = MidOutPut / (self.affine_weight + 1e-10)
390 | MidOutPut = MidOutPut * stdev
391 | MidOutPut = MidOutPut + means
392 |
393 | if self.RIN:
394 | x = x - self.affine_bias
395 | x = x / (self.affine_weight + 1e-10)
396 | x = x * stdev
397 | x = x + means
398 |
399 | return x, MidOutPut
400 |
401 |
402 | def get_variable(x):
403 | x = Variable(x)
404 | return x.cuda() if torch.cuda.is_available() else x
405 |
406 | if __name__ == '__main__':
407 | parser = argparse.ArgumentParser()
408 |
409 | parser.add_argument('--window_size', type=int, default=96)
410 | parser.add_argument('--horizon', type=int, default=12)
411 |
412 | parser.add_argument('--dropout', type=float, default=0.5)
413 | parser.add_argument('--groups', type=int, default=1)
414 |
415 | parser.add_argument('--hidden-size', default=1, type=int, help='hidden channel of module')
416 | parser.add_argument('--INN', default=1, type=int, help='use INN or basic strategy')
417 | parser.add_argument('--kernel', default=3, type=int, help='kernel size')
418 | parser.add_argument('--dilation', default=1, type=int, help='dilation')
419 | parser.add_argument('--positionalEcoding', type=bool, default=True)
420 |
421 | parser.add_argument('--single_step_output_One', type=int, default=0)
422 |
423 | args = parser.parse_args()
424 |
425 | model = SCINet(output_len = args.horizon, input_len= args.window_size, input_dim = 9, hid_size = args.hidden_size, num_stacks = 1,
426 | num_levels = 3, concat_len = 0, groups = args.groups, kernel = args.kernel, dropout = args.dropout,
427 | single_step_output_One = args.single_step_output_One, positionalE = args.positionalEcoding, modified = True).cuda()
428 | x = torch.randn(32, 96, 9).cuda()
429 | y = model(x)
430 | print(y.shape)
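   |     # expected output: torch.Size([32, 12, 9]) -- (batch, horizon, input_dim)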
431 |
--------------------------------------------------------------------------------
/models/SCINet_decompose.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import math
4 | import torch.nn.functional as F
5 | from torch.autograd import Variable
6 | from torch import nn
7 | import torch
8 | import argparse
9 | import numpy as np
10 | from models.SCINet import EncoderTree
11 |
12 | class moving_avg(nn.Module):
13 | """
14 | Moving average block to highlight the trend of time series
15 | """
16 | def __init__(self, kernel_size, stride):
17 | super(moving_avg, self).__init__()
18 | self.kernel_size = kernel_size
19 | self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)
20 |
21 | def forward(self, x):
22 |         # pad both ends of the time series by repeating the boundary values
23 | front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
24 | end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
25 | x = torch.cat([front, x, end], dim=1)
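   |         # after replication padding the length is T + kernel_size - 1, so the
   |         # stride-1 AvgPool1d below returns exactly T steps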
26 | x = self.avg(x.permute(0, 2, 1))
27 | x = x.permute(0, 2, 1)
28 | return x
29 |
30 |
31 | class series_decomp(nn.Module):
32 | """
33 | Series decomposition block
34 | """
35 | def __init__(self, kernel_size):
36 | super(series_decomp, self).__init__()
37 | self.moving_avg = moving_avg(kernel_size, stride=1)
38 |
39 | def forward(self, x):
40 | moving_mean = self.moving_avg(x)
41 | res = x - moving_mean
42 | return res, moving_mean
43 |
44 | class SCINet_decompose(nn.Module):
45 | def __init__(self, output_len, input_len, input_dim = 9, hid_size = 1, num_stacks = 1,
46 | num_levels = 3, concat_len = 0, groups = 1, kernel = 5, dropout = 0.5,
47 | single_step_output_One = 0, input_len_seg = 0, positionalE = False, modified = True, RIN=False):
48 | super(SCINet_decompose, self).__init__()
49 |
50 | self.input_dim = input_dim
51 | self.input_len = input_len
52 | self.output_len = output_len
53 | self.hidden_size = hid_size
54 | self.num_levels = num_levels
55 | self.groups = groups
56 | self.modified = modified
57 | self.kernel_size = kernel
58 | self.dropout = dropout
59 | self.single_step_output_One = single_step_output_One
60 | self.concat_len = concat_len
61 | self.pe = positionalE
62 | self.RIN=RIN
63 | self.decomp = series_decomp(25)
64 | self.trend = nn.Linear(input_len,input_len)
65 | self.trend_dec = nn.Linear(input_len,output_len)
66 | self.blocks1 = EncoderTree(
67 | in_planes=self.input_dim,
68 | num_levels = self.num_levels,
69 | kernel_size = self.kernel_size,
70 | dropout = self.dropout,
71 | groups = self.groups,
72 | hidden_size = self.hidden_size,
73 | INN = modified)
74 |
75 | if num_stacks == 2: # we only implement two stacks at most.
76 | self.blocks2 = EncoderTree(
77 | in_planes=self.input_dim,
78 | num_levels = self.num_levels,
79 | kernel_size = self.kernel_size,
80 | dropout = self.dropout,
81 | groups = self.groups,
82 | hidden_size = self.hidden_size,
83 | INN = modified)
84 |
85 | self.stacks = num_stacks
86 |
87 | for m in self.modules():
88 | if isinstance(m, nn.Conv2d):
89 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
90 | m.weight.data.normal_(0, math.sqrt(2. / n))
91 | elif isinstance(m, nn.BatchNorm2d):
92 | m.weight.data.fill_(1)
93 | m.bias.data.zero_()
94 | elif isinstance(m, nn.Linear):
95 | m.bias.data.zero_()
96 | self.projection1 = nn.Conv1d(self.input_len, self.output_len, kernel_size=1, stride=1, bias=False)
97 | if self.single_step_output_One: # only output the N_th timestep.
98 | if self.stacks == 2:
99 | if self.concat_len:
100 | self.projection2 = nn.Conv1d(self.concat_len + self.output_len, 1,
101 | kernel_size = 1, bias = False)
102 | else:
103 | self.projection2 = nn.Conv1d(self.input_len + self.output_len, 1,
104 | kernel_size = 1, bias = False)
105 | else: # output the N timesteps.
106 | if self.stacks == 2:
107 | if self.concat_len:
108 | self.projection2 = nn.Conv1d(self.concat_len + self.output_len, self.output_len,
109 | kernel_size = 1, bias = False)
110 | else:
111 | self.projection2 = nn.Conv1d(self.input_len + self.output_len, self.output_len,
112 | kernel_size = 1, bias = False)
113 |
114 | # For positional encoding
115 | self.pe_hidden_size = input_dim
116 | if self.pe_hidden_size % 2 == 1:
117 | self.pe_hidden_size += 1
118 |
119 | num_timescales = self.pe_hidden_size // 2
120 | max_timescale = 10000.0
121 | min_timescale = 1.0
122 |
123 | log_timescale_increment = (
124 | math.log(float(max_timescale) / float(min_timescale)) /
125 | max(num_timescales - 1, 1))
126 | temp = torch.arange(num_timescales, dtype=torch.float32)
127 | inv_timescales = min_timescale * torch.exp(
128 | torch.arange(num_timescales, dtype=torch.float32) *
129 | -log_timescale_increment)
130 | self.register_buffer('inv_timescales', inv_timescales)
131 |
132 | ### RIN Parameters ###
133 | if self.RIN:
134 | self.affine_weight = nn.Parameter(torch.ones(1, 1, input_dim))
135 | self.affine_bias = nn.Parameter(torch.zeros(1, 1, input_dim))
136 | self.affine_weight2 = nn.Parameter(torch.ones(1, 1, input_dim))
137 | self.affine_bias2 = nn.Parameter(torch.zeros(1, 1, input_dim))
138 |
139 | def get_position_encoding(self, x):
140 | max_length = x.size()[1]
141 | position = torch.arange(max_length, dtype=torch.float32, device=x.device) # tensor([0., 1., 2., 3., 4.], device='cuda:0')
142 |         # unsqueeze position to [T, 1] and inv_timescales to [1, C//2] so that
143 |         # their product below broadcasts to [T, C//2]
144 | scaled_time = position.unsqueeze(1) * self.inv_timescales.unsqueeze(0) # 5 256
145 | signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1) #[T, C]
146 | signal = F.pad(signal, (0, 0, 0, self.pe_hidden_size % 2))
147 | signal = signal.view(1, max_length, self.pe_hidden_size)
148 |
149 | return signal
150 |
151 | def forward(self, x):
152 |         assert self.input_len % (np.power(2, self.num_levels)) == 0  # the input length must be divisible by 2^num_levels so every level can split it evenly (e.g., 32 -> 16 -> 8 -> 4 for 3 levels)
153 | x, trend = self.decomp(x)
154 |
155 | if self.RIN:
156 | means = x.mean(1, keepdim=True).detach()
157 | x = x - means
158 | stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False) + 1e-5)
159 | x /= stdev
160 | # seq_means = x[:,-1,:].unsqueeze(1).repeat(1,self.input_len,1).detach()
161 | # pred_means = x[:,-1,:].unsqueeze(1).repeat(1,self.output_len,1).detach()
162 | # x = x - seq_means
163 | x = x * self.affine_weight + self.affine_bias
164 |
165 | # print('/// RIN ACTIVATED ///\r',end='')
166 | means2 = trend.mean(1, keepdim=True).detach()
167 | trend = trend - means2
168 | stdev2 = torch.sqrt(torch.var(trend, dim=1, keepdim=True, unbiased=False) + 1e-5)
169 | trend /= stdev2
170 | # seq_means2 = trend[:,-1,:].unsqueeze(1).repeat(1,self.input_len,1).detach()
171 | # pred_means2 = trend[:,-1,:].unsqueeze(1).repeat(1,self.output_len,1).detach()
172 | # trend = trend - seq_means2
173 | trend = trend * self.affine_weight2 + self.affine_bias2
174 |
175 |
176 | if self.pe:
177 | pe = self.get_position_encoding(x)
178 | if pe.shape[2] > x.shape[2]:
179 | x = x + pe[:, :, :-1]
180 | else:
181 | x = x + self.get_position_encoding(x)
182 |
183 | ### activated when RIN flag is set ###
184 |
185 |
186 | # the first stack
187 | res1 = x
188 | x = self.blocks1(x)
189 | x = self.projection1(x)
190 |
191 | trend = trend.permute(0,2,1)
192 | trend = self.trend(trend)
193 | trend = self.trend_dec(trend).permute(0,2,1)
194 |
195 | if self.stacks == 1:
196 | ### reverse RIN ###
197 | if self.RIN:
198 | x = x - self.affine_bias
199 | x = x / (self.affine_weight + 1e-10)
200 | # x = x + pred_means
201 | x = x * stdev
202 | x = x + means
203 |
204 | trend = trend - self.affine_bias2
205 | trend = trend / (self.affine_weight2 + 1e-10)
206 | # trend = trend + pred_means2
207 | trend = trend * stdev2
208 | trend = trend + means2
209 |
210 | return x + trend
211 |
212 | elif self.stacks == 2:
213 | MidOutPut = x
214 | if self.concat_len:
215 | x = torch.cat((res1[:, -self.concat_len:,:], x), dim=1)
216 | else:
217 | x = torch.cat((res1, x), dim=1)
218 |
219 | # the second stack
220 | x = self.blocks2(x)
221 | x = self.projection2(x)
222 |
223 | ### Reverse RIN ###
224 | if self.RIN:
225 | MidOutPut = MidOutPut - self.affine_bias
226 | MidOutPut = MidOutPut / (self.affine_weight + 1e-10)
227 | MidOutPut = MidOutPut * stdev
228 | MidOutPut = MidOutPut + means
229 |
230 | x = x - self.affine_bias
231 | x = x / (self.affine_weight + 1e-10)
232 | x = x * stdev
233 | x = x + means
234 |
235 | trend = trend - self.affine_bias2
236 | trend = trend / (self.affine_weight2 + 1e-10)
237 | # trend = trend + pred_means2
238 | trend = trend * stdev2
239 | trend = trend + means2
240 |
241 | return x + trend, MidOutPut
242 |
243 |
244 | def get_variable(x):
245 | x = Variable(x)
246 | return x.cuda() if torch.cuda.is_available() else x
247 |
248 | if __name__ == '__main__':
249 | parser = argparse.ArgumentParser()
250 |
251 | parser.add_argument('--window_size', type=int, default=96)
252 | parser.add_argument('--horizon', type=int, default=12)
253 |
254 | parser.add_argument('--dropout', type=float, default=0.5)
255 | parser.add_argument('--groups', type=int, default=1)
256 |
257 | parser.add_argument('--hidden-size', default=1, type=int, help='hidden channel of module')
258 | parser.add_argument('--INN', default=1, type=int, help='use INN or basic strategy')
259 | parser.add_argument('--kernel', default=3, type=int, help='kernel size')
260 | parser.add_argument('--dilation', default=1, type=int, help='dilation')
261 | parser.add_argument('--positionalEcoding', type=bool, default=True)
262 |
263 | parser.add_argument('--single_step_output_One', type=int, default=0)
264 |
265 | args = parser.parse_args()
266 |
267 | model = SCINet_decompose(output_len = args.horizon, input_len= args.window_size, input_dim = 9, hid_size = args.hidden_size, num_stacks = 1,
268 | num_levels = 3, concat_len = 0, groups = args.groups, kernel = args.kernel, dropout = args.dropout,
269 | single_step_output_One = args.single_step_output_One, positionalE = args.positionalEcoding, modified = True).cuda()
270 | x = torch.randn(32, 96, 9).cuda()
271 | y = model(x)
272 | print(y.shape)
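   |     # expected output: torch.Size([32, 12, 9]) -- seasonal and trend forecasts summed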
273 |
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cure-lab/SCINet/02e6b0af2d58243de09aaa1eac3840237b659847/models/__init__.py
--------------------------------------------------------------------------------
/plot.py:
--------------------------------------------------------------------------------
1 | import matplotlib
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | # first save the forecasted output on the testset in the inference stage
6 | # then load them as follows:
7 | gt = np.load('results/modelTCN_Levels4_hid32_dataETTh1_ftM_sl96_ll48_pl48_lr0.009_bs16_hid4.0_s1_l3_dp0.25_invFalse_itr0_ind0/true_scale.npy', allow_pickle=True)[:2810]
8 | pred_tcn = np.load('results/TCN_Levels4_hid32_dataETTh1_ftM_sl96_ll48_pl48_lr0.009_bs16_hid4.0_s1_l3_dp0.25_invFalse_itr0_ind0/pred_scale.npy', allow_pickle=True)[:2810]
9 | pred_informer = np.load('results/informer_ETTh1_ftM_sl96_ll48_pl48_dm512_nh8_el2_dl1_df2048_atprob_fc5_ebtimeF_dtTrue_mxTrue_Exp_0/pred.npy', allow_pickle=True)[:2810]
10 | pred_scinet = np.load('results/SCINET_ETTh1_ftM_sl96_ll48_pl48_mxTrue_test_lr0.009_bs16_hid4.0_s1_l3_dp0.25_invFalse_itr0/pred.npy', allow_pickle=True)[:2810]
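   | # each loaded array is expected to be (num_windows, pred_len, num_variates);
   | # `index` below selects a single forecasting window to plot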
11 |
12 | index = 1388
13 | for i in range(0,7):
14 | # plot 7 variates in ETTh1 dataset
15 | fig = plt.figure(figsize=(8,6))
16 |     plt.title('Prediction results on ETTh1 under the In-96-Out-48 setting')
17 | plt.plot(pred_informer[index,:,i],color=(168/255, 218/255, 220/255), marker='v',label = 'Informer')
18 | plt.plot(pred_tcn[index,:,i],color=(69/255, 123/255, 157/255),marker='v', label = 'TCN')
19 | plt.plot(pred_scinet[index,:,i],color=(218/255, 85/255, 82/255), marker='v',label = 'SCINet')
20 | plt.plot(gt[index,:,i],color=(167/255, 30/255, 52/255), marker='o', label = 'Ground Truth')
21 | plt.xticks(fontsize=14)
22 | plt.yticks(fontsize=14)
23 | plt.legend(loc=9,fontsize=14,ncol=4)
24 | plt.grid(True)
25 | plt.xlabel('Future Time Steps', fontsize=14)
26 | plt.ylabel('Prediction Results', fontsize=14)
27 | plt.tight_layout()
28 |     plt.savefig('ETTh1_M_i96o48_denorm_i{}.png'.format(i), dpi=300)
29 | plt.close()
30 |
--------------------------------------------------------------------------------
/prepare_data.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | pip install gdown
3 |
4 | mkdir -p datasets
5 | cd datasets
6 |
7 | mkdir -p PEMS
8 | cd PEMS
9 |
10 | gdown "https://drive.google.com/uc?id=1UQ-Mz-AJLieia-woSMtx8aW6igxPDDxL"
11 | gdown "https://drive.google.com/uc?id=1mjM4xCf2GKWM5w6sCGuiK-o-RbRAZGMF"
12 | gdown "https://drive.google.com/uc?id=1tekQqMPFjtT0I4JyV-SgToLn4K0KxNdf"
13 | gdown "https://drive.google.com/uc?id=1IqbRJYuvxIwuaK1jpXvzrsDa7LxoD9zL"
14 |
15 | cd ..
16 |
17 | mkdir -p financial
18 | cd financial
19 |
20 | gdown "https://drive.google.com/uc?id=1ttSg9i3bzTI77oVoUU-odi67_NVbzFBx"
21 | gdown "https://drive.google.com/uc?id=1zSKR2tORND40BBheWgtoCwgx1pTNntXM"
22 | gdown "https://drive.google.com/uc?id=1MGIl1Aqnek0rPoPyqgS_Wzo5eQgIjihh"
23 | gdown "https://drive.google.com/uc?id=1bp9J5PeA4lbj1wPXa4oxDX-vXuG_UyNz"
24 |
25 | cd ..
26 |
27 | mkdir -p ETT-data
28 | cd ETT-data
29 | gdown "https://drive.google.com/uc?id=10D9h6dVrlXknwYgYdnct8OfIaCRzIQXD"
30 | gdown "https://drive.google.com/uc?id=18S5BrHOLrgqmTba2pOLWNxldIT9hrEGd"
31 | gdown "https://drive.google.com/uc?id=1bxBD_uN1Gt3Tyn8Vb71ciAYyIbL4sZl1"
32 | cd ..
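   | # resulting layout: datasets/{PEMS,financial,ETT-data}/...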
33 |
34 |
35 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.19.4
2 | torch==1.8.0
3 | pandas
4 | scikit_learn
5 | tensorboard
6 |
--------------------------------------------------------------------------------
/run_ETTh.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import torch
4 | import numpy as np
5 | from torch.utils.tensorboard import SummaryWriter
6 | os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
7 | from experiments.exp_ETTh import Exp_ETTh
8 |
9 | parser = argparse.ArgumentParser(description='SCINet on ETT dataset')
10 |
11 |
12 | parser.add_argument('--model', type=str, required=False, default='SCINet', help='model of the experiment')
13 | ### ------- dataset settings --------------
14 | parser.add_argument('--data', type=str, required=False, default='ETTh1', choices=['ETTh1', 'ETTh2', 'ETTm1'], help='name of dataset')
15 | parser.add_argument('--root_path', type=str, default='./datasets/ETT-data/', help='root path of the data file')
16 | parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='location of the data file')
17 | parser.add_argument('--features', type=str, default='M', choices=['S', 'M'], help='features S is univariate, M is multivariate')
18 | parser.add_argument('--target', type=str, default='OT', help='target feature')
19 | parser.add_argument('--freq', type=str, default='h', help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
20 | parser.add_argument('--checkpoints', type=str, default='exp/ETT_checkpoints/', help='location of model checkpoints')
21 | parser.add_argument('--inverse', type=bool, default =False, help='denorm the output data')
22 | parser.add_argument('--embed', type=str, default='timeF', help='time features encoding, options:[timeF, fixed, learned]')
23 |
24 |
25 | ### ------- device settings --------------
26 | parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
27 | parser.add_argument('--gpu', type=int, default=0, help='gpu')
28 | parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
29 | parser.add_argument('--devices', type=str, default='0',help='device ids of multile gpus')
30 |
31 | ### ------- input/output length settings --------------
32 | parser.add_argument('--seq_len', type=int, default=96, help='input sequence length of SCINet encoder, look back window')
33 | parser.add_argument('--label_len', type=int, default=48, help='start token length of Informer decoder')
34 | parser.add_argument('--pred_len', type=int, default=48, help='prediction sequence length, horizon')
35 | parser.add_argument('--concat_len', type=int, default=0)
36 | parser.add_argument('--single_step', type=int, default=0)
37 | parser.add_argument('--single_step_output_One', type=int, default=0)
38 | parser.add_argument('--lastWeight', type=float, default=1.0)
39 |
40 | ### ------- training settings --------------
41 | parser.add_argument('--cols', type=str, nargs='+', help='file list')
42 | parser.add_argument('--num_workers', type=int, default=0, help='data loader num workers')
43 | parser.add_argument('--itr', type=int, default=0, help='number of experiment repetitions')
44 | parser.add_argument('--train_epochs', type=int, default=100, help='train epochs')
45 | parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
46 | parser.add_argument('--patience', type=int, default=5, help='early stopping patience')
47 | parser.add_argument('--lr', type=float, default=0.0001, help='optimizer learning rate')
48 | parser.add_argument('--loss', type=str, default='mae',help='loss function')
49 | parser.add_argument('--lradj', type=int, default=1,help='adjust learning rate')
50 | parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
51 | parser.add_argument('--save', type=bool, default =False, help='save the output results')
52 | parser.add_argument('--model_name', type=str, default='SCINet')
53 | parser.add_argument('--resume', type=bool, default=False)
54 | parser.add_argument('--evaluate', type=bool, default=False)
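   | # caveat: argparse's type=bool treats any non-empty string as True, so enable
   | # these options by presence (e.g. --evaluate True) and disable them by omission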
55 |
56 | ### ------- model settings --------------
57 | parser.add_argument('--hidden-size', default=1, type=float, help='hidden channel of module')
58 | parser.add_argument('--INN', default=1, type=int, help='use INN or basic strategy')
59 | parser.add_argument('--kernel', default=5, type=int, help='kernel size, 3, 5, 7')
60 | parser.add_argument('--dilation', default=1, type=int, help='dilation')
61 | parser.add_argument('--window_size', default=12, type=int, help='input size')
62 | parser.add_argument('--dropout', type=float, default=0.5, help='dropout')
63 | parser.add_argument('--positionalEcoding', type=bool, default=False)
64 | parser.add_argument('--groups', type=int, default=1)
65 | parser.add_argument('--levels', type=int, default=3)
66 | parser.add_argument('--stacks', type=int, default=1, help='1 stack or 2 stacks')
67 | parser.add_argument('--num_decoder_layer', type=int, default=1)
68 | parser.add_argument('--RIN', type=bool, default=False)
69 | parser.add_argument('--decompose', type=bool,default=False)
70 |
71 | args = parser.parse_args()
72 |
73 | args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
74 |
75 | if args.use_gpu and args.use_multi_gpu:
76 | args.devices = args.devices.replace(' ', '')
77 | device_ids = args.devices.split(',')
78 | args.device_ids = [int(id_) for id_ in device_ids]
79 | args.gpu = args.device_ids[0]
80 |
81 | data_parser = {
82 | 'ETTh1': {'data': 'ETTh1.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
83 | 'ETTh2': {'data': 'ETTh2.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
84 | 'ETTm1': {'data': 'ETTm1.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
85 | 'ETTm2': {'data': 'ETTm2.csv', 'T': 'OT', 'M': [7, 7, 7], 'S': [1, 1, 1], 'MS': [7, 7, 1]},
86 | 'WTH': {'data': 'WTH.csv', 'T': 'WetBulbCelsius', 'M': [12, 12, 12], 'S': [1, 1, 1], 'MS': [12, 12, 1]},
87 | 'ECL': {'data': 'ECL.csv', 'T': 'MT_320', 'M': [321, 321, 321], 'S': [1, 1, 1], 'MS': [321, 321, 1]},
88 | 'Solar': {'data': 'solar_AL.csv', 'T': 'POWER_136', 'M': [137, 137, 137], 'S': [1, 1, 1], 'MS': [137, 137, 1]},
89 | }
90 | if args.data in data_parser.keys():
91 | data_info = data_parser[args.data]
92 | args.data_path = data_info['data']
93 | args.target = data_info['T']
94 | args.enc_in, args.dec_in, args.c_out = data_info[args.features]
95 |
96 | args.detail_freq = args.freq
97 | args.freq = args.freq[-1:]
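   | # detail_freq keeps the full frequency spec (e.g. '15min') for prediction, while
   | # freq is reduced to its last character for time-feature encoding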
98 |
99 | print('Args in experiment:')
100 | print(args)
101 |
102 | torch.manual_seed(4321) # reproducible
103 | torch.cuda.manual_seed_all(4321)
104 | torch.backends.cudnn.benchmark = False
105 | torch.backends.cudnn.deterministic = True  # force deterministic kernels for reproducibility (PyTorch default is False)
106 | torch.backends.cudnn.enabled = True
107 |
108 | Exp = Exp_ETTh
109 |
110 | mae_ = []
111 | maes_ = []
112 | mse_ = []
113 | mses_ = []
114 |
115 | if args.evaluate:
116 | setting = '{}_{}_ft{}_sl{}_ll{}_pl{}_lr{}_bs{}_hid{}_s{}_l{}_dp{}_inv{}_itr0'.format(args.model,args.data, args.features, args.seq_len, args.label_len, args.pred_len,args.lr,args.batch_size,args.hidden_size,args.stacks, args.levels,args.dropout,args.inverse)
117 | exp = Exp(args) # set experiments
118 | print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
119 | mae, maes, mse, mses = exp.test(setting, evaluate=True)
120 | print('Final mean normed mse:{:.4f},mae:{:.4f},denormed mse:{:.4f},mae:{:.4f}'.format(mse, mae, mses, maes))
121 | else:
122 | if args.itr:
123 | for ii in range(args.itr):
124 | # setting record of experiments
125 | setting = '{}_{}_ft{}_sl{}_ll{}_pl{}_lr{}_bs{}_hid{}_s{}_l{}_dp{}_inv{}_itr{}'.format(args.model,args.data, args.features, args.seq_len, args.label_len, args.pred_len,args.lr,args.batch_size,args.hidden_size,args.stacks, args.levels,args.dropout,args.inverse,ii)
126 |
127 | exp = Exp(args) # set experiments
128 | print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
129 | exp.train(setting)
130 |
131 | print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
132 | mae, maes, mse, mses = exp.test(setting)
133 | mae_.append(mae)
134 | mse_.append(mse)
135 | maes_.append(maes)
136 | mses_.append(mses)
137 |
138 | torch.cuda.empty_cache()
139 | print('Final mean normed mse:{:.4f}, std mse:{:.4f}, mae:{:.4f}, std mae:{:.4f}'.format(np.mean(mse_), np.std(mse_), np.mean(mae_),np.std(mae_)))
140 | print('Final mean denormed mse:{:.4f}, std mse:{:.4f}, mae:{:.4f}, std mae:{:.4f}'.format(np.mean(mses_),np.std(mses_), np.mean(maes_), np.std(maes_)))
141 | print('Final min normed mse:{:.4f}, mae:{:.4f}'.format(min(mse_), min(mae_)))
142 | print('Final min denormed mse:{:.4f}, mae:{:.4f}'.format(min(mses_), min(maes_)))
143 | else:
144 | setting = '{}_{}_ft{}_sl{}_ll{}_pl{}_lr{}_bs{}_hid{}_s{}_l{}_dp{}_inv{}_itr0'.format(args.model,args.data, args.features, args.seq_len, args.label_len, args.pred_len,args.lr,args.batch_size,args.hidden_size,args.stacks, args.levels,args.dropout,args.inverse)
145 | exp = Exp(args) # set experiments
146 | print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
147 | exp.train(setting)
148 |
149 | print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
150 | mae, maes, mse, mses = exp.test(setting)
151 | print('Final mean normed mse:{:.4f},mae:{:.4f},denormed mse:{:.4f},mae:{:.4f}'.format(mse, mae, mses, maes))
152 |
153 |
154 |
155 |
--------------------------------------------------------------------------------
/run_financial.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | from datetime import datetime
4 | from experiments.exp_financial import Exp_financial
5 | import argparse
6 | import pandas as pd
7 | import numpy as np
8 | from torch.utils.tensorboard import SummaryWriter
9 |
10 | parser = argparse.ArgumentParser(description='SCINet on financial datasets')
11 | ### ------- dataset settings --------------
12 | parser.add_argument('--dataset_name', type=str, default='exchange_rate', choices=['electricity', 'solar_AL', 'exchange_rate', 'traffic'])
13 | parser.add_argument('--data', type=str, default='./datasets/exchange_rate.txt',
14 | help='location of the data file')
15 | parser.add_argument('--normalize', type=int, default=2)
16 |
17 | ### ------- device settings --------------
18 | parser.add_argument('--device',type=str,default='cuda:0',help='')
19 | parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
20 | parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
21 | parser.add_argument('--gpu', type=int, default=0, help='gpu')
22 |
23 | ### ------- input/output length settings --------------
24 | parser.add_argument('--window_size', type=int, default=168, help='input length')
25 | parser.add_argument('--horizon', type=int, default=3, help='prediction length')
26 | parser.add_argument('--concat_len', type=int, default=165)
27 | parser.add_argument('--single_step', type=int, default=0, help='only supervise the final step')
28 | parser.add_argument('--single_step_output_One', type=int, default=0, help='only output the single final step')
29 | parser.add_argument('--lastWeight', type=float, default=1.0,help='Loss weight lambda on the final step')
30 |
31 | ### ------- training settings --------------
32 | parser.add_argument('--train', type=bool, default=True)
33 | parser.add_argument('--resume', type=bool, default=False)
34 | parser.add_argument('--evaluate', type=bool, default=False)
35 | parser.add_argument('--log_interval', type=int, default=2000, metavar='N',
36 | help='report interval')
37 | parser.add_argument('--save', type=str, default='model/model.pt',
38 | help='path to save the final model')
39 | parser.add_argument('--optim', type=str, default='adam')
40 | parser.add_argument('--L1Loss', type=bool, default=True)
41 | parser.add_argument('--num_nodes',type=int,default=8,help='number of nodes/variables')
42 | parser.add_argument('--batch_size',type=int,default=8,help='batch size')
43 | parser.add_argument('--lr',type=float,default=5e-3,help='learning rate')
44 | parser.add_argument('--weight_decay',type=float,default=0.00001,help='weight decay rate')
45 | parser.add_argument('--epochs',type=int,default=100,help='')
46 | parser.add_argument('--lradj', type=int, default=1,help='adjust learning rate')
47 | parser.add_argument('--save_path', type=str, default='exp/financial_checkpoints/')
48 | parser.add_argument('--model_name', type=str, default='SCINet')
49 |
50 | ### ------- model settings --------------
51 | parser.add_argument('--hidden-size', default=1.0, type=float, help='hidden channel of module')# H, EXPANSION RATE
52 | parser.add_argument('--INN', default=1, type=int, help='use INN or basic strategy')
53 | parser.add_argument('--kernel', default=5, type=int, help='kernel size')#k kernel size
54 | parser.add_argument('--dilation', default=1, type=int, help='dilation')
55 | parser.add_argument('--positionalEcoding', type = bool , default=False)
56 | parser.add_argument('--dropout', type=float, default=0.5)
57 | parser.add_argument('--groups', type=int, default=1)
58 | parser.add_argument('--levels', type=int, default=3)
59 | parser.add_argument('--num_decoder_layer', type=int, default=1)
60 | parser.add_argument('--stacks', type=int, default=1)
61 | parser.add_argument('--long_term_forecast', action='store_true', default=False)
62 | parser.add_argument('--RIN', type=bool, default=False)
63 | parser.add_argument('--decompose', type=bool,default=False)
64 |
65 | args = parser.parse_args()
66 |
67 | if not args.long_term_forecast:
68 | args.concat_len = args.window_size - args.horizon
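   |     # for short-term forecasting the second stack concatenates the last
   |     # (window_size - horizon) input steps with the first stack's output, so the
   |     # concatenated length again equals window_size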
69 |
70 | if __name__ == '__main__':
71 |
72 | torch.manual_seed(4321) # reproducible
73 | torch.cuda.manual_seed_all(4321)
74 | torch.backends.cudnn.benchmark = False
75 |     torch.backends.cudnn.deterministic = True  # force deterministic kernels for reproducibility (PyTorch default is False)
76 | torch.backends.cudnn.enabled = True
77 |
78 | Exp=Exp_financial
79 | exp=Exp(args)
80 |
81 | if args.evaluate:
82 | data=exp._get_data()
83 | before_evaluation = datetime.now().timestamp()
84 | if args.stacks == 1:
85 | rse, rae, correlation = exp.validate(data,data.test[0],data.test[1], evaluate=True)
86 | else:
87 | rse, rae, correlation,rse_mid, rae_mid, correlation_mid = exp.validate(data,data.test[0],data.test[1], evaluate=True)
88 | after_evaluation = datetime.now().timestamp()
89 | print(f'Evaluation took {(after_evaluation - before_evaluation) / 60} minutes')
90 |
91 | elif args.train or args.resume:
92 | data=exp._get_data()
93 | before_train = datetime.now().timestamp()
94 | print("===================Normal-Start=========================")
95 | normalize_statistic = exp.train()
96 | after_train = datetime.now().timestamp()
97 | print(f'Training took {(after_train - before_train) / 60} minutes')
98 | print("===================Normal-End=========================")
99 | exp.validate(data,data.test[0],data.test[1], evaluate=True)
100 |
101 |
102 |
103 |
104 |
105 |
106 |
--------------------------------------------------------------------------------
/run_pems.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | from datetime import datetime
4 | from experiments.exp_pems import Exp_pems
5 | import argparse
6 | import pandas as pd
7 | import numpy as np
8 | from torch.utils.tensorboard import SummaryWriter
9 |
10 | parser = argparse.ArgumentParser(description='SCINet on pems datasets')
11 |
12 | ### ------- dataset settings --------------
13 | parser.add_argument('--dataset', type=str, default='PEMS08', choices=['PEMS03', 'PEMS04', 'PEMS07', 'PEMS08']) #sometimes use: PeMS08
14 | parser.add_argument('--norm_method', type=str, default='z_score')
15 | parser.add_argument('--normtype', type=int, default=0)
16 |
17 | ### ------- device settings --------------
18 | parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
19 | parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
20 | parser.add_argument('--gpu', type=int, default=0, help='gpu')
21 | parser.add_argument('--device', type=str, default='cuda:0')
22 |
23 | ### ------- input/output length settings --------------
24 | parser.add_argument('--window_size', type=int, default=12)
25 | parser.add_argument('--horizon', type=int, default=12)
26 | parser.add_argument('--concat_len', type=int, default=0)
27 | parser.add_argument('--single_step_output_One', type=int, default=0)
28 |
29 | parser.add_argument('--train_length', type=float, default=6)
30 | parser.add_argument('--valid_length', type=float, default=2)
31 | parser.add_argument('--test_length', type=float, default=2)
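   | # train/valid/test lengths define a 6:2:2 chronological split of the raw series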
32 |
33 | ### ------- training settings --------------
34 | parser.add_argument('--train', type=bool, default=True)
35 | parser.add_argument('--resume', type=bool, default=False)
36 | parser.add_argument('--evaluate', type=bool, default=False)
37 | parser.add_argument('--finetune', type=bool, default=False)
38 | parser.add_argument('--validate_freq', type=int, default=1)
39 |
40 | parser.add_argument('--epoch', type=int, default=80)
41 | parser.add_argument('--lr', type=float, default=0.001)
42 | parser.add_argument('--batch_size', type=int, default=8)
43 | parser.add_argument('--optimizer', type=str, default='N')
44 | parser.add_argument('--early_stop', type=bool, default=False)
45 | parser.add_argument('--exponential_decay_step', type=int, default=5)
46 | parser.add_argument('--decay_rate', type=float, default=0.5)
47 |
48 | parser.add_argument('--lradj', type=int, default=1,help='adjust learning rate')
49 | parser.add_argument('--weight_decay', type=float, default=1e-5)
50 | parser.add_argument('--model_name', type=str, default='SCINet')
51 |
52 | ### ------- model settings --------------
53 | parser.add_argument('--hidden-size', default=0.0625, type=float, help='hidden channel scale of module')
54 | parser.add_argument('--INN', default=1, type=int, help='use INN or basic strategy')
55 | parser.add_argument('--kernel', default=5, type=int, help='kernel size for the first layer')
56 | parser.add_argument('--dilation', default=1, type=int, help='dilation')
57 | parser.add_argument('--positionalEcoding', type=bool , default = True)
58 | parser.add_argument('--groups', type=int, default=1)
59 | parser.add_argument('--levels', type=int, default=2)
60 | parser.add_argument('--stacks', type=int, default=1)
61 | parser.add_argument('--dropout', type=float, default=0.5)
62 | parser.add_argument('--num_decoder_layer', type=int, default=1)
63 | parser.add_argument('--RIN', type=bool, default=False)
64 | parser.add_argument('--decompose', type=bool,default=False)
65 |
66 |
67 | args = parser.parse_args()
68 |
69 | if __name__ == '__main__':
70 |
71 | torch.manual_seed(4321) # reproducible
72 | torch.cuda.manual_seed_all(4321)
73 | torch.backends.cudnn.benchmark = False
74 |     torch.backends.cudnn.deterministic = True  # force deterministic kernels for reproducibility (PyTorch default is False)
75 | torch.backends.cudnn.enabled = True
76 |
77 | Exp=Exp_pems
78 | exp=Exp(args)
79 |
80 | if args.evaluate:
81 | before_evaluation = datetime.now().timestamp()
82 | exp.test()
83 | after_evaluation = datetime.now().timestamp()
84 | print(f'Evaluation took {(after_evaluation - before_evaluation) / 60} minutes')
85 | elif args.train or args.resume:
86 | before_train = datetime.now().timestamp()
87 | print("===================Normal-Start=========================")
88 | _, normalize_statistic = exp.train()
89 | after_train = datetime.now().timestamp()
90 | print(f'Training took {(after_train - before_train) / 60} minutes')
91 | print("===================Normal-End=========================")
92 |
93 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cure-lab/SCINet/02e6b0af2d58243de09aaa1eac3840237b659847/utils/__init__.py
--------------------------------------------------------------------------------
/utils/histogram.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import os\n",
11 | "import seaborn as sns"
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 4,
17 | "metadata": {},
18 | "outputs": [
19 | {
20 | "name": "stdout",
21 | "output_type": "stream",
22 | "text": [
23 | "data file: ../datasets/PEMS/PEMS08.npz\n"
24 | ]
25 | }
26 | ],
27 | "source": [
28 | "data_file = '../datasets/PEMS/PEMS08.npz'\n",
29 | "print('data file:',data_file)\n",
30 | "data = np.load(data_file,allow_pickle=True)\n",
31 | "data = data['data'][:,:,0]\n"
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 8,
37 | "metadata": {},
38 | "outputs": [],
39 | "source": [
40 | "with open('./financial/electricity.txt','r') as f:\n",
41 | " data = np.loadtxt(f, delimiter=',')"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": 5,
47 | "metadata": {},
48 | "outputs": [],
49 | "source": [
50 | "train_ratio = 6 / (6 + 2 + 2)\n",
51 | "valid_ratio = 2 / (6 + 2 + 2)\n",
52 | "test_ratio = 1 - train_ratio - valid_ratio\n",
53 | "train_data = data[:int(train_ratio * len(data))]\n",
54 | "valid_data = data[int(train_ratio * len(data)):int((train_ratio + valid_ratio) * len(data))]\n",
55 | "test_data = data[int((train_ratio + valid_ratio) * len(data)):]"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 6,
61 | "metadata": {},
62 | "outputs": [
63 | {
64 | "data": {
65 | "text/plain": [
66 | "array([435., 424., 445., ..., 120., 102., 102.])"
67 | ]
68 | },
69 | "execution_count": 6,
70 | "metadata": {},
71 | "output_type": "execute_result"
72 | }
73 | ],
74 | "source": [
75 | "test_data[:,0]\n"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": 7,
81 | "metadata": {},
82 | "outputs": [
83 | {
84 | "data": {
85 | "text/plain": [
86 | ""
87 | ]
88 | },
89 | "execution_count": 7,
90 | "metadata": {},
91 | "output_type": "execute_result"
92 | },
93 | {
94 | "data": {
95 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZUAAAD4CAYAAAAkRnsLAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAABRlElEQVR4nO29eZxc11nn/X3uUktvWluSbXmRHdmxshAnxnEgCYEQsAnEDGvyEhIIL8ZDMgv5vO+QsM0wL4EwMAxkCAkJMCSQzWQ1iYmzQPY4XuJVlmUtlqxdvdZ+1/O8f9xb1dWtVquk7lKrW+f7+ZSq6t5z7j2nWl2/fpbzHFFVLBaLxWJZCpzlHoDFYrFYVg9WVCwWi8WyZFhRsVgsFsuSYUXFYrFYLEuGFRWLxWKxLBnecg9gOdm4caNeddVVyz0Mi8ViWVE8+OCD46o6Ot+5i1pUrrrqKh544IHlHobFYrGsKETk4OnOWfeXxWKxWJYMKyoWi8ViWTKsqFgsFotlybCiYrFYLJYlw4qKxWKxWJYMKyoWi8ViWTKsqFgsFotlybCiYrFYLJYlw4rKSqW2d7lHYLFYLKdgRWUlEk3Bv9yw3KOwWCyWU7CishKJa5DUwcTLPRKLxWKZhRWVlUjSAOALD+9e5oFYLBbLbKyorERyUZmujC3zQCwWi2U2VlRWImkTACepLvNALBaLZTZWVFYiuaXiJdPLOw6LxWKZgxWVFYiJ6wA4aW2ZR2KxWCyzsaJyAXJosomqnvZ8vVEBwE0qnWPffWaKb+0b7/vYLBaLZSGsqFyAvOnv72fPyfppz9frmZh46UxM5Zt7xrln5/G+j81isVgWworKBUgQp0w3Z69BOTLdIkxSABqtTEy6RSU2SrWVnL9BWiwWyzxYUbkASYxSC2aLyu9++nE+/3hmiQStGqHxKabTnfNRklIP7GJIi8WyvFhRuQCJU0MtmG11VFoxe3OXWNiapmoGKWqXpZIq9TA9r+O0WCyWuVhRuQCZz1KJEsOuY5mIxEGVejpXVAzNyLq/LBbL8mJF5QIkSZXqHEslTg37xrL1Ka5pUDdDlLtEJUoMjchaKhaLZXnpq6iIyC0isltE9orI2+Y5LyLyrvz8oyLywjP1FZH/L2/7sIh8QUQu7Tr39rz9bhH50X7OrZ+kqlRasy2VODUcmW6hqnjaom4GKdGYdb5lRcVisSwzfRMVEXGBdwO3AjuA14nIjjnNbgW254/bgff00PdPVPX5qvoC4LPA7+V9dgCvBZ4D3AL8VX6dFYcxylQjmnUsSZUoMRyvBhS0SV0HKVOn0oqphwlRaghiKyoWi2V58fp47ZuAvaq6H0BEPgrcBjzR1eY24IOarfS7V0TWisglwFWn66uq3QWvBgHtutZHVTUEnhaRvfkYvt2vCfYLo8r0XEvFGIqew/6xBqO0qJtRSjR4z1f3MVTySVK1omKxWJadfrq/LgMOdb0/nB/rpc2CfUXkHSJyCPgFckulx/shIreLyAMi8sDY2IVZ5Tc1SnWOqCSpUvZdxushBVo0dJCytKi2IlpxSpIagsQs04gtFoslo5+iIvMcm1t75HRtFuyrqr+tqpcDHwLechb3Q1Xfp6o3quqNo6Oj8w78vHL8S7Dvb2cdUuWUlOLEKK4jxKlSICDUIikOadwgSlKiVEmNEqdWWCwWy/LRT1E5DFze9X4rcLTHNr30Bfgw8NNncb8Lj4kH4OTXO2+NURSohXMtFYMjQpwaCoSkUiDQEiRVwsSQ5GLStMF6i8WyjPRTVO4HtovINhEpkAXR75rT5i7gDXkW2M1ARVWPLdRXRLZ39X8N8GTXtV4rIkUR2UYW/L+vX5NbMuIKJHV+7R8eoNKMSUxmXDXmLGRMjOI4WZZXUUISLRCYEk5cIUwMcZr1s2tVLBbLctK3QL2qJiLyFuAewAX+TlV3isgd+fn3AncDPwbsBZrALy/UN7/0O0XkOsAAB4H29XaKyJ1kiQAJ8GZVvfD/bI+mIGnyzb0TPHBwku+7ZiNwqjikRnFEiBJDQUISfAIt4qUVGokhNpmlMleMLBaL5XzSz+wvVPVuMuHoPvbertcKvLnXvvnxn56nefvcO4B3nOt4l4W4AmmTODV85+kJbtq2Ht8VwthgjOI4WagoMYoIxEmKT0wqbVGp5e6vzFKxa1UsFstyYlfULzfRNCRNUqN8c+8EaR6QL3gO9dxaUc2C8IJA0iDGQ0QITIGiqRKlJj8PDev+slgsy4gVleUmrkLaxKiy50SdZpTiSCYq7QywxCiOgAho0iQyBRwhq1SsNeLUEKcG33OWN6aSBst3b4vFckFgRWW5iato0sIoFH2H49UARwTfdTpFJZM0s15EII3rhFrAEaFpSozIOFFiSIziu7KsMZX001dRqU0v2/0tFsvyY0VluUlqkDZxBBwRgjjFEXCdLK4CEOXpxA4CSZ1IfUSEqWSIS5zDRKkhMQbPcZYvppJGuOEJPvvAruW5v8ViuSCworLMBM1pSMNMNATC2CD568SYLIaSmo6lQtIkUg8RmEpG2OIeI0mVJFU8xzBYvXd5JhJnWxy3mtPLc3+LxXJBYEVlGVFVnLQBaSsXlcxSEbLyAFGcwKe2koQVHMlERdIGofFxgIl4mC3eSeIkC9RfWzjAK8bmTabrP7moNBuV5bm/xWK5ILCisowkcYArBkyISJYyHCRpJh4imLgB8TQy/Siuk4mOpC0i4yGOMJ0Osd6dJkkjUqNsKx7BN/XlmUwuKlGYPT9yaJoHDkwuz1gsFsuyYUVlGYmCCk1TAvEZcOPcUjEdq0STGgDO9MO50IBjmsTqIYDiUk2HWMsYiVGuLBzGJwA9v/W/Dk02qdcnAEjCbMz/tvsk//zIhV8lx2KxLC1WVJaRNJyiaYoYp0jZCTNLJc4tFcBEmdVRmL6/4x5z0gZJLiriwEQywkY5RmqUq/2DCApJ87zO48+/9BSP7DuYvYmznQnCxDDZjBboZbFYViNWVJaRJKgQmCJGigy4EYLQilNEBEHQuA7iUqw+nK1TIYupJHkhBEeESjLEJicTlSv9vPJ/UuczDx3hj+4+P5lYrTjFSTK3lySZEEaJYboZL9TNYrGsQqyoLCNJWCHQAsYpMOBEuaVishr+ApLWGNfL8IODlJw4ywpLWx1REaBqBnNRMVzmHSWkTNia5vc/+wRPnaidl3lEicFLMwvFNY38WHrKnjAWi2X1Y0VlGTFRhdAUMPiUJUSAMA/UAxA3qCRFarKRawpPIwKuaZJqtkuyI8J0Msxl3hFGnAqCUtX1fPupZ6g0o/O2t0qUGNxcVHzNRSVVqoEtGWOxXGxYUVlGNKoQqk8qPmU3QNrrVLIqX5DUCY3PlG7m+uJeHIEBqkRaALIMsel0hEu941zpH2HCbCDSAlFrGt91iNJT9ijrC1Fq8JNpmjrIgDQIk5Q4MdStqFgsFx1WVM6RI9Mt9o8tLn3XRFUi45PiU3YiQDqBegAnrROpx1S6lqv8g4gIW9xjVM0IkGWDVc0IV/pHeH55D9NmXbbaPq3j
OHLeLJU4MfhphboZYthpUG0lhKmxxS0tlosQKyrnyGcfOcqHvvPMoq6hcZVIfRL8WdlfkO2D7KQNIuMxmQ5zuX8ER+AS7wQVMwxkMZWKruXJ4Ep+59L3UzMjxOojSR1HpLMbZL+JUqVgqtR1mCG3RTWIiRNDK0rJdjewWCwXC1ZUFkFqFvmlHVUyUVGPkhMCWSZVO6QiaYPYuIxHI1zmHUcQtvjjVNMZS8V1HP55+mV8avqV7Eu2E6mPk9ZxhM5ukP0mSQ0Fk1kqQ06TaismSg0KNOz+LhbLRYUVlUVwVl/aRz8PD/7GrENOPEVoCqS4lPJAfZDX/lIFL6kRqseJaIjN7kkGpEFBYlqUgSym4jpZUZdHW9czpRuI1cPJLZW+ub9U+eN/2cVjh7M04jg1+FqnZoYYcgOqQdK5t80As1guLqyoLIL2fvI90TwMlZ2zDjlJllKcqEfZCVFm3F9ZpleNIPWYjnwU4RpvDxPJWkSyH5vAzPqV/DlSL7NUHCExSjNKlj61+O7nsf/w0+zOr5sYpWBqVNMhhtwWlVZMnGSfTTWwomKxXExYUVkEZxWzSAMIJ2YdcpMKgSmQaGapAASJ6YiFZ+qE6hPEhsl0Lc/zv8tkMjLjHusWFMksllg9vLSGK5kF8Y094/yvz30Hdv3PpZhyRvMoA+kJJurZmBOjFLVB1Qwx6ARUWzGxaVsqNlhvsVxMWFFZBGdlqZgAotkFFt20RqgFYnU6MZUwTrOFjwieqROkPgAVs4bneg9RTQfnXFhmKhgLBKaAZ2qZpZIqUWoYNfth718vZqqzSVsMmClO1nJRSZUCTarpEGUnoBYkxInBc8S6vyyWiwwrKosgOZuYStKCaGrWITetEZgCsXoMOC0gq5kFmUB4aYNQM1GpmyGudXdSNYOdlOMZKyUTIREIjYdvGriSub+y1e61Tk2uRaMGTMAA0xyvZtsHOxogGOqmRNkJCJM0c4l5DrXQiorFcjFhRWURJGeT/ZW2si/2rgrCnqkTmgKx8ShJiKrSitKOWPjaJDKZqFTNML7ETHW5v8hftS0VEAJToKBZTCXNRaWgNUxc5+f++tudmM3Dh6b5Dx/57tlPOt+HfogKY9XMUilTJ6RMYAqUJCSIskB9ZqlY95fFcjHRV1ERkVtEZLeI7BWRt81zXkTkXfn5R0XkhWfqKyJ/IiJP5u0/JSJr8+NXiUhLRB7OH+/t59zgLC2VtAVoZ98RAC+tE2iR0HiUJehs0qVkMRJPmx1LZTpdA0AlHaYtJqfEVsgtFW3gShbziVJDQRtI2uS+pyf4r3ftpBEmvOnv7+cbe8bPftJpZlENSYWJxoyoRFomNUKKSxw1SVLFte4vi+Wio2+iIiIu8G7gVmAH8DoR2TGn2a3A9vxxO/CeHvp+EXiuqj4feAp4e9f19qnqC/LHHf2Z2QxpjzGVsVrIlx47kL1pu8BMiq8NWqZIaFxKEnQ26YIsUF+gSaTtmMpI/jw8Y6m0V94zO6ZS1OYsS6WkNQRlrR9z/9OTHJpq0oySU2JCJ3N31sKTzkRljUx1qhAPSJ2QEqoQagFN6iRG8Ryx5e8tlouMfloqNwF7VXW/qkbAR4Hb5rS5DfigZtwLrBWRSxbqq6pfUNW2T+VeYGsf57AgvQbqK62Ianub3TAP1sfTxFLG4BCpR9kJELJNuiCLkRQJOu6viq7hpNlCywzS5f/K2ubZX21LpUATN08pDhNDiayczBo/IEpN5hJznVnjj1PDy//k384slPleLWudaapBjKoyIA0iLWJQQi1CXCNODa4jNMOZxY8HJxp8/vFjPX1mFotlZdJPUbkMONT1/nB+rJc2vfQFeBPwL13vt4nIQyLyVRF52bkOvFd6jakkRilJ/hd7OwMsHCeSIQCCdMZSaSMCRQKMkxWPVDw+E72BWY10pm2blvEpku15nxolTg0DkonKkBcS56LiuQ5pl/suSgxBbBZ0V/3ZF3YzXpkGYI07TZwqjShlUBqZhWKUSAtI0iA1mfurGc/EVB4+NM3HHzzc02dmsVhWJv0UFZnn2Nw/g0/X5ox9ReS3gQT4UH7oGHCFqt4AvBX4sIiMnDIokdtF5AEReWBsbOwMU1iYXmMqSaoUnYhUnRlLJRwnZACAwHgUJZwjKkpRQowU20eALCjfxqCzrBQRITQ+RWnh5M2aYcKQZIsUh92AOM1cYpklMyOK7blMLyAqX3jiBEcmJkFc1jg1XEeYrEcMSZ1ICxglE5W0RpKLStBVpiVOlXpoA/cWy2qmn6JyGLi86/1WYO6m5adrs2BfEXkj8OPAL2hesVBVQ1WdyF8/COwDrp07KFV9n6reqKo3jo6OnuPUMtIeiyWmRhlwY6pmCKJJWlHKH3/mG4R5uZUgdXNRmRGMMhGJejiO08kGA2a1GSx6DBS8mfMCrdSnSNAp4VIPUwadbI+TTFSy4L3nCEbpFHyM8oWc0wvEQILEELRqUNjAiFNlsOByshawvfQMLR3AoCQU8NI6icmEqxV3C5ehObcWWP3pUxaFWiyWlUs/ReV+YLuIbBORAvBa4K45be4C3pBngd0MVFT12EJ9ReQW4DeB16hqZzN2ERnNA/yIyNVkwf/9fZzfLPfRQiRGKTsRtXQAokn2nqwzMXGUQItZHCR1KUqEAANOizeveS/Dbo1AC131vTK6rZmS51BwZzLBBIjVQVAKErHBr1ELYoaljsFh0G1lCyITgyPginTql7WtloUslTgxBEEdihsYcesUfZeJyaP8/PovsDN+LqoQq49rGqR59lc7hRkgNnqqqDz+DjjwkZ4+R4vFcuHTN1HJg+lvAe4BdgF3qupOEblDRNqZWXeTffHvBd4P/PpCffM+fwkMA1+ckzr8cuBREXkE+Dhwh6rOXsK+xPQaqE9N5v6qpoMQjvPUiRrrvSqNpIgItJK2paL86db/xetG/oln+XsITKFTsqX9g5J5HIOzFkMiBFrie/xH+NhVb6URpgw4TSJnhEEnIDEmt0oEx6FT+LFdq6uSZ3QdGG+ccp8wSQnDBvgj+JJQcmIuP/5uHmteQyUdQRUSfHzTINU5ovLUu7mi8klac/dYiauQLG5fGovFcuHg9fPiqno3mXB0H3tv12sF3txr3/z4s07T/hPAJxYz3rOl15TixBiKEnM8WYMGYzw5XWOTN0099XFEaOXur1vK9/Cc0n52R9vZ4T9GqD4i4LSrRpLFVObuUdK9asVxsrTeFxe/wRZ/nHo9ZqjYJJQRBpwZSyUrmy+dWErb/TWVu79ue/c3+dJbf4DR4WLnPolRwqAOAx71dID1XoVnVf6R/1X/KYqDucWDj6dNhqSC5wwR5BUCmHiA4WiQVvzcOR9OzYqKxbKKsCvqz5FyOsZrSnf21DY1mTuqZgYxwTiPH6mw0ZumEpcQgWbqUnRCfn7ww3yhejMn01F2eI8R5pYKzI6lyCnmSvca+2xV/U3+tyg5ESauMei2aMkwA5KtMWm7oFwRotSw61h1xv2Vi0ozShjPC0Y28uB6kiph0EAdn7op88LSY4QyxHQ6QpIqAqR43Fr6Z+6
//hcZdupEbVEJx3DT6ix3WHbRxtKVkLFYLMuOFZVzZCR5hh8qf7mntkkuKnUziIaT7B2rs9GvMh0XcIDIeBQlpiQt9oeXMZ5uZLu3O1/42NmxvsNcTekO5Dv5AshR9yT1dIBSMsagE9DSQcpOC8+VTgaWI8KJasBr33dvx/01Vo8wRolT7YjKT73nWxyabGYpynEDxaVpSnx/6dtMSpbpnRqDOBBomUGnzvF4A8/y9xDmizkJx/FMc0Zk2qRNiJe4NL/FYlk2rKgsAo/eSpCkqVKQmLoZQsNJphoRG/0a01ERcQTNS9bvip8LCGPpKAWJiNRDBIZLLkVv5kc1V2S634oIgfEZM5s4mW5kRI/hS0zTlBl0skWRbcvDcTKBCeO0U6p+sh51XGHNqQOgSrUVUw8TEmNI4yZGPFqmzIvL9zFhNgNZEF4QHk9eyPvHf44D0WU8y3uqy1KZyLPCdLbbMGlkLjCLxbIqsKKyCFx6W3ORGMWXmIYOQzxN0XdY41ZpmhJOrgi7o+3sirNKNFUzQqClzmr6oaKH58wToZ8HkWyjruPpZTTMAFvkEC1TomX8jqjUgralkrm2otQQ51/+k42IME8DfvGB18Lkg50Fk6lRSDJRCbTIsNPguLkUyFx8IqDiEqTCiWQD17k7Z0QlmsTTLHbS6naBJU2IbUzFYlktWFFZBL70aKmY3FLRQdykiucII06Npil13FZ3N28hyhdDighjZguxzpNHofNngLVxRBiL1/FMejUtLbPVPUTLFAlSnyGniSuZdaKata2HCUazkvueI0w1o47LqpycgPp+wsQQJgaj4KRNFJ9IC7RMkRPx+iyWYrST1mwUTsSbuNp7KrN61EBcwTdZBnirO604bdlAvcWyirCisgg86dFSSRNcUmIKqLis9VqUJaBlip2V79L5J2PKbJhXVPSUogSzEeBfazdxWK+hZUpcWThMyxRppT4D0rZU4ixNWaARphQlpBHElHyHahATJgZfYoo0oHGAODU081RgV5sY8WmZIkeTS2nFWepwmmq2sj9XvHGznnUyQUkbaDQNmuLly4pmBeutqFgsqworKovA7zGmomlAgocjDpEM8aziQZpaRnE6X8Ld4XgBdqXfw8PNUwoCMFL2GSi4p79Z10UaZpBrikcItUAzdRlwsppgtSDplHdphAkf3PZ7lCrfoeS71IOEIE5Z7+YFMGt7SdJs0aLrCAVtkYrPnuhqHoxeRBCnOAKGmQ3DMlzGdQs7Bp4mbp7MPi+dnX0GZPuzpE0sFsvqwIrKIujVUtE06FgdJ51r+Lk1n6Fpsm2BZU6QvX1smo2cSDYytxJMyXMWjK84HZGChg6wtXCCwBQ6looI1IOks7FXLUy4qniMQvMpCq5DM0ppxSkbvUxUtLaXxCiNMMF3hQEnIjIuU+l6jiRXEKYmW0fDjDC2s9EmzCjfM7CP6aljGH8tBXL3V9tSSSPQBBPX+cO7d/X0WVoslgsbKyqLwO9RVEgDkjyTa58+n1cNfIWWZnW/nC4h6ZaK7g24zoQjMktM2o+mGcQTQ0yBRuJSktxSCTNLxRGhFYRs9KYphgdwRPBcYboZc0m5RiUdQhsHgcy6cEUYcCNaqZvXGpO85MvMHCBbgClAVddxXekA33xiN+PxGnwN8mvl616iLOtL4wZfeOJ4b5+lxWK5oLGisgi8HrO/yN1fgnA03sTh5FKCXFRkPiWZ8xf/mVhT9lhTnom/OPlPtaFZ4D9Wn0aS7dni5C4vJ69sTDSGK4ah6AAI+K7DVDPikmKVZ6ItSOsIoDSiBMfJRKWZOJ2xR4mZ5baDtsjBtK7nmuJhwvpJajpCgUxUOlsaP32Upg4gJjh1/YrFYlmRWFFZBL26v0haxHnJlWaU8FDwAiY0q5A818KAuX/x95ZK3EaETppyI3exxRRoxF5nz5Z6mHTiH154DKPCSJJZJJ4jVFoxG70q0+kw6pQZ9aZphimOCCUnpBlLJ9BvlI77q42Tq2HFrOcK/whJa4ymGcAhSwBoRZmATFSmaJoyogkmtdsOWyyrgb7W/lrtuKJgUnAWCJwDmMz9BZkbaXdyLYfEBYJT9pnvRvJtgs+GbutGxaWWDpCoT6A+JQlwJBuDk9fKL8bHOaKjrJdsR0bPdZhqxGxwJ4m0RORtYGvhBPUwwREoSch46oFIl2XSfs7rj0kmaw2GGXSaDAcHCdwiEUWG3bDj/pquTxMaH+OXOunGFotlZWMtlcVizrwHu6QhCTOiAl1fwN1/5cusp7MWlDbdG3lV00EiisTq4xMheUpy21IZTE5wJNpEgQZFCXDztSob3UmaOkDkrmWrf4JmlLnMihLRStp7vHSXsuwaf9eB8XQD13o7aaZlIi0y7EUd99dUZZpIfVIp4dM6t8laLJYLCisqi6UXUTEtEs2smVZHVPJz7TYyk1TcXcq+x4X03XebVXCyYcrZ+hiEmAJlJ6vn1XaRDZoT1E2Zmq7jEvc4rgjTzYj13hRNM0Agw2wtnMytGyhKSCttx1ROzfpqz609hCmzju3FAzRMiZACw17Qyf6q1tqiUqSg1lKxWFYDVlQWiUl7EZUZ91eSlzNxHKHgznZvnVIocu6KyB6YvVYEHmley9H4EiDb6ndAmjPtgPUco5oOUTFruMQ7jiMw1YxZ60zTYoAWa3jZ8He5JH4IEaEgMc3Ene2um3NP6XKNVdJhPDE0TIlYCwy6Qcdaa7aqRMYllQJFKyoWy6rAisoiSdPwjG0kDUiZibu0M6+2rivPG4iXrlfn4gLr7vJocD1Tuh6AiCJlp9UZgwLrZYxqOkglHeIS9xiSWyprnQotU+aYez2qDm/1/iPbCgcpSEQjnr3FcWdOs6yv7N9psxaAeloiUp9hL+hUSY7DOpHxSKRI0QlO2SfGYrGsPKyoLBKTnFlUSENSded86TLrTff6ktmycvY4p7F+Ii3w7MJT/OrGT3SOb3THM1FJhrjMP5otvGzFrHGqtHSAejrAZysv42h6GZf7x/ElotGVUjzffbqtmMk0E7RGWiRSnyE3oBEmGKNo0iRWlwSfAafV2drYYrGsXGz21yJJkpDiGdqItjrrVGB2cH5uUF66gxNwTpZKu1O7aGRnrBT4zXX/E4eUX678LFMU2eBOUU0HmYiHubxwBICgWUVEifEJ8uKSlXSIrd4RHJRmrMyN3cxKie6a2JRZh1GhnhaJjMegE1INEyYaEcNeSKIeidLZ6rhg/86xWFY09jd4kSxkqZysBfzZF3bjpCGG+S2VU3dxnDnvOYLrnP2PqFuoukUl0gJPRteyO7iS7y18C18jBpyApilzJFrHNf7TAPjJBHUzBDidMviVZIBt/kEiLRDEpmONdCcanDoCCLTE/z7x88TGIVavE1M5UQ0Y8SMSPCL1GHCCzkZhFotl5WJFZZEsFKh/ZqLJPTtPICbA4MxkfM1jfcxar5K/2TxSpOCevakiXSZD972+Ef8IX2r+ILuDK3lZ4cts9CayBY4Ix8M1rHEqDEuNDd40DTOICB1LZSIeZpt/MLNecqERZlbvdycVzJqfwhSjGK
NE6jPotmiGKVPNiCE3JMUjNLmomHNfVf9Hd+/iiaN2W2KLZbmxorJIFgrUZ/uQpDjajqm0FwfOEzPpSs89t0jKDJ3U3vbr3AAIZQDEYVdrG8/1HuIXRj7KZDqSVxl2OJ5u4dnFp9jsT3Zqk7UtlfFokCv9Q8Tqk3Tt3OiIZHu8nG7tv4DnOKSqxMbNtjaOU1pRyoA0SfAJU4dBp0WcnruoPHJ4miPTdq2LxbLc2JjKIlnIUgmTlCgxuKZF2vVRz3IVzecSW8R4sgyqWdGNzqvsIbS0xN70eq7wDvOpqR/EEcGoMp5u5NnFPaxxDnI8zdKQ2wsVJ5Nh1rhVTqajs+7n5n6wubGgmbmC6wgmUSJ1GXRaBEFWCbnsBMTGJ0xhwAlIFhGoDxNDsghRslgsS4MVlUWyUEwljLMdE10NSHDndX/NDdRvGi7gnYPLq3M9kU65/G432Gw3FXwzeRW1VkIljfDzbOcxM8rzCo/z7IGdfLr1MwCd/VLqZpBYPWL1T7lf9zzm4jmCGjAGQuMz6DVpxSlBnDLqtKimZZqpMuQ2s10iz5EoMcTGxmQsluWmr+4vEblFRHaLyF4Reds850VE3pWff1REXnimviLyJyLyZN7+UyKytuvc2/P2u0XkR/s5tza6oKViiFODpwGpdmV/dbWZ6wgr+e7inF/d/q72IaFz724bplO4Mn9xMt3CTcX7qKYDVHUd5NsMZ2tahKl0LSmzRaW9en5unKi95mSo6LFuMOsTGJ9Bp0kYG1pRSlkCEnxaqcvQIt1fkbVULJYLgr6Jioi4wLuBW4EdwOtEZMecZrcC2/PH7cB7euj7ReC5qvp84Cng7XmfHcBrgecAtwB/lV+nr5gFYyopcaqZqHRnf82zwGNxUZQZHIT2V+uMgMgsK8mR2fdri8q0riPF48ngqk5acNtSAaiYkU4Ns25kTnrxYNFjuDRHfBwIUo9Bp0WYpLRiQ0myLQGaSXZ8Me6vODWL6m+xWJaGfloqNwF7VXW/qkbAR4Hb5rS5DfigZtwLrBWRSxbqq6pfUNV2zfl7ga1d1/qoqoaq+jSwN79OX+nVUknwZu3s2GapxGT2oPJrdycEdO49s/K9/cOfWTbj8Eh0Aw83r+ukI0epdtbV1MxQR1Talsh8mWwFV/DnuPAcEUL1KUuLMDEEcZKLSrbXy4DbIkoN+8fq/P03n4a974epR3qecpSaRWWPWSyWpaGfMZXLgENd7w8DL+6hzWU99gV4E/CxrmvdO8+1ZiEit5NZRVxxxRVnmsMZWWgfkDA2JEbxCEl1qFMgctbX7QJpxufCUNHF5MUrBwouRd+hFaVzhGx2dkBnO2CB++KXUkkDBhFEdFb7hhliQNO8bbdgnVkcHcliKmVpcUPxYQaaASUnzLLJ0pjBYsB4LeR177sXR4RfesVHQFxY9z09zTtOldhu9GWxLDv9tFTm+56Z6584XZsz9hWR3wYS4ENncT9U9X2qeqOq3jg6OjpPl7NDz+D+AjL3l7i4jlD2Z3vkltpScZ0ZK8ER8B2Z4+riFDfc7LIu+TGn+1j2fCTZwpRuOKeBiwiR+pSkxR9c+r95QesDlCQkUZ/I+Aw4ASdrIb7rZAH75lGIKxwYb/R0/Tg1s1KdLRbL8tCTqIjIJ0Tk1SJyNiJ0GLi86/1W4GiPbRbsKyJvBH4c+AWdqULYy/2WngVK37cXCbqalSNxJFvQ2I10PfpFd6B+PmZ2n+wuYCmnZKs9k1zJI0lmMDqz+p/Z0nJEiNXnUucQW/wJtqX3U5KAGJ9IM1GpBQlFzyE1igbHiYNpfvjPvkrag1gkqdraYRbLBUCvIvEe4P8C9ojIO0Xk2T30uR/YLiLbRKRAFkS/a06bu4A35FlgNwMVVT22UF8RuQX4TeA1qrPqpd8FvFZEiiKyjSz4f1+P8ztn1JzeUmnvcOgTzlqnMi99VJXZNbpm5KWzSLIr1uN0+nRbMnLKsXbvDYMFBorefElns8cAxPh4kvCtxg0MM8mwU8sWP6rPoBtQC2JcRxhwEySu0KxPkBjtWHwAxigf+NaBzvqZNomx2V8Wy4VAT6Kiql9S1V8AXggcAL4oIt8SkV8WEf80fRLgLcA9wC7gTlXdKSJ3iMgdebO7gf1kQfX3A7++UN+8z18Cw/kYHhaR9+Z9dgJ3Ak8AnwferKqzv3n6wEKB+vZmVB4RhtMnom0YKsxaZd9PfFcYKmUCJ3MsEREQZybteUZ08vNdlkz72XVk5j/RAlNwHCHWIkaFR4IdHEq24klKrB71dIC1bo16K8QRYUthCoCwOQFkFt9Pv+dbBHHK73/2Cf7rXTs5PDV7/5UkVbtOxWK5AOg5UC8iG4DXA78IPEQWy3gp8EbgFfP1UdW7yYSj+9h7u14r8OZe++bHn3W6MarqO4B3LDyTJWYBUcncX8qwVIm0cNp2I6X+rkF1ZbYIrC1nfwfMtVRAZsSkO0tsTlmZs9+NMiOiwJ3Rr9LQMofCTVzj7SPFJUVpmAEkOIaIz5bCNABJkIlLEKc8cmiaepjw6YeOUPIdgtigqvliTyU1SpSk/OHdu3jJ1Rv4wWdvOrdBWiyWRdFrTOWTwNeBAeAnVPU1qvoxVf0PwFA/B3iho3r67K9WnHJ9Kav8W9W152lEp1L03VNiOUBHaeZubbx5pJhnqc053yUyc3VFu87PR1uMaroWR2BPa0uWnqz5Gpl0hGJ0BEeEzf4UKj4mqgDQCJPcDZalaBc9lyBOecPf3ccXnziRxWDIFkA+cbTCR+5/5qw+H4vFsnT0GlP5G1Xdoap/lMc8EJEigKre2LfRrQROY6moKkGccuuab/JEaxvLWbvzdIkAp7i38ueBgjtrxf3cUizzBeZVF04GmBujORRu5mvhKzsXrZoRBuPDiMAmf5KkMApxVnW40sqEO8pFpeAKQWzYe7LOf/n4I0x3nQ8Tw9efGiey6cUWy7LQ6zfdH8xz7NtLOZAVyzwpxV/YeZw3/f39BHHKj6/5Bjtb2/qb3nWOdPaSP82q/rkpx7NF5uwmJF0pAiJZVeSn0ud2LtMwAwybLFlvkzdJ7I3ipjUAppuZaIRJSpIqnusQxGleBdrw8DPTQLYAMkoMQZJy39OTZzU+i8WyNCzozBeRLWQLCMsicgMz3yQjZK4wy5zsL1XlT+7ZTWqU64eOsdarcSjawqUX4KfVtkZOFyPpFpFu2Zmv1ld3+9Peb46bLcs2y97VdYgNZDtPbvbGCL1RCnoI15GOJdIIk85K/zDJBKToOTN73qdKlCoDvsuR6dmBfIvFcn44U4T4R4FfIlvz8Wddx2vAb/VpTCsKmSMqDx2a5pnJJmvKPjuG7+ep4Ap01vqPC4cZcWiLxexBdtKLOb2QtFk36J/Z/dV53ZVhlh+smREucQ6BwiZvgsC7jhFpMVz0mG5mLsZqkOA5TrZ5WJxtKzBY9KkFmegMpkd4qXcvH3NeSmjdXxbLsrCgqKjqB4APiMhPq+onztOYVhSis2Mqx6YDhksejSjhOu8xToTtVfsXnqq4jrBlpNS1Udjs8921yrpFIdvmeHZjtwfVn
NukW1wqZoQXFE6CwEZ3koazgU0SMFCQjvurFiSd+7bilDg1uI5QDTJL5Zr0fl408Gn+yXnZKetYLBbL+eFM7q/Xq+o/AleJyFvnnlfVP5un20WFzFlR34gSCp7DZCPiOm8Xn6u/LGu3HIM7AwIUPadT3uSUMUr3yxlra9PwPJlkvdxvjqp0KiY7UEmH2OxN4Iiyzp1iXzxArD6DTsh0K/uMa0GMl4tKNV8o6YhQy0VlRE+wxpnO3GPxqZZKpRWTGmX94OnTuy0Wy+I4U6B+MH8eIltwOPdhmSsqYYIjwojXYtQdY9xsXKaB9Y50PeY/vjTuu84lFFyn7VYTHISIEiLKWrfGsFNjKi7R0hKDboNKKxONWpDwE2v+lU3OSaabcWczs3qYWTLr9CgjTg3XgeY8lsoHvnWA93xl7+InYrFYTsuZ3F9/nT///vkZzsqjbamM10MOT7VoRtmX2fcM7udItAnEBdILMqbSRuYpdAmnxlIWipmc3Q3nuMvydOOJZA03FB+lpQPUQiUwRQal2Ymp1IKYO9b/Ix9vtDjWuB4vr3rZtlQ2yHGGnTquA63oVFE5PNXEc5cvtdtiuRjodfHj/xCRERHxReTLIjIuIq/v9+BWAk4eU/nXXSd515f3QOMgrx2+kxcM7OFovLFTVv5CxgG2rJnfpdW9vmSxwjirHExXunLbGppOhvntdX/Ag8EN1IOEiCKDTrMTU2m16mz1j/HcwhNMNqOOK6wtKuvkJJ6kjLhBR1R2Hq1wcCKrdHyiGhIlNtZisfSTXv9s+xFVrZJVBj4MXAv8v30b1QqiHaifbEZM1EM2Nu/lLWv/ml9b/48cizeeslp9pdFeXbKm7C/ur3zJFkhC7vKaZ23Mw81r+ZfWq7kvuJF6mBBpgUFpdGIqg8FTJHhcX3iSqUbUCdpX85TjdTJBYHzWeTWacSY0v/vpx/ncY8cAGKuFNivMYukzvX5LtItG/hjwEVW1K8tyJN+EcqwWUg1iislxHgq/h93RdvYGW0/ZEGulsWm4gOcKIyUPdxFzcLo+gO5ila7rUPSzrZafCK7hQHINkFkfkRYYdBodS2VdtJvd4bPY5I0RtKp5oD5rW3IiStLiRLyBdV6NIDacrAU89Mw0QW61TDRCosQWnbRY+kmvovLPIvIkcCPwZREZBYL+DWvl4OQxlZO1bD+Q4eQoFbOOLzR+mLoZWvp4xHmm5LtLNvL217l05Sf7jjA6VJi1IFKEjqUy5DQ77q3N5knG0w2cSDdzqXkCJ7d4amHM1uIkVTNC3ZRZ51YJopS7Hz2GAo1cVCrNmDBJ+dITJ2zA3mLpE72Wvn8b8BLgRs0qKDY4db/5iw6jguQFJSfqEc0oZURP0NBMTBwbE+4gQkdV5gszzV69LyRGidVnyJlZGX8FTzKWbmQ8HeVq50kcJ1tdXw8SLitOUEmHCUyJNU6FIEl59HCFcr6dchCnBIkhTgwHJ5s8daLe/0lbLBchZ1Nz/Xqy9SrdfT64xONZUSTq4pKJymQjohWlrOMkB/MNKGeVjF+ZhsqSUfLdTgykXHApeHMVtytwn39WCV5HVBxRrnT38/XkxazxA15QfoLPxcL1xV38xOA/8NXmS6mmg4Tqs8atEoSGWpjguQ5XhF8jeuD/AK/s1Adrb6BmsViWlp5ERUT+AbgGeBhop88oF7uo4HYslelmjAJrZIKGZkt4nO5l6Bc5BVcouFnaskAnc6tNt5uwfSrSIsNuVlRyS6GCYKgzyJjZzMvLDyE1eI7/BLeMfJsma6imZVJ1WeNWCZOUepDgu8KGZA/O+LeAVxLlNcNadsW9xdIXerVUbgR2dO0HbwFSdXFyUakGMQVXWetWaDGU7TkiM0Fpqy290Z1uHFNgSLJ04J9c+xX2hlsBoWLWs9mfpCgB1/h7qadlXjPyRf6l8n24jsNaZ4owMTSiBN91GDBjeMFhfFeIUyVI0nlX3FsslsXTq9f/cWBLPweyEknUxdGEJDUEccoVg02apgTidcUNLsxikhcaneKVXcZdTJEhp86Q0+T2jR/jq9UXAaDicjzewFXeQbZ5+/lS9SY8SZlOhgi0xDpnmigxNMIEzxHW6EkK8UnKXlYev10232KxLD29WiobgSdE5D6gU5ZXVV/Tl1GtEFJ1KJAw3Yop+S6XFiaYToazdRjOwmXiLXPoDtTnr+N88eOvj97J09HlnEg2sK6QtTmRrOdZhX1c7h3iY62Xsb18nBPJBi7x6gzL8c4mXUXPYUQmcEjZUpgmTMuEsbGbeFksfaJXUflv/RzESiUhc39NNSJKvstmb4KaGUQkixm0t91dqenE55P2IsvuzyumyLXeLrZu2MdHKj+dNdRMpMejtXxf6Tsk6tE0Ze6q30o1SVhrEkacKlFiSFUZKnqskUkSfC4rjPFUuiUvm29jKhZLP+g1pfirwAHAz1/fD3y3j+NaESTq4pAw2YjwXWHUm6BhBjrVf7eMFK2V0itd5fXbn1mLIWIKfGD8x6kz0mnnAMfjjdxcvo9xsyk7nPcJKTMkNeLUEEQpviuskSkmuZTLCmPZ8SQlTK2lYrH0g15rf/0q8HHgr/NDlwGf7tOYVgwGL7NU8jpUo+44LS3PatPtzrGcnu5U4nagvsFa/rH1q4wn6zoFKJ08kH883kBRIiq6dtZ1WqbMkFSJ0myr4YKrDDl1JnQzl/iZqISJIbYr6y2WvtBroP7NwPcDVQBV3QNs6tegVgoGF5eUgxNNRITN3knqZmi5h7VCmb2HPWTVk2cKT8pMO4GaGaBuBqno+lnnE/FxMPhktcHWulVapkwlHeYy7ziJUcLYEFtLxWLpC72KSqg6s8VhvgDyjH/qicgtIrJbRPaKyNvmOS8i8q78/KMi8sIz9RWRnxWRnSJiROTGruNXiUhLRB7OH+/tcW7njOYf31d2H6PoOWx2T9LQ2aJiF9X3hucK6wezEnPtNGyHmc/P7VgxM3GXvfF2TpjLO8fzFrR0kNFCjYLnsN6ZpGoGmUqGuDQXlSgxRFZULJa+0Ot33ldF5LeAsoi8Cvgn4J8X6iAiLvBu4FZgB/A6Edkxp9mtwPb8cTvwnh76Pg78FPC1eW67T1VfkD/u6HFu54xIFqx/6MBJrh4Y57rCHo4no3Ma2TUqveAAQ8Usb8RxhE0jWSn+Ttn9PJuu/R/WceAr4SuZJtsETZCOGDUZZJNfxXcd1jlT1NIBppJhNnsnSVMlTFKS1Lq/LJZ+0KuovA0YAx4Dfg24G/idM/S5CdirqvtzK+ejnFov7Dbgg5pxL7BWRC5ZqK+q7lLV3T2Ou++k6jLsK/9+4M94JLqBWjrbUrGZX2ePAIOFfPV9/vF1KiR3SuZL114s+dbETuZCq+g6nlU6jOcK65xJ6mmZiXiQDc4YicnSia37y2LpD71mfxmywPyvq+rPqOr7e1hdfxlwqOv94fxYL2166Tsf20TkIRH5qoi8bL4GInK7iDwgIg+MjY31cMmFSfH4gbU72eY+xS5uYnTO/u02SL84hGzrYSevzjlro69csN2uNUEIVM06risdwHOEtc4UTVNi
MipQlKw8fpAYEqPYAhEWy9KzoKjkMY//JiLjwJPAbhEZE5Hf6+Ha832dzv0tPl2bXvrO5RhwhareALwV+LCIjJxyEdX3qeqNqnrj6OjoKRc5WwwuLxt5nENmG4p3SgXegud0XDmWc+Py9QOdz7Xj/oLZC0yZ+U8zpRt4dnEfjghrZZxAy4SxMm428fyB/Z1dIW1cxWJZes5kqfxnsqyv71XVDaq6Hngx8P0i8htn6HsY8nK9GVuBoz226aXvLFQ1VNWJ/PWDwD6yHSr7isHjOYXHmTTzJ8MJULD7oi+K7k292i+2rClRcGdXNm7HX6Z0I9cUD+KIsF7GaJoBFDhktvHqNV/vFJPsXlU/Xg+5O98h0mKxnDtn+rZ7A/A6VX26fUBV9wOvz88txP3AdhHZJiIF4LXAXXPa3AW8IbeIbgYqqnqsx76zEJHRPMCPiFxNFvzff4YxLpoUl6ucPYzr5n7f6qJGumIpMOPyon20az1QVdexzpmm7LRY60xS1wEc4KC5jlvXfIMgzoqAHqsEPHp4GoDHjlR4/9f6/t/FYln1nElUfFUdn3tQVceY2WJ4XlQ1Ad4C3APsAu5U1Z0icoeItDOz7ib74t8LvB/49YX6AojIvxORw2Sbhn1ORO7Jr/Vy4FEReYRsoeYd52PbY4OLIkzrhn7f6qKmO5ZyunPttSqKw3i6kW3+QdbLSVo6iOtmP6NICzynsAsR+Pzjx/nTLzwFQBClTOV73VsslnPnTLW/onM8B4Cq3k0mHN3H3tv1WskWVvbUNz/+KeBT8xz/BPCJM41pqUlxGdNLOmtWLP1hJstr/syHudsLjKcbeOPIP+KSMJ5u6Fg2T4Xb+Mm1/8YT0XOYqIccmco2AQuSlKoVFYtl0Zzpm/B7RKQ6z6MGPO98DPBCx+AyYRYf8LecmdNVfB4sePhzdpKcTkd4afnb3Bv/ECpep8zLQ8Fz+cl1X2FraZLnN/6WW9wPAxDEWal8i8WyOBa0VFTVPV8DWakoWWDY0n8ckXn/ClpT9rraZM97o224DhyXyxGiTqZYwwzxYON6/vTSP+T5uosvFl9MmKS0omyPlSgx82x1bLFYesX+9iySvekODqVXL/cwLgq2ri/PCdDPR3a+5m7hEfN9QCY0nZRkgW/UbmCbf4j7wu9lkzfJyWpIkJfCrwXWBWaxLAYrKovkqfT5NBle7mFcFPTyn7XtHhssuF2p3F11DUQIpcRfTb2J79afzUZ/mhPVgCBqi4p1gVksi8GKimVVMZ8d072GRcjdaCKMhSU2uFWOVwPqeTylai0Vi2VR9Lrzo8WyIjhTWRzpuMKEQH1cSRmfmqQZZQshqy1rqVgsi8FaKpZVg+PIGWMu7bRkyYWlaoapTR+imbu/ZlkqadC3sVosqxUrKpZVw+hQgbI/T8LinFRkp+tNQ4doVI7QjBIcYWatShrBpy4DYy0Xi+VssO4vy6pnTXmm+IMgiMzUJm3pAG50gpakFDxnxlIJT0I0mT1KF/0mpxZLz1hLxbLq6d5BEpmxVAQIKVNKxwhig+86TDdzUQlOZM/hxHkercWysrGiYrnoaDu/XEcIGWBYx2hFKb7rMNXMqw+1jmfPVlQslrPCiorloqMdUvHcbD/7tYwRJikFV5hqzLFUIisqFsvZYEXFctHRsVRECBhkozueub88h+lWbqlY95fFck5YUbFclEgeW2kxxEZ3kmaUUHCdmXUqzSMgrhUVi+UssaJiuahQzTPAyPYgbukgG/0K1VaC70pnZT2to1DabEXFYjlLrKhYLipEQNFOXKXFAGvdGkazQH2n/H1wAsqXZKnFFoulZ6yoWC4q2mJS8l0GCy4GjxPJKH95xR+z0a90VtZ3RCUYW77BWiwrECsqlosKVzLXV8EVBgrZ6vsPV36azf4kry+9jyBOMUYhHIPypRBNkBo9w1UtFksbKyqWi4rhkse6QX/WMSM+DzSfy7Xu4/iuQzNoQtKE4iaa9ZO88n9+ZXkGa7GsQKyoWC56RISTySibnSOM+BETE4cInRFiZ4CgMc7R6QBjlA9++8CC19l1rGqtGstFjxUViwVQx2NCN/PcgafZtX8vJ4MSJ5sew06DOE2ZbsX83md2Lrgz5H/66EM8cnj6/A3aYrkAsaJisWi2ZmXcbOL55aeoTB2ikpSZCsAgjPghk40QgKfHG/NfIw14hfdZJurReRy4xXLh0VdREZFbRGS3iOwVkbfNc15E5F35+UdF5IVn6isiPysiO0XEiMiNc6739rz9bhH50X7OzbJ6ECdbZT+hm3h+aSet6hHqaYkj0y2aZpBRv87JWiYqh08chntuhrg6+yJTj/Dv176PiXp4/idgsVxA9E1URMQF3g3cCuwAXiciO+Y0uxXYnj9uB97TQ9/HgZ8CvjbnfjuA1wLPAW4B/iq/jsWyIO2qxWPmEl5QepwXpndRSwd5ZqJBU8tsKNQZy0UlOvp1mPgOPPnnsy9S38eIU2Oibjf2slzc9NNSuQnYq6r7VTUCPgrcNqfNbcAHNeNeYK2IXLJQX1Xdpaq757nfbcBHVTVU1aeBvfl1LJYFcQQUqOh6amaII8EIX6/fwKGpFoGWWe/VGM/dWuXKt2DDzbD7zyGanrlIbQ+uKNMVu67FcnHTT1G5DDjU9f5wfqyXNr30PZf7WSynUPSyhZCKwz/Vfpqv1W5ApUCcKhEl1ns1xmohRc/hyuR+GH0JjFwPBz7E5x49RpwatPIkAM2aXYFvubjpp6jMt1n43HzL07Xppe+53A8RuV1EHhCRB8bG7F+VFvBdYbg0exPUgpf9akSUWOvWGK+HbBowXOU9jQ5cxdeOlEkmHubtn3yURw9Po7U9GBXilv0/Zbm46aeoHAYu73q/FTjaY5te+p7L/VDV96nqjap64+jo6BkuabnYaMdXirmohAxypX+YsVrI88p7OBZvpBa77KkOYqZ3Ug8THjw4BY2nORKP2gKUloueforK/cB2EdkmIgWyIPpdc9rcBbwhzwK7Gaio6rEe+87lLuC1IlIUkW1kwf/7lnJCltVPuzZY0XNwHThgruVVg//GRK3BDaVHOJZs4Xg1YCxZh1Pfg1H47r6DSNLieLwBP5la3glYLMtM30RFVRPgLcA9wC7gTlXdKSJ3iMgdebO7gf1kQfX3A7++UF8AEfl3InIYeAnwORG5J++zE7gTeAL4PPBmVU37NT/L6sR1BEeg6LusKftUdAM1M8QN+kV+buiTPBFdz/FKQDUdQpI6w06TqRNPkhY3EVOibKay2mFdPHBgkhPVLCvs/gOT7D5eW46pWSznBe/MTc4dVb2bTDi6j72367UCb+61b378U8CnTtPnHcA7FjFky0WOI5mwuAJry1mNsN3Rdn5745/yRPQcJswoxysBitBwN/O8keNsco7TcjYQaYGNhRqVVsy6wULnmm//5GPc/vKr+dkbL+fjDx7mqg2DXLdleLmmaLH0Fbui3mLpwnOdTvXiNrvC6zgRb+De8GZEhGOVFgBV1vOs0mGuLh+nwTAtLbHJrzDRmFkAeazSYs/JOlFqAKg0Y1pRcv4
mZLGcZ6yoWCxd+I6wocvKAAikzHvHfoZAB0BhLF81P52OcHXhGb63/AhVXU+oZdZ7VSYbM/XBvr5zDz+//h7COBOVehhTC62oWFYvVlQsljPg5NnqjiM4rpCaLH/9ZLSGW4e+zPbC0xzQ59DSEuvc6qyikxN77+atmz9EmLRFJZnZXdJiWYVYUbFYzkA7I8wBPCd747nC4dYaNntjfKX5UmqREGiZEbdKtUtULkkfY9SbIokzl1kjTKlbUbGsYqyoWCxnINcRRLIAPoDvODzdGOEztVezP7mGRpjQ0hIjTp1aMCMa253HcETxg8MANKN0Zstii2UVYkXFYjkDIm33F7hO9ivjuUKQCrvjawFoRAmhGWDYqVFtxfzzI0c5WW2yzd3L0XgTpTirINSKU5phl6g89t+h8cz5nZDF0kf6mlJssawGOpYKgpu/8d3Zf481whQjHoJSbVR4384TbEgdrtMSx5PNDMaZpRLEKa24y/21/+9h/Qth8IrzMRWLpe9YUbFYzkDHUpHssWGwgOe2jwlGlUaUUPZdGmaQuDFOo5FQrB3kaLyZhg4xkh5CVTNRabu/VKF1BAJbhNKyerDuL4vlDLTrgUlupawpe11xlux8kKcMt7TMC6J/4guX38Z1J97JiXgDNR1hvXmGMDEYhVbelnACTGRFxbKqsKJisZwBkSyFuPuXpS00riOd7DCAQMu82v0AH5n4Ucb9HTzevJqqWcMGPdIJ0G+VvZBG0MpcYhPjNqZiWT1Y95fFcgZcEUr+7FX2HeuFmZgLQEiJx4LreCK4hq2FzZyMTzBEyHo5QS1M2F46wgcu+w9wZD04RQAOHdvPhvM1GYulz1hRsVjOgCNwyZriKceyZ+ls2iMC98Y/wP6p7EgzX4/SYoghqXGiMcVfXvHH1M0ApcpOKG2iKWspJXYPFsvqwbq/LJZzoB1fcfKqxpBlh7UYJtasEGUtSPJsMYeKWcNlD72GGJ9/rX4vOvUINA5RYTMDasvlW1YPVlQslnPAYcb11XGFdb2GrCSL62QnptK1GJNyd+NHOJmsR6d3QuMA47qZIaaXYwoWS1+womKxnCPrBwv5/iu51cJM2rHnCI0wwREHR+Cz9R/hmbWvRXGZStdB4wBRZS8n002MONUsvdhiWQVYUbFYzpE1ZQ9hpjZY+4XnCJ4rNKMURzK3WNMUCZJszYtxClTNCM70w0wka4jUg7iybPOwWJYSKyoWyyLpdn9BFmfxnFxU8pTjNNXOniquCCfi9XjETMVDVNNBTMuuVbGsDqyoWCyLpLOOpSu12HOExGhmyQCpMZ3y944Ix8JhAh2gkbjU0wGa1WPLMXSLZcmxomKxLJK2C6ytKevKfmf3SEcyl1eqdDbqEoGxeC11HSZKDXVTplk7ujyDt1iWGLtOxWJZAhyRTo2wcsElTjurVzL3l8nqfjkiqCh7g8vZEDnEqRJqiah+fPkGb7EsIdZSsViWAM8VnC7/V3dtsPbRaivOjwtVM8QDrecSp4aIMknTxlQsqwNrqVgsS8Cla0qz3kt3QTCyGmHTrTgL3Bul6ApRbEhSJaSMtmxMxbI6sJaKxdIHui0UyCyXaivu7MdS9F1acYoALR3ACa37y7I66KuoiMgtIrJbRPaKyNvmOS8i8q78/KMi8sIz9RWR9SLyRRHZkz+vy49fJSItEXk4f7y3n3OzWBZCoCMgkKUZN6O0c8wRQRVcVziZjLK59R2o7Vum0VosS0ffREVEXODdwK3ADuB1IrJjTrNbge3543bgPT30fRvwZVXdDnw5f99mn6q+IH/c0Z+ZWSy94XaZKg5Z4UlXhIGCy1DJw3OynSQndQOP683oV34cksayjddiWQr6aancBOxV1f2qGgEfBW6b0+Y24IOacS+wVkQuOUPf24AP5K8/APxkH+dgsZwz0lXCRfLSLY5A0XMouILrSi4yHp8/eS37ph30yb9Y5lFbLIujn6JyGXCo6/3h/FgvbRbqu1lVjwHkz5u62m0TkYdE5Ksi8rL5BiUit4vIAyLywNiYLTlu6R9ZmvHM67n72ruSZYwVXGF0pMQXp19E/PifQFxdhtFaLEtDP0VF5jk2t2re6dr00ncux4ArVPUG4K3Ah0Vk5JSLqL5PVW9U1RtHR0fPcEmLZXF0lwXz3FMzwtpHHCApbGZn4zLMfW+ByQc77VSVQ5PN8zJei2Wx9FNUDgOXd73fCsxdNny6Ngv1PZG7yMifTwKoaqiqE/nrB4F9wLVLMhOL5Rxpr10RmR1jgZmKxm1KvsPXWy9hbGw/fPmH4cS/AfCtfRO86e/vP19DtlgWRT9F5X5gu4hsE5EC8Frgrjlt7gLekGeB3QxUcpfWQn3vAt6Yv34j8BkAERnNA/yIyNVkwf/9/ZuexbIwwyWPopf9imWlXOYa4ILMMcpTd4TPTr0MLnkV7PtbAO5/epKxengeRmyxLJ6+iYqqJsBbgHuAXcCdqrpTRO4QkXZm1t1kX/x7gfcDv75Q37zPO4FXicge4FX5e4CXA4+KyCPAx4E7VHWyX/OzWM7EYMHF65gip3p0RU497DpCPYxh/ffC4c9A0uCb+8ZphEm258qj/xUS6wqzXLj0dUW9qt5NJhzdx97b9VqBN/faNz8+AbxynuOfAD6xyCFbLH1B0Vm7Qp4O13GotmJqZgDjXcXwM59k59FRklSJDn6KwuP/HS7/d7DuBf0ftMVyDtgV9RbLecB3nY4rrE3JP/WYK9CKU/aPNfjXsauIH/0DBr2EwaLgPPJ28IagcQiL5ULF1v6yWM4DI6VTf9XKvnvKMccRklSZasbcV7mSl6w7yO9u/isiIxhjMrdY04qK5cLFWioWywWEAAVPODrdAoR/OPZ93Fx+gCsLRzk++nrwh6HxzHIP02I5LdZSsVguMDzH4eh0C88RJkOfT0S/RLUV8+p0gLFokNHG08s9RIvltFhLxWK5wHAdYbIRMVBwKXoOnpNtALZvrM5nnmgRVg4s9xAtltNiLRWL5QLDkaz45EjZn3X88FSLIB2iWXmG4vIMzWI5I9ZSsVguMNqZx15eF6zNsUqL1FvLoBkHNZ3jDx60y7EsFw5WVCyWC5B2ReM2jgipAdcrEGiBRjXbKbJSb/Hz7/m6rQ1muWCwomKxXGA4znwVjbNn3xUqZg3jJ/eiD72Nwbsv4z1X/iFf22MrblsuDKyoWCwXGHOtFMj2ZoFsEWXNDJPs/SBjj/0Njw38IjcM7mbfE19ahpFaLKdiA/UWywVGOc/66saVTGhcR2jqIM+e/Hs+Uf1BDjd8rnVv4me8PyJ5osb99ev45yNbedutz2ak5J/mDhZL/7CWisVygeFAVyHKDN8VRko+AjTMAJPpCI80r+VkLWSfeS7TZh2HHvsIO3b/HNH+j/Ohew8uy9gtFmupWCwrANcR1g9mlsee5NnsrF/CQKlILUjwPY/7kx/g+MGQHWu283ub/5w/+K5h6nt/l3WDhWUeueViw1oqFssKoylrORRtYbjksabs4TpCueCyYbBA07uML8Y/xe9s+gse/dBLCT53MySN5R6y5SLCio
rFssLwHCevEeawYbBAe1uWNWUPR2BSN3FP/LNUdSO1RgX2/s0yj9hyMWFFxWJZYXiucOna0oK/vNO6kf26gy9Nvwie+GNIo/M2PsvFjRUVi2WFIXBKdth8lAouu6rrif311Hd/AGO0/4OzXPTYQL3FskpxgOGyx90nd/D88d/n29VXcaQS8NJnjfKSazYs9/AsqxRrqVgsq5iRss+u5qUUnJR7v/kh3v+1p/nPH3uIIE5Jd/0FevLryz1EyyrDiorFsopxgA2DRXbri/i1dR/gRzbtpSwB3/7K36Lf/X84+PXfWe4hWlYZ1v1lsVwE7DfPZp03xm+t/xM2bDyBnhD+YfLV/Pz6ezCtCZyydYdZlgYrKhbLRYDi8kDyivxdyvh0BfGH2BtdxVc/+Ed8qflD/OKLt3K4UaQZGX7r1dcz2YiYqIds3zy8nEO3rDCsqFgsFx0uhdIIvis8o9fxhsH38yuDf4XuV3xJSNTjiU/ewh+duJ3Hxlw+/59ezqcePMCGypf42Vf9BE/W1lH0HLZtHOSZySZXbhhc7glZLiD6KioicgvwF4AL/I2qvnPOecnP/xjQBH5JVb+7UF8RWQ98DLgKOAD8nKpO5efeDvwKkAL/UVXv6ef8LJaVykgp+9U/zrP4bpJwyGyjpQM4RLga85zgW7x/+Cc5MbCJQ3eu4bbCOIk61D/9dv5l4uf5Wv1FvHjbKN946ijXXHktP3XJExw6spvrvv838Ac2E8SGm69ez1gtZHS4iIicYUSW1ULfREVEXODdwKuAw8D9InKXqj7R1exWYHv+eDHwHuDFZ+j7NuDLqvpOEXlb/v43RWQH8FrgOcClwJdE5FpVTfs1R4tlpaO47DXPAcjL7RdIpcAjvIqd0csZYRrfaXLUFDmcbGGgdYJb1j3AL67/DEksvOkajxGZ5NDxrax1BnnWAy/kq43v5/O1l/HxQpktyeNct8HwjdoNXLJlG8+5/BJOBj6xu46brt7AtZuH8SVfP+O4y/Y5WJaOfloqNwF7VXU/gIh8FLgN6BaV24APqqoC94rIWhG5hMwKOV3f24BX5P0/AHwF+M38+EdVNQSeFpG9+Ri+3a8J+kQMm6P9urzFsuwkQEIRUtggx8GHR6PnAc+bt/1Bs50d5cd5xeAf45IybkZppR63rvsofpTAvq7Gz8zuW09LpOpR0xH2hFeypVTDcQocjrfgug6FQpmWDpGo4DmC6zj4hRKJcXAdQRyP2Aiu5wMeqgbXgRQfg4snipDikHJi6Aep+tto209BbFAUR/JrtzdJUyU1iusICsSp4ki2ANVoVj0aEdI0297ZdWRmP+g2qrSXnQqgZDt5GtXs/vm2BsLsforSXq8qWTOMZq+dOfcw+T1mrp9dr3187vUV5fmXreV5W9fM+3NcDP0UlcuAQ13vD5NZI2dqc9kZ+m5W1WMAqnpMRDZ1Xeveea41CxG5Hbg9f1sXkd29Tqgbv+BftX0L60U+di7dVwRTNcO64dWbdb7a5wcXwhxP5M8CzOzv4qC4koojSmw8RBRfgvxsHej9j7WxGoyeZS7BWPy/OZ5sPLtOy0jarOAOLK0AmCioJZOHnzrH7lee7kQ/RWU+J+rcOhGna9NL33O5H6r6PuB9Z7jWGRGRB3Ye1JXzv/IcEJEHjkwkNy73OPrFap8fXDxzPDimq36OSeXkiphjP/+EOQxc3vV+K6f++XG6Ngv1PZG7yMifT57F/SwWi8XSR/opKvcD20Vkm4gUyILod81pcxfwBsm4Gajkrq2F+t4FvDF//UbgM13HXysiRRHZRhb8v69fk7NYLBbLqfTN/aWqiYi8BbiHLC3471R1p4jckZ9/L3A3WTrxXrKU4l9eqG9+6XcCd4rIr5CF+n4277NTRO4kC+YnwJv7nPm1aBfaCmC1z3G1zw/sHFcLK2aOkiVeWSwWi8WyeFZ36ovFYrFYzitWVCwWi8WyZFhROUtE5BYR2S0ie/MV/SsSEblcRP5NRHaJyE4R+U/58fUi8kUR2ZM/r+vq8/Z83rtF5EeXb/S9IyKuiDwkIp/N36+2+a0VkY+LyJP5z/Ilq3COv5H/H31cRD4iIqWVPkcR+TsROSkij3cdO+s5iciLROSx/Ny75EKoh6Oq9tHjgyxpYB9wNVAAHgF2LPe4znEulwAvzF8PA08BO4D/AbwtP/424I/z1zvy+RaBbfnn4C73PHqY51uBDwOfzd+vtvl9APi/89cFYO1qmiPZAuangXL+/k7gl1b6HIGXAy8EHu86dtZzIstwfQnZOr1/AW5d7rlZS+Xs6JSeUdUIaJePWXGo6jHNi3eqag3YRfYLfBvZFxX580/mrztlcFT1abKMvZvO66DPEhHZCrwa+Juuw6tpfiNkX05/C6CqkapOs4rmmOMBZRHxgAGy9Wcreo6q+jVgcs7hs5pTvk5vRFW/rZnCfLCrz7JhReXsOF1ZmRWNiFwF3AB8hzllcIDuMjgrbe5/DvwXwHQdW03zuxoYA/5P7uL7GxEZZBXNUVWPAH9KtnzgGNlati+wiubYxdnO6bL89dzjy4oVlbPjXMrHXNCIyBDwCeA/q2p1oabzHLtg5y4iPw6cVNUHe+0yz7ELdn45HpkL5T2qegPQIHObnI4VN8c8rnAbmdvnUmBQRF6/UJd5jl3Qc+yBpSxn1XesqJwdq6oUjIj4ZILyIVX9ZH54tZTB+X7gNSJygMxN+UMi8o+snvlBNubDqvqd/P3HyURmNc3xh4GnVXVMVWPgk8D3sbrm2OZs53Q4fz33+LJiReXs6KX0zIogzxL5W2CXqv5Z16lVUQZHVd+uqltV9Sqyn9O/qurrWSXzA1DV48AhEbkuP/RKsooSq2aOZG6vm0VkIP8/+0qy+N9qmmObs5pT7iKricjN+Wfzhq4+y8dyZwqstAdZWZmnyDIwfnu5x7OIebyUzFR+FHg4f/wYsAH4MrAnf17f1ee383nv5gLIMjmLub6CmeyvVTU/4AXAA/nP8dPAulU4x98HngQeB/6BLAtqRc8R+AhZjCgmszh+5VzmBNyYfy77gL8kr5KynA9bpsVisVgsS4Z1f1ksFotlybCiYrFYLJYlw4qKxWKxWJYMKyoWi8ViWTKsqFgsFotlybCiYrFYLJYlw4qKxWKxWJaM/x9uWbMQyVqsbgAAAABJRU5ErkJggg==",
96 | "text/plain": [
97 | ""
98 | ]
99 | },
100 | "metadata": {
101 | "needs_background": "light"
102 | },
103 | "output_type": "display_data"
104 | }
105 | ],
106 | "source": [
107 | "sns.histplot(train_data.flatten(),stat='density',discrete=False,element='poly')\n",
108 | "sns.histplot(test_data.flatten(),color='orange',stat='density',discrete=False,alpha=0.4,element='poly')"
109 | ]
110 | }
111 | ],
112 | "metadata": {
113 | "interpreter": {
114 | "hash": "b3ba2566441a7c06988d0923437866b63cedc61552a5af99d1f4fb67d367b25f"
115 | },
116 | "kernelspec": {
117 | "display_name": "Python 3.8.8 64-bit ('base': conda)",
118 | "name": "python3"
119 | },
120 | "language_info": {
121 | "codemirror_mode": {
122 | "name": "ipython",
123 | "version": 3
124 | },
125 | "file_extension": ".py",
126 | "mimetype": "text/x-python",
127 | "name": "python",
128 | "nbconvert_exporter": "python",
129 | "pygments_lexer": "ipython3",
130 | "version": "3.8.8"
131 | },
132 | "orig_nbformat": 4
133 | },
134 | "nbformat": 4,
135 | "nbformat_minor": 2
136 | }
137 |
--------------------------------------------------------------------------------
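The notebook cell above overlays density histograms of the flattened train and test arrays to compare their value distributions. A minimal standalone sketch of the same comparison, assuming train_data and test_data are hypothetical stand-in numpy arrays (the real ones come from the dataset loaders) and seaborn >= 0.11 for histplot:

import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Hypothetical stand-ins for the notebook's train_data / test_data arrays.
train_data = np.random.randn(10000)
test_data = np.random.randn(5000) * 1.2 + 0.3

# Overlaid density histograms, matching the notebook cell above.
sns.histplot(train_data.flatten(), stat='density', discrete=False, element='poly')
sns.histplot(test_data.flatten(), color='orange', stat='density', discrete=False, alpha=0.4, element='poly')
plt.show()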
/utils/math_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | def MAPE(v, v_, axis=None):
6 |     '''
7 |     Mean absolute percentage error.
8 |     :param v: np.ndarray or int, ground truth.
9 |     :param v_: np.ndarray or int, prediction.
10 |     :param axis: axis to do calculation.
11 |     :return: float, MAPE averaged over all elements of the input.
12 |     '''
13 |     mape = (np.abs(v_ - v) / (np.abs(v) + 1e-5)).astype(np.float64)  # epsilon in the denominator guards against division by zero
14 |     mape = np.where(mape > 5, 0, mape)  # ratios above 5 come from near-zero ground truth; treat them as outliers
15 |     return np.mean(mape, axis)
16 |
17 |
18 | def RMSE(v, v_, axis=None):
19 |     '''
20 |     Root mean squared error.
21 |     :param v: np.ndarray or int, ground truth.
22 |     :param v_: np.ndarray or int, prediction.
23 |     :param axis: axis to do calculation.
24 |     :return: float, RMSE averaged over all elements of the input.
25 |     '''
26 |     return np.sqrt(np.mean((v_ - v) ** 2, axis)).astype(np.float64)
27 |
28 |
29 | def MAE(v, v_, axis=None):
30 |     '''
31 |     Mean absolute error.
32 |     :param v: np.ndarray or int, ground truth.
33 |     :param v_: np.ndarray or int, prediction.
34 |     :param axis: axis to do calculation.
35 |     :return: float, MAE averaged over all elements of the input.
36 |     '''
37 |     return np.mean(np.abs(v_ - v), axis).astype(np.float64)
38 |
39 |
40 | def evaluate(y, y_hat, by_step=False, by_node=False):
41 |     '''
42 |     :param y: array of shape [count, time_step, node], ground truth.
43 |     :param y_hat: array of the same shape as y, prediction.
44 |     :param by_step: evaluate along the time_step dim.
45 |     :param by_node: evaluate along the node dim.
46 |     :return: tuple of mape, mae and rmse (scalars, or arrays when a flag is set).
47 |     '''
48 | if not by_step and not by_node:
49 | return MAPE(y, y_hat), MAE(y, y_hat), RMSE(y, y_hat)
50 | if by_step and by_node:
51 | return MAPE(y, y_hat, axis=0), MAE(y, y_hat, axis=0), RMSE(y, y_hat, axis=0)
52 | if by_step:
53 | return MAPE(y, y_hat, axis=(0, 2)), MAE(y, y_hat, axis=(0, 2)), RMSE(y, y_hat, axis=(0, 2))
54 | if by_node:
55 | return MAPE(y, y_hat, axis=(0, 1)), MAE(y, y_hat, axis=(0, 1)), RMSE(y, y_hat, axis=(0, 1))
56 |
57 |
58 | def creatMask(x):
59 |     '''Return a random boolean mask (roughly 80% True entries) with the same shape as x.'''
60 |     b, l, c = x.shape
61 |     mask_ratio = torch.nn.Dropout(p=0.2)
62 |     Mask = torch.ones(b, l, c, device=x.device)
63 |     Mask = mask_ratio(Mask)  # zeroes ~20% of the entries; survivors are rescaled but stay positive
64 |     Mask = Mask > 0  # boolean mask: True where an entry survived dropout
65 |     # The input itself is not modified here; callers may apply the mask, e.g.
66 |     # x.masked_fill_(Mask, 0)
67 |     return Mask
68 |
69 | def normal_std(x):
70 | return x.std() * np.sqrt((len(x) - 1.) / (len(x)))
71 |
72 |
73 | def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
74 | """
75 | very similar to the smooth_l1_loss from pytorch, but with
76 | the extra beta parameter
77 | """
78 | n = torch.abs(input - target)
79 | cond = n < beta
80 | loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
81 | if size_average:
82 | return loss.mean()
83 | return loss.sum()
84 |
85 |
--------------------------------------------------------------------------------
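A quick sanity check of the metrics above -- a minimal sketch, assuming toy synthetic arrays in the [count, time_step, node] layout that evaluate() expects (none of this data comes from the repo):

import numpy as np
from utils.math_utils import evaluate

rng = np.random.default_rng(0)
y = rng.uniform(1.0, 10.0, size=(4, 12, 3))     # ground truth, kept away from zero so MAPE is stable
y_hat = y + rng.normal(0.0, 0.1, size=y.shape)  # prediction = truth + small noise

mape, mae, rmse = evaluate(y, y_hat)            # scalars over all elements
_, mae_by_step, _ = evaluate(y, y_hat, by_step=True)
print(f'MAPE={mape:.4f} MAE={mae:.4f} RMSE={rmse:.4f}')
print('per-step MAE shape:', mae_by_step.shape)  # (12,)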
/utils/recursive_demo.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch.nn.functional as F
3 | from torch.autograd import Variable
4 | from torch import nn
5 | import torch
6 | from torch.nn.utils import weight_norm
7 | import argparse
8 | import numpy as np
9 |
10 | class Splitting(nn.Module):
11 | def __init__(self):
12 | super(Splitting, self).__init__()
13 |         # Decide the stride based on the direction:
14 |         # self.conv_even = lambda x: x[:, ::2, :]
15 |         # self.conv_odd = lambda x: x[:, 1::2, :]
16 |         # To simplify, we keep only the length dimension here.
17 |
18 | def even(self, x):
19 | return x[::2]
20 |
21 | def odd(self, x):
22 | return x[1::2]
23 |
24 | def forward(self, x):
25 |         '''Return the even and odd parts of x'''
26 | return self.even(x), self.odd(x)
27 |
28 | class SCINet_Tree(nn.Module):
29 | def __init__(self, args, in_planes, current_layer):
30 | super().__init__()
31 | self.current_layer=current_layer
32 |         self.workingblock = Splitting()  # To simplify, we replace the actual SCINet block with a plain splitting block.
33 | if current_layer!=0:
34 | self.SCINet_Tree_odd=SCINet_Tree(args, in_planes, current_layer-1)
35 | self.SCINet_Tree_even=SCINet_Tree(args, in_planes, current_layer-1)
36 |
37 | def zip_up_the_pants(self, even, odd):
38 | even_len=even.shape[0]
39 | odd_len=odd.shape[0]
40 | mlen=min((odd_len,even_len))
41 | _=[]
42 | for i in range(mlen):
43 | _.append(even[i].unsqueeze(0))
44 | _.append(odd[i].unsqueeze(0))
45 |         if odd_len < even_len:
46 |             _.append(even[-1].unsqueeze(0))
47 |         return torch.cat(_, 0)
48 | 
49 |     def forward(self, x):
50 |         x_even_update, x_odd_update = self.workingblock(x)
51 |         # Recurse on the two sub-series, then interleave the results back in order.
52 |         if self.current_layer == 0:
53 |             return self.zip_up_the_pants(x_even_update, x_odd_update)
54 |         else:
55 |             return self.zip_up_the_pants(self.SCINet_Tree_even(x_even_update), self.SCINet_Tree_odd(x_odd_update))
--------------------------------------------------------------------------------
/utils/timefeatures.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 | 
3 | import numpy as np
4 | import pandas as pd
5 | from pandas.tseries import offsets
6 | from pandas.tseries.frequencies import to_offset
7 | 
8 | class TimeFeature:
9 |     def __init__(self):
10 |         pass
11 | 
12 |     def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
13 | pass
14 |
15 | def __repr__(self):
16 | return self.__class__.__name__ + "()"
17 |
18 | class SecondOfMinute(TimeFeature):
19 | """Minute of hour encoded as value between [-0.5, 0.5]"""
20 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
21 | return index.second / 59.0 - 0.5
22 |
23 | class MinuteOfHour(TimeFeature):
24 | """Minute of hour encoded as value between [-0.5, 0.5]"""
25 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
26 | return index.minute / 59.0 - 0.5
27 |
28 | class HourOfDay(TimeFeature):
29 | """Hour of day encoded as value between [-0.5, 0.5]"""
30 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
31 | return index.hour / 23.0 - 0.5
32 |
33 | class DayOfWeek(TimeFeature):
34 | """Hour of day encoded as value between [-0.5, 0.5]"""
35 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
36 | return index.dayofweek / 6.0 - 0.5
37 |
38 | class DayOfMonth(TimeFeature):
39 | """Day of month encoded as value between [-0.5, 0.5]"""
40 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
41 | return (index.day - 1) / 30.0 - 0.5
42 |
43 | class DayOfYear(TimeFeature):
44 | """Day of year encoded as value between [-0.5, 0.5]"""
45 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
46 | return (index.dayofyear - 1) / 365.0 - 0.5
47 |
48 | class MonthOfYear(TimeFeature):
49 | """Month of year encoded as value between [-0.5, 0.5]"""
50 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
51 | return (index.month - 1) / 11.0 - 0.5
52 |
53 | class WeekOfYear(TimeFeature):
54 | """Week of year encoded as value between [-0.5, 0.5]"""
55 | def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
56 | return (index.isocalendar().week - 1) / 52.0 - 0.5
57 |
58 | def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
59 | """
60 | Returns a list of time features that will be appropriate for the given frequency string.
61 | Parameters
62 | ----------
63 | freq_str
64 | Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
65 | """
66 |
67 | features_by_offsets = {
68 | offsets.YearEnd: [],
69 | offsets.QuarterEnd: [MonthOfYear],
70 | offsets.MonthEnd: [MonthOfYear],
71 | offsets.Week: [DayOfMonth, WeekOfYear],
72 | offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
73 | offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
74 | offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
75 | offsets.Minute: [
76 | MinuteOfHour,
77 | HourOfDay,
78 | DayOfWeek,
79 | DayOfMonth,
80 | DayOfYear,
81 | ],
82 | offsets.Second: [
83 | SecondOfMinute,
84 | MinuteOfHour,
85 | HourOfDay,
86 | DayOfWeek,
87 | DayOfMonth,
88 | DayOfYear,
89 | ],
90 | }
91 |
92 | offset = to_offset(freq_str)
93 |
94 | for offset_type, feature_classes in features_by_offsets.items():
95 | if isinstance(offset, offset_type):
96 | return [cls() for cls in feature_classes]
97 |
98 | supported_freq_msg = f"""
99 | Unsupported frequency {freq_str}
100 | The following frequencies are supported:
101 | Y - yearly
102 | alias: A
103 | M - monthly
104 | W - weekly
105 | D - daily
106 | B - business days
107 | H - hourly
108 | T - minutely
109 | alias: min
110 | S - secondly
111 | """
112 | raise RuntimeError(supported_freq_msg)
113 |
114 | def time_features(dates, timeenc=1, freq='h'):
115 | if timeenc==0:
116 | dates['month'] = dates.date.apply(lambda row:row.month,1)
117 | dates['day'] = dates.date.apply(lambda row:row.day,1)
118 | dates['weekday'] = dates.date.apply(lambda row:row.weekday(),1)
119 | dates['hour'] = dates.date.apply(lambda row:row.hour,1)
120 | dates['minute'] = dates.date.apply(lambda row:row.minute,1)
121 | dates['minute'] = dates.minute.map(lambda x:x//15)
122 | freq_map = {
123 | 'y':[],'m':['month'],'w':['month'],'d':['month','day','weekday'],
124 | 'b':['month','day','weekday'],'h':['month','day','weekday','hour'],
125 | 't':['month','day','weekday','hour','minute'],
126 | }
127 | return dates[freq_map[freq.lower()]].values
128 | if timeenc==1:
129 | dates = pd.to_datetime(dates.date.values)
130 | return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)]).transpose(1,0)
--------------------------------------------------------------------------------
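To see which encoders a frequency string resolves to, and the value range they produce -- a minimal sketch, assuming a hypothetical hourly date range:

import pandas as pd
from utils.timefeatures import time_features_from_frequency_str

idx = pd.date_range('2021-01-01', periods=48, freq='h')  # hypothetical hourly index
feats = time_features_from_frequency_str('h')            # resolves to the offsets.Hour entry above
print(feats)  # [HourOfDay(), DayOfWeek(), DayOfMonth(), DayOfYear()]
for f in feats:
    vals = f(idx)
    print(repr(f), float(vals.min()), float(vals.max()))  # each encoding stays within [-0.5, 0.5]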
/utils/tools.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import numpy as np
4 | import torch
5 |
6 | def save_model(epoch, lr, model, model_dir, model_name='pems08', horizon=12):
7 | if model_dir is None:
8 | return
9 | if not os.path.exists(model_dir):
10 | os.makedirs(model_dir)
11 | file_name = os.path.join(model_dir, model_name+str(horizon)+'.bin')
12 | torch.save(
13 | {
14 | 'epoch': epoch,
15 | 'lr': lr,
16 | 'model': model.state_dict(),
17 | }, file_name)
18 |     print('saved model to', file_name)
19 |
20 |
21 | def load_model(model, model_dir, model_name='pems08', horizon=12):
22 | if not model_dir:
23 | return
24 | file_name = os.path.join(model_dir, model_name+str(horizon)+'.bin')
25 |
26 | if not os.path.exists(file_name):
27 | return
28 | with open(file_name, 'rb') as f:
29 | checkpoint = torch.load(f, map_location=lambda storage, loc: storage)
30 | print('This model was trained for {} epochs'.format(checkpoint['epoch']))
31 | model.load_state_dict(checkpoint['model'])
32 | epoch = checkpoint['epoch']
33 | lr = checkpoint['lr']
34 | print('loaded the model...', file_name, 'now lr:', lr, 'now epoch:', epoch)
35 | return model, lr, epoch
36 |
37 | def adjust_learning_rate(optimizer, epoch, args):
38 |     if args.lradj == 1:
39 |         lr_adjust = {epoch: args.lr * (0.95 ** epoch)}  # exponential decay: 5% per epoch
40 | 
41 |     elif args.lradj == 2:
42 |         lr_adjust = {
43 |             0: 0.0001, 5: 0.0005, 10: 0.001, 20: 0.0001, 30: 0.00005, 40: 0.00001,
44 |             70: 0.000001
45 |         }
46 |     else: lr_adjust = {}  # any other lradj value: keep the optimizer's current lr
47 | if epoch in lr_adjust.keys():
48 | lr = lr_adjust[epoch]
49 | for param_group in optimizer.param_groups:
50 | param_group['lr'] = lr
51 | print('Updating learning rate to {}'.format(lr))
52 | else:
53 | for param_group in optimizer.param_groups:
54 | lr = param_group['lr']
55 | return lr
56 |
57 | class EarlyStopping:
58 | def __init__(self, patience=7, verbose=False, delta=0):
59 | self.patience = patience
60 | self.verbose = verbose
61 | self.counter = 0
62 | self.best_score = None
63 | self.early_stop = False
64 |         self.val_loss_min = np.inf  # np.Inf was removed in NumPy 2.0
65 | self.delta = delta
66 |
67 | def __call__(self, val_loss, model, path):
68 | score = -val_loss
69 | if self.best_score is None:
70 | self.best_score = score
71 | self.save_checkpoint(val_loss, model, path)
72 | elif score < self.best_score + self.delta:
73 | self.counter += 1
74 | print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
75 | if self.counter >= self.patience:
76 | self.early_stop = True
77 | else:
78 | self.best_score = score
79 | self.save_checkpoint(val_loss, model, path)
80 | self.counter = 0
81 |
82 | def save_checkpoint(self, val_loss, model, path):
83 | if self.verbose:
84 | print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
85 | torch.save(model.state_dict(), path+'/'+'checkpoint.pth')
86 | self.val_loss_min = val_loss
87 |
88 | class dotdict(dict):
89 | """dot.notation access to dictionary attributes"""
90 | __getattr__ = dict.get
91 | __setattr__ = dict.__setitem__
92 | __delattr__ = dict.__delitem__
93 |
94 | class StandardScaler():
95 | def __init__(self):
96 | self.mean = 0.
97 | self.std = 1.
98 |
99 | def fit(self, data):
100 | self.mean = data.mean(0)
101 | self.std = data.std(0)
102 |
103 | def transform(self, data):
104 | mean = torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean
105 | std = torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std
106 | return (data - mean) / std
107 |
108 | def inverse_transform(self, data):
109 | mean = torch.from_numpy(self.mean).type_as(data).to(data.device) if torch.is_tensor(data) else self.mean
110 | std = torch.from_numpy(self.std).type_as(data).to(data.device) if torch.is_tensor(data) else self.std
111 | return (data * std) + mean
112 |
--------------------------------------------------------------------------------
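Two of the helpers above in action -- a minimal sketch with a synthetic loss curve, a stand-in model, and a hypothetical checkpoint directory:

import os
import numpy as np
import torch
from utils.tools import EarlyStopping, StandardScaler

# StandardScaler round-trip: fit on the training split only, then reuse elsewhere.
scaler = StandardScaler()
train = np.random.randn(100, 7) * 3.0 + 5.0
scaler.fit(train)
normed = scaler.transform(train)
assert np.allclose(scaler.inverse_transform(normed), train)

# EarlyStopping: triggers after patience epochs without validation improvement.
model = torch.nn.Linear(7, 1)   # hypothetical stand-in model
ckpt_dir = './exp/demo'         # hypothetical path; save_checkpoint writes checkpoint.pth here
os.makedirs(ckpt_dir, exist_ok=True)
stopper = EarlyStopping(patience=3, verbose=True)
for epoch, val_loss in enumerate([1.0, 0.9, 0.95, 0.96, 0.97, 0.98]):
    stopper(val_loss, model, ckpt_dir)  # saves a checkpoint whenever val_loss improves
    if stopper.early_stop:
        print('early stopping at epoch', epoch)
        break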