├── .idea
│   ├── .gitignore
│   ├── misc.xml
│   ├── vcs.xml
│   ├── inspectionProfiles
│   │   ├── profiles_settings.xml
│   │   └── Project_Default.xml
│   ├── modules.xml
│   └── transformer-vs-lstm-forecasting.iml
├── res_readme
│   ├── T_1.png
│   ├── T_2.png
│   ├── LSTM_1.png
│   ├── LSTM_2.png
│   ├── t_p_5.png
│   └── t_p_6.png
├── bash
│   ├── train_lstm0.sh
│   ├── train_lstm4.sh
│   ├── train_lstm2.sh
│   ├── train_lstm6.sh
│   ├── train_transformer0.sh
│   ├── train_transformer8.sh
│   ├── train_transformer4.sh
│   ├── train_transformer12.sh
│   ├── train_lstm1.sh
│   ├── train_lstm5.sh
│   ├── train_lstm3.sh
│   ├── train_lstm7.sh
│   ├── train_transformer1.sh
│   ├── train_transformer10.sh
│   ├── train_transformer2.sh
│   ├── train_transformer9.sh
│   ├── train_transformer5.sh
│   ├── train_transformer6.sh
│   ├── train_transformer13.sh
│   ├── train_transformer14.sh
│   ├── train_transformer3.sh
│   ├── train_transformer11.sh
│   ├── train_transformer7.sh
│   ├── train_transformer15.sh
│   ├── test_lstm.sh
│   ├── generate_data0.sh
│   ├── generate_data1.sh
│   ├── test_transformer.sh
│   ├── train_lstm.sh
│   ├── Untitled.ipynb
│   ├── train_transformer_.sh
│   └── train_transformer.sh
├── plot_images.py
├── .gitignore
├── README.md
├── data_utils.py
├── scripts
│   ├── generate_time_series.py
│   └── preprocess_time_series.py
├── models
│   ├── transformer.py
│   └── lstm.py
├── train.py
├── test.py
└── viz.ipynb
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/res_readme/T_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/linyijun/transformer-vs-lstm-forecasting/HEAD/res_readme/T_1.png
--------------------------------------------------------------------------------
/res_readme/T_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/linyijun/transformer-vs-lstm-forecasting/HEAD/res_readme/T_2.png
--------------------------------------------------------------------------------
/res_readme/LSTM_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/linyijun/transformer-vs-lstm-forecasting/HEAD/res_readme/LSTM_1.png
--------------------------------------------------------------------------------
/res_readme/LSTM_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/linyijun/transformer-vs-lstm-forecasting/HEAD/res_readme/LSTM_2.png
--------------------------------------------------------------------------------
/res_readme/t_p_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/linyijun/transformer-vs-lstm-forecasting/HEAD/res_readme/t_p_5.png
--------------------------------------------------------------------------------
/res_readme/t_p_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/linyijun/transformer-vs-lstm-forecasting/HEAD/res_readme/t_p_6.png
--------------------------------------------------------------------------------
/bash/train_lstm0.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p7_10000
18 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 35
19 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
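
Note on the --auxiliary_feat flag used throughout these train_*.sh scripts: train.py itself is not included in this dump, but the digit string appears to index into the feature list in the comment above (0 = day_of_week, ..., 5 = year), so 35 selects month and year while 012345 selects all six. A minimal sketch of that mapping, under that assumption:

# Hypothetical illustration of how the --auxiliary_feat digit string maps onto
# the feature names listed in the comment above (assumption: digits are indices).
AUX_FEATURES = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]

def parse_auxiliary_feat(digits: str) -> list:
    """e.g. "35" -> ["month", "year"]; "012345" -> all six features."""
    return [AUX_FEATURES[int(d)] for d in digits]

assert parse_auxiliary_feat("35") == ["month", "year"]
assert parse_auxiliary_feat("012345") == AUX_FEATURES
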
/bash/train_lstm4.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p11_10000
18 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 35
19 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
/bash/train_lstm2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p71428_10000
18 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 35
19 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
/bash/train_lstm6.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p112337_10000
18 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 35
19 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
/bash/train_transformer0.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p7_10000
18 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 35
19 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
/bash/train_transformer8.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p11_10000
18 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 35
19 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
/bash/train_transformer4.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p71428_10000
18 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 35
19 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
/bash/train_transformer12.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p112337_10000
18 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 35
19 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 012345
20 |
--------------------------------------------------------------------------------
/bash/train_lstm1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p7_10000 --use_periodic_as_feat
18 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_lstm5.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p11_10000 --use_periodic_as_feat
18 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_lstm3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p71428_10000 --use_periodic_as_feat
18 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_lstm7.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p112337_10000 --use_periodic_as_feat
18 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_transformer1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat
18 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_transformer10.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 35 --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 012345 --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/train_transformer2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 35 --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 012345 --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/train_transformer9.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat
18 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_transformer5.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat
18 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_transformer6.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 35 --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 012345 --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/train_transformer13.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat
18 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 35 --use_periodic_as_feat
19 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 012345 --use_periodic_as_feat
20 |
--------------------------------------------------------------------------------
/bash/train_transformer14.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 35 --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 012345 --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/train_transformer3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 35 --use_periodic_as_feat --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 012345 --use_periodic_as_feat --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/train_transformer11.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 35 --use_periodic_as_feat --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 012345 --use_periodic_as_feat --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/train_transformer7.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 35 --use_periodic_as_feat --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 012345 --use_periodic_as_feat --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/train_transformer15.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat --use_periodic_encoder
18 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 35 --use_periodic_as_feat --use_periodic_encoder
19 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 012345 --use_periodic_as_feat --use_periodic_encoder
20 |
--------------------------------------------------------------------------------
/bash/test_lstm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-adding-periodic/
14 |
15 | # python test.py --model_name lstm_aux0_penc0_pfeat0_1637795369 --num_test 2000
16 | # python test.py --model_name lstm_aux0_penc0_pfeat1_1637892971 --num_test 2000
17 | # python test.py --model_name lstm_aux1_penc0_pfeat0_1637792135 --num_test 2000
18 | # python test.py --model_name lstm_aux1_penc0_pfeat1_1637827717 --num_test 2000
19 |
20 | python test.py --model_name lstm_aux1_penc0_pfeat1_p10_5000_1638189182 --num_test 1000
21 | python test.py --model_name lstm_aux1_penc0_pfeat1_p7_5000_1638164884 --num_test 1000
22 | python test.py --model_name lstm_aux0_penc0_pfeat0_p7_5000_1638152573 --num_test 1000
23 | python test.py --model_name lstm_aux1_penc0_pfeat0_p7_5000_1638152573 --num_test 1000
24 | python test.py --model_name lstm_aux0_penc0_pfeat1_p7_5000_1638167639 --num_test 1000
25 | python test.py --model_name lstm_aux0_penc0_pfeat0_p10_5000_1638172477 --num_test 1000
26 | python test.py --model_name lstm_aux1_penc0_pfeat0_p10_5000_1638182825 --num_test 1000
27 |
--------------------------------------------------------------------------------
/bash/generate_data0.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | cd /data/yijun/transformer-vs-lstm-forecasting/
3 |
4 | python scripts/generate_time_series.py --data_path data/data_p7_10000.csv --num_samples 10000 --periods 7
5 | python scripts/preprocess_time_series.py --data_path data/data_p7_10000.csv --preprocess_data_path data/preprocess_data_p7_10000.csv --config_path data/config_p7_10000.json
6 |
7 | python scripts/generate_time_series.py --data_path data/data_p71428_10000.csv --num_samples 10000 --periods 7 14 28
8 | python scripts/preprocess_time_series.py --data_path data/data_p71428_10000.csv --preprocess_data_path data/preprocess_data_p71428_10000.csv --config_path data/config_p71428_10000.json
9 |
10 | python scripts/generate_time_series.py --data_path data/data_p11_10000.csv --num_samples 10000 --periods 11
11 | python scripts/preprocess_time_series.py --data_path data/data_p11_10000.csv --preprocess_data_path data/preprocess_data_p11_10000.csv --config_path data/config_p11_10000.json
12 |
13 | python scripts/generate_time_series.py --data_path data/data_p112337_10000.csv --num_samples 10000 --periods 11 23 37
14 | python scripts/preprocess_time_series.py --data_path data/data_p112337_10000.csv --preprocess_data_path data/preprocess_data_p112337_10000.csv --config_path data/config_p112337_10000.json
15 |
16 |
17 |
--------------------------------------------------------------------------------
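
After running the generation and preprocessing commands above, a quick sanity check of one output pair can be done from Python; the file names below simply mirror the --preprocess_data_path and --config_path arguments used in this script.

# Sketch: check that a preprocessed dataset produced by the commands above
# contains everything listed in its config.json (paths mirror this script).
import json
import pandas as pd

df = pd.read_csv("data/preprocess_data_p7_10000.csv")
with open("data/config_p7_10000.json") as f:
    config = json.load(f)

expected = config["features"] + [config["target"], config["group_by_key"]] + config["lag_features"]
missing = [c for c in expected if c not in df.columns]
print("missing columns:", missing)                                # should be []
print("number of series:", df[config["group_by_key"]].nunique())  # should be 10000
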
/bash/generate_data1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --mail-type=ALL
5 | #SBATCH --mail-user=lin00786@umn.edu
6 | #SBATCH -p amd2tb
7 |
8 | module load python3
9 |
10 | source activate torch-env
11 |
12 | cd /home/yaoyi/lin00786/transformer-vs-lstm-forecasting/
13 |
14 | python scripts/generate_time_series.py --data_path data/data_p11_10000.csv --num_samples 10000 --periods 11
15 | python scripts/preprocess_time_series.py --data_path data/data_p11_10000.csv --preprocess_data_path data/preprocess_data_p11_10000.csv --config_path data/config_p11_10000.json
16 |
17 | python scripts/generate_time_series.py --data_path data/data_p112337_10000.csv --num_samples 10000 --periods 11 23 37
18 | python scripts/preprocess_time_series.py --data_path data/data_p112337_10000.csv --preprocess_data_path data/preprocess_data_p112337_10000.csv --config_path data/config_p112337_10000.json
19 |
20 | python scripts/generate_time_series.py --data_path data/data_p11_20000.csv --num_samples 20000 --periods 11
21 | python scripts/preprocess_time_series.py --data_path data/data_p11_20000.csv --preprocess_data_path data/preprocess_data_p11_20000.csv --config_path data/config_p11_20000.json
22 |
23 | python scripts/generate_time_series.py --data_path data/data_p112337_20000.csv --num_samples 20000 --periods 11 23 37
24 | python scripts/preprocess_time_series.py --data_path data/data_p112337_20000.csv --preprocess_data_path data/preprocess_data_p112337_20000.csv --config_path data/config_p112337_20000.json
25 |
--------------------------------------------------------------------------------
/plot_images.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | import matplotlib.pyplot as plt
5 |
6 |
7 | if __name__ == "__main__":
8 |
9 | import argparse
10 |
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument("--model_name", required=True)
13 | parser.add_argument("--result_dir", default="results/")
14 | parser.add_argument("--num_test", type=int, default=10)
15 | args = parser.parse_args()
16 |
17 | output_dir = os.path.join(args.result_dir, args.model_name, "outputs")
18 | data_for_visualization_path = os.path.join(output_dir, "visualization.json")
19 |
20 | with open(data_for_visualization_path, "r") as f:
21 | data = json.load(f)
22 |
23 | if not os.path.exists(os.path.join(output_dir, "images")):
24 | os.makedirs(os.path.join(output_dir, "images"))
25 |
26 | for i, sample in enumerate(data[:args.num_test]):
27 | hist_size = len(sample["history"])
28 | gt_size = len(sample["ground_truth"])
29 | plt.figure()
30 | plt.plot(range(hist_size), sample["history"], label="History")
31 | plt.plot(
32 | range(hist_size, hist_size + gt_size), sample["ground_truth"], label="Ground Truth"
33 | )
34 | plt.plot(
35 | range(hist_size, hist_size + gt_size), sample["prediction"], label="Prediction"
36 | )
37 |
38 | plt.xlabel("Time")
39 |
40 | plt.ylabel("Time Series")
41 |
42 | plt.legend()
43 |
44 | plt.savefig(os.path.join(output_dir, "images", f"{i}.png"))
45 | plt.close()
--------------------------------------------------------------------------------
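
plot_images.py reads results/<model_name>/outputs/visualization.json, and from the plotting loop each entry needs "history", "ground_truth", and "prediction" lists. The sketch below writes a hand-made file in that shape so the plotting loop can be exercised without a trained model; the model name and values are made up.

# Sketch: create a fake visualization.json so plot_images.py can be exercised
# without a trained model. The model name and series values are made up.
import json, math, os

model_name = "dummy_model"                                  # hypothetical
out_dir = os.path.join("results", model_name, "outputs")
os.makedirs(out_dir, exist_ok=True)

sample = {
    "history": [math.cos(2 * math.pi * i / 7) for i in range(120)],
    "ground_truth": [math.cos(2 * math.pi * i / 7) for i in range(120, 150)],
    "prediction": [0.9 * math.cos(2 * math.pi * i / 7) for i in range(120, 150)],
}
with open(os.path.join(out_dir, "visualization.json"), "w") as f:
    json.dump([sample], f)

# Then: python plot_images.py --model_name dummy_model --num_test 1
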
/bash/test_transformer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /home/yaoyi/lin00786/transformer-adding-periodic/
14 |
15 | # python test.py --model_name transformer_aux0_penc0_pfeat0_1637787108 --num_test 2000
16 | # python test.py --model_name transformer_aux0_penc0_pfeat1_1637812993 --num_test 2000
17 | # python test.py --model_name transformer_aux0_penc1_pfeat0_1637781703 --num_test 2000
18 | # python test.py --model_name transformer_aux0_penc1_pfeat1_1637781703 --num_test 2000
19 | # python test.py --model_name transformer_aux1_penc0_pfeat0_1637868980 --num_test 2000
20 | # python test.py --model_name transformer_aux1_penc0_pfeat1_1637838954 --num_test 2000
21 | # python test.py --model_name transformer_aux1_penc1_pfeat0_1637805164 --num_test 2000
22 | # python test.py --model_name transformer_aux1_penc1_pfeat1_1637818343 --num_test 2000
23 |
24 | python test.py --model_name transformer_aux1_penc0_pfeat1_p10_5000_1638183260 --num_test 1000
25 | python test.py --model_name transformer_aux0_penc0_pfeat0_p10_5000_1638188294 --num_test 1000
26 | python test.py --model_name transformer_aux1_penc1_pfeat1_p7_5000_1638190548 --num_test 1000
27 | python test.py --model_name transformer_aux0_penc1_pfeat0_p7_5000_1638152733 --num_test 1000
28 | python test.py --model_name transformer_aux0_penc0_pfeat1_p10_5000_1638152778 --num_test 1000
29 | python test.py --model_name transformer_aux0_penc1_pfeat1_p7_5000_1638152923 --num_test 1000
30 | python test.py --model_name transformer_aux1_penc0_pfeat0_p10_5000_1638162417 --num_test 1000
31 | python test.py --model_name transformer_aux1_penc1_pfeat0_p7_5000_1638163121 --num_test 1000
32 | python test.py --model_name transformer_aux0_penc0_pfeat1_p7_5000_1638164780 --num_test 1000
33 | python test.py --model_name transformer_aux1_penc0_pfeat1_p7_5000_1638169717 --num_test 1000
34 | python test.py --model_name transformer_aux1_penc0_pfeat0_p7_5000_1638177176 --num_test 1000
35 | python test.py --model_name transformer_aux0_penc0_pfeat0_p7_5000_1638181986 --num_test 1000
36 |
--------------------------------------------------------------------------------
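
The checkpoint names used above follow the pattern <model>_aux{0,1}_penc{0,1}_pfeat{0,1}_<data_name>_<unix timestamp>, which lines up with the AUX/Penc signs described in the README (pfeat corresponds to --use_periodic_as_feat). A small hypothetical helper for decoding them:

# Hypothetical helper: decode the settings packed into a checkpoint name such as
# "transformer_aux1_penc0_pfeat1_p7_5000_1638164884".
import re

PATTERN = re.compile(
    r"(?P<model>\w+?)_aux(?P<aux>[01])_penc(?P<penc>[01])_pfeat(?P<pfeat>[01])_(?P<data>.+)_(?P<timestamp>\d+)$"
)

def decode(name: str) -> dict:
    match = PATTERN.match(name)
    if match is None:
        raise ValueError(f"unexpected checkpoint name: {name}")
    return {k: int(v) if v.isdigit() else v for k, v in match.groupdict().items()}

print(decode("transformer_aux1_penc0_pfeat1_p7_5000_1638164884"))
# {'model': 'transformer', 'aux': 1, 'penc': 0, 'pfeat': 1, 'data': 'p7_5000', 'timestamp': 1638164884}
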
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | data/
132 | _archive/
133 | results/
134 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CSCI 5525: Homework 4
2 | ## Title: A Comparison of Transformer and LSTM Time-series Data Forecasting
3 | ### Group member: Yijun Lin (lin00786@umn.edu), Min Namgung (namgu007@umn.edu)
4 | ### Date: Dec 17, 2021
5 |
6 |
7 | This repo compares Transformer and LSTM models on time-series forecasting.
8 | The dataset is generated based on this resource: https://github.com/CVxTz/time_series_forecasting.
9 | Sample data can be found [HERE](https://drive.google.com/drive/folders/1MfJ8tRGins8WN5pseKwyT4bIXhN1cYBI).
10 |
11 | You can generate your data using the code in the /scripts folder. See the sample commands in /bash/generate_data0.sh.
12 |
13 | ### Experiment 1: Transformer VS. LSTM Results:
14 |
15 | The images demonstrate that the Transformer produced more accurate predictions than the LSTM.
16 |
17 | A more detailed metrics comparison can be found below.
18 |
19 | | Transformer | LSTM |
20 | |------------- | ------------- |
21 | | ![](res_readme/T_1.png) | ![](res_readme/LSTM_1.png) |
22 | | ![](res_readme/T_2.png) | ![](res_readme/LSTM_2.png) |
23 |
24 |
25 | --------------------------------------------------------------------------------------------
26 |
27 | ### Experiment 2: Adding Periodic Positional Encoding on Transformer:
28 |
29 | The synthetic data are generated based on: **A * Cos(Bx + C) + D**
30 |
31 | where A controls the amplitude, B controls the period of the function (e.g., 7, 14, 28), C is the horizontal shift, and D is the vertical shift.
32 |
33 | **So, we intentionally add periodic information to the Transformer in the form of a positional encoding.**
34 |
35 |
36 | | Sign | Description |
37 | |------------- | ------------- |
38 | | AUX | Auxiliary features (e.g., day of week, month) |
39 | | Penc | Periodic as positional encoding |
40 | | 0 | Not using |
41 | | 1 | Using |
42 |
43 |
44 | The images demonstrate that the Transformer with periodic positional encoding produced more accurate predictions.
45 |
46 | | ID | Transformer W/WO Positional Encoding |
47 | |------------- | ------------- |
48 | | Without | |
49 | | With | |
50 |
51 |
52 | | Model | Using Auxiliary Features | Using Periodic as Positional Encoding | MAE | SMAPE |
53 | |------------- |------------- |------------- |------------- |------------- |
54 | | LSTM | 1 | 0 | 0.29625 | 47.51880 |
55 | | Transformer | 1 | 0 | 0.23089 | 37.93381 |
56 | | Transformer | 1 | 1 | **0.19829** | **34.05033** |
57 |
58 |
59 |
60 |
61 | ### How to run the code:
62 |
63 | Please refer to the /bash folder for how to train and test the models.
64 |
65 |
66 |
--------------------------------------------------------------------------------
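
models/transformer.py and models/lstm.py are not reproduced in this dump, so the following is only an illustrative sketch of the Experiment 2 idea described in the README above: turn the normalized phase produced by the data generator (norm_period = (index mod period) / period) into a sin/cos encoding and add it to the input embedding. It is not the repository's actual implementation.

# Illustrative sketch of a periodic positional encoding as described in the
# README: encode the normalized phase of each time step (norm_period in [0, 1))
# with sin/cos pairs and add it to the token embedding. NOT the repository's
# models/transformer.py, which is not included in this excerpt.
import torch
import torch.nn as nn


class PeriodicPositionalEncoding(nn.Module):
    def __init__(self, d_model: int):
        super().__init__()
        assert d_model % 2 == 0
        # Frequencies 1, 2, ..., d_model/2 cycles per period.
        self.register_buffer("freqs", torch.arange(1, d_model // 2 + 1).float())

    def forward(self, x: torch.Tensor, norm_period: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq_len, d_model); norm_period: (batch, seq_len) in [0, 1)
        angles = 2 * torch.pi * norm_period.unsqueeze(-1) * self.freqs  # (B, T, d/2)
        pe = torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)  # (B, T, d)
        return x + pe


# Example: batch of 2 series, 120 steps, 16-dim embeddings, period 7.
emb = torch.zeros(2, 120, 16)
phase = (torch.arange(120).float() % 7) / 7
out = PeriodicPositionalEncoding(16)(emb, phase.expand(2, -1))
print(out.shape)  # torch.Size([2, 120, 16])
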
/bash/train_lstm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 | #SBATCH --time=24:00:00
3 | #SBATCH --ntasks=1
4 | #SBATCH --gres=gpu:v100:1
5 | #SBATCH --mail-type=ALL
6 | #SBATCH --mail-user=lin00786@umn.edu
7 | #SBATCH -p v100
8 |
9 | module load python3
10 |
11 | source activate torch-env
12 |
13 | cd /data/yijun/transformer-vs-lstm-forecasting/
14 |
15 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
16 |
17 | python train.py --model_name lstm --data_name p7_10000
18 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 35
19 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 012345
20 |
21 | python train.py --model_name lstm --data_name p7_10000 --use_periodic_as_feat
22 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 35 --use_periodic_as_feat
23 | python train.py --model_name lstm --data_name p7_10000 --auxiliary_feat 012345 --use_periodic_as_feat
24 |
25 | python train.py --model_name lstm --data_name p71428_10000
26 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 35
27 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 012345
28 |
29 | python train.py --model_name lstm --data_name p71428_10000 --use_periodic_as_feat
30 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 35 --use_periodic_as_feat
31 | python train.py --model_name lstm --data_name p71428_10000 --auxiliary_feat 012345 --use_periodic_as_feat
32 |
33 | python train.py --model_name lstm --data_name p11_10000
34 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 35
35 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 012345
36 |
37 | python train.py --model_name lstm --data_name p11_10000 --use_periodic_as_feat
38 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 35 --use_periodic_as_feat
39 | python train.py --model_name lstm --data_name p11_10000 --auxiliary_feat 012345 --use_periodic_as_feat
40 |
41 | python train.py --model_name lstm --data_name p112337_10000
42 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 35
43 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 012345
44 |
45 | python train.py --model_name lstm --data_name p112337_10000 --use_periodic_as_feat
46 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 35 --use_periodic_as_feat
47 | python train.py --model_name lstm --data_name p112337_10000 --auxiliary_feat 012345 --use_periodic_as_feat
48 |
49 |
--------------------------------------------------------------------------------
/bash/Untitled.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "id": "464b2b3a-796a-4400-a0cc-b15c55db81bd",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "file_name = 'train_transformer.sh'"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "id": "872d2de8-0d46-4206-991c-0a0af95e2ce9",
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "f = open(file_name, 'r')\n",
21 | "\n",
22 | "count = 0\n",
23 | "header = []\n",
24 | "\n",
25 | "commands = []\n",
26 | "\n",
27 | "for line in f:\n",
28 | " \n",
29 | " if line.startswith('python'):\n",
30 | " if len(commands) == 3:\n",
31 | "\n",
32 | " with open(f'train_transformer{count}.sh', 'w') as out:\n",
33 | " \n",
34 | " for l in header[:16]:\n",
35 | " out.write(l)\n",
36 | " for l in commands:\n",
37 | " out.write(l)\n",
38 | " \n",
39 | " count += 1\n",
40 | " commands = [line]\n",
41 | " \n",
42 | " else:\n",
43 | " commands.append(line)\n",
44 | "\n",
45 | " else:\n",
46 | " header.append(line)\n",
47 | "\n",
48 | " \n",
49 | "with open(f'train_transformer{count}.sh', 'w') as out:\n",
50 | " \n",
51 | " for l in header[:16]:\n",
52 | " out.write(l)\n",
53 | " for l in commands:\n",
54 | " out.write(l) \n",
55 | " "
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 33,
61 | "id": "ddc650a2-de79-46ef-91e9-ad74159ee30b",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": []
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": null,
69 | "id": "65afd221-8f2b-4d8e-87ac-c15f9e23eede",
70 | "metadata": {},
71 | "outputs": [],
72 | "source": []
73 | }
74 | ],
75 | "metadata": {
76 | "kernelspec": {
77 | "display_name": "Python 3",
78 | "language": "python",
79 | "name": "python3"
80 | },
81 | "language_info": {
82 | "codemirror_mode": {
83 | "name": "ipython",
84 | "version": 3
85 | },
86 | "file_extension": ".py",
87 | "mimetype": "text/x-python",
88 | "name": "python",
89 | "nbconvert_exporter": "python",
90 | "pygments_lexer": "ipython3",
91 | "version": "3.8.5"
92 | }
93 | },
94 | "nbformat": 4,
95 | "nbformat_minor": 5
96 | }
97 |
--------------------------------------------------------------------------------
/data_utils.py:
--------------------------------------------------------------------------------
1 | import random
2 | import numpy as np
3 | import pandas as pd
4 |
5 | import torch
6 |
7 |
8 | def split_df(
9 | df: pd.DataFrame, split: str, history_size: int = 120, horizon_size: int = 30
10 | ):
11 | """
12 | Create a training / validation samples
13 | Validation samples are the last horizon_size rows
14 | :param df:
15 | :param split:
16 | :param history_size:
17 | :param horizon_size:
18 | :return:
19 | """
20 | if split == "train":
21 | end_index = random.randint(horizon_size + 1, df.shape[0] - (horizon_size + history_size) * 2)
22 | elif split == "val":
23 | end_index = df.shape[0] - (horizon_size + history_size)
24 | elif split == "test":
25 | end_index = df.shape[0]
26 | else:
27 | raise ValueError
28 |
29 | label_index = end_index - horizon_size
30 | start_index = max(0, label_index - history_size)
31 |
32 | history = df[start_index:label_index]
33 | targets = df[label_index:end_index]
34 |
35 | return history, targets
36 |
37 |
38 | def pad_arr(arr: np.ndarray, expected_size: int = 120):
39 | """
40 | Pad top of array when there is not enough history
41 | :param arr:
42 | :param expected_size:
43 | :return:
44 | """
45 | arr = np.pad(arr, [(expected_size - arr.shape[0], 0), (0, 0)], mode="edge")
46 | return arr
47 |
48 |
49 | def df_to_np(df):
50 | arr = np.array(df)
51 | arr = pad_arr(arr)
52 | return arr
53 |
54 |
55 | class Dataset(torch.utils.data.Dataset):
56 | def __init__(
57 | self,
58 | groups,
59 | grp_by,
60 | split,
61 | features,
62 | target,
63 | seq_len=120,
64 | horizon=30,
65 | use_periodic_as_feat=True,
66 | ):
67 | self.groups = groups
68 | self.grp_by = grp_by
69 | self.split = split
70 | self.features = features
71 | self.target = target
72 | self.target_lag_1 = f"{self.target}_lag_1"
73 | self.seq_len = seq_len
74 | self.horizon = horizon
75 | self.use_periodic_as_feat = use_periodic_as_feat
76 | if use_periodic_as_feat:
77 | self.features += ['norm_period']
78 |
79 | def __len__(self):
80 | return len(self.groups)
81 |
82 | def __getitem__(self, idx):
83 |
84 | group = self.groups[idx]
85 |
86 | df = self.grp_by.get_group(group)
87 |
88 | src, trg = split_df(df, split=self.split, history_size=self.seq_len, horizon_size=self.horizon)
89 |
90 | src = src[[self.target] + self.features + ['period']]
91 | src = df_to_np(src)
92 |
93 | trg_in = trg[[self.target_lag_1] + self.features + ['period']]
94 | trg_in = np.array(trg_in)
95 |
96 | trg_out = np.array(trg[self.target])
97 |
98 | src = torch.tensor(src, dtype=torch.float)
99 | trg_in = torch.tensor(trg_in, dtype=torch.float)
100 | trg_out = torch.tensor(trg_out, dtype=torch.float)
101 |
102 | return src, trg_in, trg_out
--------------------------------------------------------------------------------
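
A minimal sketch of how the Dataset above can be wired up from one of the preprocessed CSVs and its config.json (train.py is not included in this dump, so the exact wiring there may differ). Note that Dataset.__init__ appends 'norm_period' to the features list in place when use_periodic_as_feat is set, so passing a copy keeps the caller's list untouched.

# Sketch only: construct the Dataset from a preprocessed file and its config.
import json
import pandas as pd
from torch.utils.data import DataLoader

from data_utils import Dataset

data = pd.read_csv("data/preprocess_data_p7_10000.csv")
with open("data/config_p7_10000.json") as f:
    config = json.load(f)

grp_by = data.groupby(by=config["group_by_key"])  # one group per synthetic series
groups = list(grp_by.groups.keys())

train_ds = Dataset(
    groups=groups,
    grp_by=grp_by,
    split="train",
    features=list(config["features"]),  # copy: __init__ appends 'norm_period' in place
    target=config["target"],
)
src, trg_in, trg_out = train_ds[0]      # history window, decoder input, decoder target
loader = DataLoader(train_ds, batch_size=32, shuffle=True)
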
/scripts/generate_time_series.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import pandas as pd
4 | from tqdm import tqdm
5 | import numpy as np
6 | from uuid import uuid4
7 | import argparse
8 |
9 |
10 | def get_init_df():
11 |
12 | date_rng = pd.date_range(start="2015-01-01", end="2020-01-01", freq="D")
13 |
14 | dataframe = pd.DataFrame(date_rng, columns=["timestamp"])
15 |
16 | dataframe["index"] = range(dataframe.shape[0])
17 |
18 | dataframe["article"] = uuid4().hex
19 |
20 | return dataframe
21 |
22 |
23 | def set_amplitude(dataframe):
24 |
25 | max_step = random.randint(90, 365)
26 | max_amplitude = random.uniform(0.1, 1)
27 | offset = random.uniform(-1, 1)
28 |
29 | phase = random.randint(-1000, 1000)
30 |
31 | amplitude = (
32 | dataframe["index"]
33 | .apply(lambda x: max_amplitude * (x % max_step + phase) / max_step + offset)
34 | .values
35 | )
36 |
37 | if random.random() < 0.5:
38 | amplitude = amplitude[::-1]
39 |
40 | dataframe["amplitude"] = amplitude
41 |
42 | return dataframe
43 |
44 |
45 | def set_offset(dataframe):
46 |
47 | max_step = random.randint(15, 45)
48 | max_offset = random.uniform(-1, 1)
49 | base_offset = random.uniform(-1, 1)
50 |
51 | phase = random.randint(-1000, 1000)
52 |
53 | offset = (
54 | dataframe["index"]
55 | .apply(
56 | lambda x: max_offset * np.cos(x * 2 * np.pi / max_step + phase)
57 | + base_offset
58 | )
59 | .values
60 | )
61 |
62 | if random.random() < 0.5:
63 | offset = offset[::-1]
64 |
65 | dataframe["offset"] = offset
66 |
67 | return dataframe
68 |
69 |
70 | def generate_time_series(dataframe, periods):
71 |
72 | clip_val = random.uniform(0.3, 1)
73 |
74 | period = random.choice(periods)
75 |
76 | phase = random.randint(-1000, 1000)
77 |
78 | dataframe["views"] = dataframe.apply(
79 | lambda x: np.clip(
80 | np.cos(x["index"] * 2 * np.pi / period + phase), -clip_val, clip_val
81 | )
82 | * x["amplitude"]
83 | + x["offset"],
84 | axis=1,
85 | ) + np.random.normal(
86 | 0, dataframe["amplitude"].abs().max() / 10, size=(dataframe.shape[0],)
87 | )
88 |
89 | dataframe["period"] = dataframe["index"] % period
90 | dataframe["norm_period"] = dataframe["period"] / period
91 | dataframe["max_period"] = period
92 | return dataframe
93 |
94 |
95 | def generate_df(periods):
96 | dataframe = get_init_df()
97 | dataframe = set_amplitude(dataframe)
98 | dataframe = set_offset(dataframe)
99 | dataframe = generate_time_series(dataframe, periods)
100 | return dataframe
101 |
102 |
103 | if __name__ == "__main__":
104 |
105 | parser = argparse.ArgumentParser()
106 | parser.add_argument("--data_path", default="data/data.csv")
107 | parser.add_argument("--num_samples", type=int, default=20)
108 | parser.add_argument("--periods", type=int, default=[7, 14, 28], nargs='+')
109 |
110 | args = parser.parse_args()
111 |
112 | dataframes = []
113 |
114 | for _ in tqdm(range(args.num_samples)):
115 | df = generate_df(args.periods)
116 | dataframes.append(df)
117 |
118 | all_data = pd.concat(dataframes, ignore_index=True)
119 |
120 | all_data.to_csv(args.data_path, index=False)
--------------------------------------------------------------------------------
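
In generate_time_series.py above, each series is a clipped cosine with a period drawn from --periods, scaled by a slowly varying amplitude, shifted by a slowly varying offset, and perturbed with Gaussian noise; the columns period, norm_period (= (index mod period) / period) and max_period are stored alongside views. A condensed restatement of the per-row formula (constants and values below are illustrative only):

# Condensed restatement of the "views" formula used above, for a single series
# (amplitude/offset are taken as constants here purely for illustration).
import numpy as np

n, period, phase = 60, 7, 3            # illustrative values
amplitude, offset, clip_val = 0.8, 0.1, 0.5
idx = np.arange(n)

views = (
    np.clip(np.cos(idx * 2 * np.pi / period + phase), -clip_val, clip_val) * amplitude
    + offset
    + np.random.normal(0, abs(amplitude) / 10, size=n)
)
norm_period = (idx % period) / period  # the feature used by --use_periodic_as_feat
print(views[:7].round(3), norm_period[:7])
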
/scripts/preprocess_time_series.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import List
3 | import pandas as pd
4 | from pathlib import Path
5 | import numpy as np
6 | import argparse
7 |
8 |
9 | def add_date_cols(dataframe: pd.DataFrame, date_col: str = "timestamp"):
10 | """
11 | add time features like month, week of the year ...
12 | :param dataframe:
13 | :param date_col:
14 | :return:
15 | """
16 |
17 | dataframe[date_col] = pd.to_datetime(dataframe[date_col], format="%Y-%m-%d")
18 |
19 | dataframe["day_of_week"] = dataframe[date_col].dt.weekday / 7
20 | dataframe["day_of_month"] = dataframe[date_col].dt.day / 31
21 | dataframe["day_of_year"] = dataframe[date_col].dt.dayofyear / 365
22 | dataframe["month"] = dataframe[date_col].dt.month / 12
23 | dataframe["week_of_year"] = dataframe[date_col].dt.isocalendar().week / 53
24 | dataframe["year"] = (dataframe[date_col].dt.year - 2015) / 5
25 |
26 | dataframe["day_of_week"] = dataframe["day_of_week"].astype(np.float32)
27 | dataframe["day_of_month"] = dataframe["day_of_month"].astype(np.float32)
28 | dataframe["day_of_year"] = dataframe["day_of_year"].astype(np.float32)
29 | dataframe["month"] = dataframe["month"].astype(np.float32)
30 | dataframe["week_of_year"] = dataframe["week_of_year"].astype(np.float32)
31 | dataframe["year"] = dataframe["year"].astype(np.float32)
32 |
33 | return dataframe, ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
34 |
35 |
36 | def add_basic_lag_features(
37 | dataframe: pd.DataFrame,
38 | group_by_cols: List,
39 | col_names: List,
40 | horizons: List,
41 | fill_na=True,
42 | ):
43 | """
44 | Computes simple lag features
45 | :param dataframe:
46 | :param group_by_cols:
47 | :param col_names:
48 | :param horizons:
49 | :param fill_na:
50 | :return:
51 | """
52 | group_by_data = dataframe.groupby(by=group_by_cols)
53 |
54 | new_cols = []
55 |
56 | for horizon in horizons:
57 | dataframe[[a + "_lag_%s" % horizon for a in col_names]] = group_by_data[
58 | col_names
59 | ].shift(periods=horizon)
60 | new_cols += [a + "_lag_%s" % horizon for a in col_names]
61 |
62 | if fill_na:
63 | dataframe[new_cols] = dataframe[new_cols].fillna(0)
64 |
65 | return dataframe, new_cols
66 |
67 |
68 | def process_df(dataframe: pd.DataFrame, target_col: str = "views"):
69 |
70 | """
71 | :param dataframe:
72 | :param target_col:
73 | :return:
74 | """
75 |
76 | dataframe, new_cols = add_date_cols(dataframe, date_col="timestamp")
77 | dataframe, lag_cols = add_basic_lag_features(
78 | dataframe, group_by_cols=["article"], col_names=[target_col], horizons=[1]
79 | )
80 |
81 | return dataframe, new_cols
82 |
83 |
84 | if __name__ == "__main__":
85 |
86 | parser = argparse.ArgumentParser()
87 | parser.add_argument("--data_path", default="data/data.csv")
88 | parser.add_argument("--preprocess_data_path", default="data/preprocess_data.csv")
89 | parser.add_argument("--config_path", default="data/config.json")
90 | args = parser.parse_args()
91 |
92 | data = pd.read_csv(args.data_path)
93 |
94 | data, cols = process_df(data)
95 |
96 | data.to_csv(args.preprocess_data_path, index=False)
97 |
98 | config = {
99 | "features": cols,
100 | "target": "views",
101 | "group_by_key": "article",
102 | "lag_features": ["views_lag_1"],
103 | }
104 |
105 | with open(args.config_path, "w") as f:
106 | json.dump(config, f, indent=4)
--------------------------------------------------------------------------------
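
The only lag feature produced here, views_lag_1, is the target shifted by one day within each article group; data_utils.py later uses it as the decoder input column (target_lag_1). A small illustration of the groupby + shift pattern behind add_basic_lag_features:

# Illustration of the groupby + shift pattern behind add_basic_lag_features:
# within each article, views_lag_1 at day t equals views at day t-1.
import pandas as pd

df = pd.DataFrame({
    "article": ["a", "a", "a", "b", "b"],
    "views":   [1.0, 2.0, 3.0, 10.0, 20.0],
})
df["views_lag_1"] = df.groupby("article")["views"].shift(1).fillna(0)
print(df)
#   article  views  views_lag_1
# 0       a    1.0          0.0
# 1       a    2.0          1.0
# 2       a    3.0          2.0
# 3       b   10.0          0.0
# 4       b   20.0         10.0
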
/bash/train_transformer_.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 |
3 | cd /data/yijun/transformer-vs-lstm-forecasting/
4 |
5 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
6 |
7 |
8 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
9 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
10 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
11 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
12 |
13 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
14 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
15 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
16 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
17 |
18 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 1
19 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 1
20 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 1
21 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 1
22 |
23 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
24 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
25 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
26 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 1
27 |
28 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
29 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
30 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
31 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 1
32 |
33 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 1
34 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 1
35 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 1
36 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 1
37 |
38 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 1
39 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 1
40 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 1
41 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 1
42 |
43 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 012345 --gpu 1
44 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 012345 --gpu 1
45 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 012345 --gpu 1
46 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 012345 --gpu 1
47 |
48 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 35 --gpu 1
49 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 35 --gpu 1
50 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 35 --gpu 1
51 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 35 --gpu 1
52 |
53 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat --gpu 1
54 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat --gpu 1
55 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat --gpu 1
56 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat --gpu 1
57 |
58 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --gpu 1
59 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --gpu 1
60 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --gpu 1
61 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --gpu 1
62 |
63 | python train.py --model_name transformer --data_name p7_10000 --gpu 1
64 | python train.py --model_name transformer --data_name p11_10000 --gpu 1
65 | python train.py --model_name transformer --data_name p71428_10000 --gpu 1
66 | python train.py --model_name transformer --data_name p112337_10000 --gpu 1
67 |
68 |
--------------------------------------------------------------------------------
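The digits passed to --auxiliary_feat index into the feature list shown in the comment at the top of the script; a minimal sketch of how train.py expands them:

    auxiliary_feat_names = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]

    def expand(code: str):
        # "35" -> ["month", "year"]; "" -> no auxiliary features
        return [auxiliary_feat_names[int(i)] for i in code]

    print(expand("012345"))  # all six date features
    print(expand("35"))      # ['month', 'year']
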
/bash/train_transformer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -l
2 |
3 | cd /data/yijun/transformer-vs-lstm-forecasting/
4 |
5 | # auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
6 |
7 | # python train.py --model_name transformer --data_name p7_10000 --gpu 2
8 | # python train.py --model_name transformer --data_name p11_10000 --gpu 2
9 | # python train.py --model_name transformer --data_name p71428_10000 --gpu 2
10 | # python train.py --model_name transformer --data_name p112337_10000 --gpu 2
11 |
12 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --gpu 2
13 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --gpu 2
14 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --gpu 2
15 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --gpu 2
16 |
17 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat --gpu 2
18 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat --gpu 2
19 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat --gpu 2
20 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat --gpu 2
21 |
22 |
23 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 35 --gpu 2
24 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 35 --gpu 2
25 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 35 --gpu 2
26 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 35 --gpu 2
27 |
28 | python train.py --model_name transformer --data_name p7_10000 --auxiliary_feat 012345 --gpu 2
29 | python train.py --model_name transformer --data_name p11_10000 --auxiliary_feat 012345 --gpu 2
30 | python train.py --model_name transformer --data_name p71428_10000 --auxiliary_feat 012345 --gpu 2
31 | python train.py --model_name transformer --data_name p112337_10000 --auxiliary_feat 012345 --gpu 2
32 |
33 |
34 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 2
35 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 2
36 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 2
37 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --auxiliary_feat 35 --gpu 2
38 |
39 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 2
40 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 2
41 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 2
42 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --auxiliary_feat 012345 --gpu 2
43 |
44 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
45 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
46 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
47 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
48 |
49 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
50 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
51 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
52 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
53 |
54 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 2
55 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 2
56 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 2
57 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --use_periodic_as_feat --gpu 2
58 |
59 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
60 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
61 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
62 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 35 --gpu 2
63 |
64 | python train.py --model_name transformer --data_name p7_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
65 | python train.py --model_name transformer --data_name p11_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
66 | python train.py --model_name transformer --data_name p71428_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
67 | python train.py --model_name transformer --data_name p112337_10000 --use_periodic_encoder --use_periodic_as_feat --auxiliary_feat 012345 --gpu 2
68 |
--------------------------------------------------------------------------------
/models/transformer.py:
--------------------------------------------------------------------------------
1 | import pytorch_lightning as pl
2 | import torch
3 | import torch.nn as nn
4 | from torch.nn import Linear
5 |
6 |
7 | def smape_loss(y_pred, target):
8 | loss = 2 * (y_pred - target).abs() / (y_pred.abs() + target.abs() + 1e-8)
9 | return loss.mean()
10 |
11 |
12 | def gen_trg_mask(length, device):
13 | mask = torch.tril(torch.ones(length, length, device=device)) == 1
14 |
15 | mask = (
16 | mask.float()
17 | .masked_fill(mask == 0, float("-inf"))
18 | .masked_fill(mask == 1, float(0.0))
19 | )
20 |
21 | return mask
22 |
23 |
24 | class TransformerForecasting(pl.LightningModule):
25 | def __init__(
26 | self,
27 | n_encoder_inputs,
28 | n_decoder_inputs,
29 | h_channels=512,
30 | out_channels=1,
31 | dropout=0.1,
32 | lr=1e-5,
33 | use_periodic_encoder=True,
34 | ):
35 | super().__init__()
36 |
37 | self.save_hyperparameters()
38 |
39 | self.n_encoder_inputs = n_encoder_inputs
40 | self.n_decoder_inputs = n_decoder_inputs
41 | self.lr = lr
42 | self.dropout = dropout
43 |         self.use_periodic_encoder = use_periodic_encoder
44 |
45 | self.input_pos_embedding = torch.nn.Embedding(1024, embedding_dim=h_channels)
46 | self.target_pos_embedding = torch.nn.Embedding(1024, embedding_dim=h_channels)
47 |
48 | self.periodic_embedding = torch.nn.Embedding(64, embedding_dim=h_channels)
49 |
50 | encoder_layer = nn.TransformerEncoderLayer(
51 | d_model=h_channels,
52 | nhead=8,
53 | dropout=self.dropout,
54 | dim_feedforward=4 * h_channels,
55 | )
56 | decoder_layer = nn.TransformerDecoderLayer(
57 | d_model=h_channels,
58 | nhead=8,
59 | dropout=self.dropout,
60 | dim_feedforward=4 * h_channels,
61 | )
62 |
63 | self.encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=8)
64 | self.decoder = torch.nn.TransformerDecoder(decoder_layer, num_layers=8)
65 |
66 | self.input_projection = Linear(n_encoder_inputs, h_channels)
67 | self.output_projection = Linear(n_decoder_inputs, h_channels)
68 |
69 | self.linear = Linear(h_channels, out_channels)
70 |
71 | self.do = nn.Dropout(p=self.dropout)
72 |
73 | def encode_src(self, src):
74 |
75 | src_start = self.input_projection(src[:, :, :self.n_encoder_inputs]).permute(1, 0, 2)
76 | in_sequence_len, batch_size = src_start.size(0), src_start.size(1)
77 |
78 | pos_encoder = (
79 | torch.arange(0, in_sequence_len, device=src.device)
80 | .unsqueeze(0)
81 | .repeat(batch_size, 1)
82 | ) # pos_encoder: [batch_size, in_sequence_len]
83 |
84 | pos_encoder = self.input_pos_embedding(pos_encoder)
85 | pos_encoder = pos_encoder.permute(1, 0, 2) # pos_encoder: [in_sequence_len, batch_size, h_channels]
86 |
87 | src_out = src_start + pos_encoder
88 |
89 | if self.use_periodic_encoder:
90 | periodic_encoder = src[:, :, -1].long() # periodic_encoder: [batch_size, in_sequence_len]
91 | periodic_encoder = self.periodic_embedding(periodic_encoder)
92 | periodic_encoder = periodic_encoder.permute(1, 0, 2) # periodic_encoder: [in_sequence_len, batch_size, h_channels]
93 |
94 | src_out = src_out + periodic_encoder
95 |
96 | out = self.encoder(src_out) + src_start
97 | return out
98 |
99 | def decode_trg(self, trg, memory):
100 |
101 | trg_start = self.output_projection(trg[:, :, :self.n_decoder_inputs]).permute(1, 0, 2)
102 |
103 | out_sequence_len, batch_size = trg_start.size(0), trg_start.size(1)
104 |
105 | pos_encoder = (
106 | torch.arange(0, out_sequence_len, device=trg.device)
107 | .unsqueeze(0)
108 | .repeat(batch_size, 1)
109 |         ) # pos_encoder: [batch_size, out_sequence_len]
110 |
111 |         pos_encoder = self.target_pos_embedding(pos_encoder)  # the decoder uses its own positional embedding
112 |         pos_encoder = pos_encoder.permute(1, 0, 2) # pos_encoder: [out_sequence_len, batch_size, h_channels]
113 |
114 | trg_out = trg_start + pos_encoder
115 |
116 | if self.use_periodic_encoder:
117 |             periodic_encoder = trg[:, :, -1].long() # periodic ids: [batch_size, out_sequence_len]
118 |             periodic_encoder = self.periodic_embedding(periodic_encoder)
119 |             periodic_encoder = periodic_encoder.permute(1, 0, 2) # [out_sequence_len, batch_size, h_channels]
120 |
121 | trg_out = trg_out + periodic_encoder
122 |
123 | trg_mask = gen_trg_mask(out_sequence_len, trg.device)
124 |
125 | out = self.decoder(tgt=trg_out, memory=memory, tgt_mask=trg_mask) + trg_start
126 | out = out.permute(1, 0, 2)
127 | out = self.linear(out)
128 | return out
129 |
130 | def forward(self, x):
131 |
132 | src, trg = x
133 |
134 | src = self.encode_src(src)
135 |
136 | out = self.decode_trg(trg=trg, memory=src)
137 |
138 | return out
139 |
140 | def training_step(self, batch, batch_idx):
141 | src, trg_in, trg_out = batch
142 |
143 | y_hat = self((src, trg_in))
144 |
145 | y_hat = y_hat.view(-1)
146 | y = trg_out.view(-1)
147 |
148 | loss = smape_loss(y_hat, y)
149 |
150 | self.log("train_loss", loss)
151 |
152 | return loss
153 |
154 | def validation_step(self, batch, batch_idx):
155 | src, trg_in, trg_out = batch
156 |
157 | y_hat = self((src, trg_in))
158 |
159 | y_hat = y_hat.view(-1)
160 | y = trg_out.view(-1)
161 |
162 | loss = smape_loss(y_hat, y)
163 |
164 | self.log("valid_loss", loss)
165 |
166 | return loss
167 |
168 | def test_step(self, batch, batch_idx):
169 | src, trg_in, trg_out = batch
170 |
171 | y_hat = self((src, trg_in))
172 |
173 | y_hat = y_hat.view(-1)
174 | y = trg_out.view(-1)
175 |
176 | loss = smape_loss(y_hat, y)
177 |
178 | self.log("test_loss", loss)
179 |
180 | return loss
181 |
182 | def configure_optimizers(self):
183 | optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
184 | scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
185 | optimizer, patience=10, factor=0.1
186 | )
187 | return {
188 | "optimizer": optimizer,
189 | "lr_scheduler": scheduler,
190 | "monitor": "valid_loss",
191 | }
192 |
193 |
194 | if __name__ == "__main__":
195 | n_classes = 100
196 |
197 | source = torch.rand(size=(32, 16, 9))
198 | target_in = torch.rand(size=(32, 16, 8))
199 | target_out = torch.rand(size=(32, 16, 1))
200 |
201 |     ts = TransformerForecasting(n_encoder_inputs=9, n_decoder_inputs=8)
202 |
203 | pred = ts((source, target_in))
204 |
205 | print(pred.size())
206 |
207 | ts.training_step((source, target_in, target_out), batch_idx=1)
--------------------------------------------------------------------------------
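For intuition, gen_trg_mask builds the standard additive causal mask expected by nn.TransformerDecoder (0 on and below the diagonal, -inf above it). A minimal check, reusing the same construction as the file above:

    import torch

    def gen_trg_mask(length, device):
        mask = torch.tril(torch.ones(length, length, device=device)) == 1
        return mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, 0.0)

    print(gen_trg_mask(3, torch.device("cpu")))
    # tensor([[0., -inf, -inf],
    #         [0., 0., -inf],
    #         [0., 0., 0.]])
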
/models/lstm.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import torch
4 | import torch.nn as nn
5 | from torch.nn import Linear
6 | import pytorch_lightning as pl
7 |
8 |
9 | def smape_loss(y_pred, target):
10 | loss = 2 * (y_pred - target).abs() / (y_pred.abs() + target.abs() + 1e-8)
11 | return loss.mean()
12 |
13 |
14 | def gen_trg_mask(length, device):
15 | mask = torch.tril(torch.ones(length, length, device=device)) == 1
16 |
17 | mask = (
18 | mask.float()
19 | .masked_fill(mask == 0, float("-inf"))
20 | .masked_fill(mask == 1, float(0.0))
21 | )
22 |
23 | return mask
24 |
25 |
26 | class LSTMEncoder(nn.Module):
27 | ''' Encodes time-series sequence '''
28 |
29 | def __init__(self, input_size, hidden_size, num_layers=1):
30 |
31 | '''
32 | : param input_size: the number of features in the input X
33 | : param hidden_size: the number of features in the hidden state h
34 | : param num_layers: number of recurrent layers (i.e., 2 means there are
35 | : 2 stacked LSTMs)
36 | '''
37 |
38 | super(LSTMEncoder, self).__init__()
39 | self.input_size = input_size
40 | self.hidden_size = hidden_size
41 | self.num_layers = num_layers
42 |
43 | # define LSTM layer
44 | self.lstm = nn.LSTM(input_size=input_size,
45 | hidden_size=hidden_size,
46 | num_layers=num_layers,
47 | batch_first=True)
48 |
49 |         self.linear = nn.Linear(1, 1)  # dummy layer, only used to read off the module's device in init_hidden
50 |
51 | def forward(self, x, hidden_state): # Inputs: input, (h_0, c_0)
52 |
53 |         '''
54 |         : param x: input of shape [batch_size, seq_len, input_size] (the LSTM is batch_first)
55 |         : return lstm_out, hidden: lstm_out gives all the hidden states in the sequence;
56 |         :                          hidden gives the hidden state and cell state for the last
57 |         :                          element in the sequence
58 |         '''
59 |
60 |         lstm_out, self.hidden = self.lstm(x, hidden_state)  # lstm_out: [batch_size, seq_len, hidden_size]
61 |         return lstm_out, self.hidden  # Outputs: output, (h_n, c_n)
62 |
63 | def init_hidden(self, batch_size):
64 |
65 | '''
66 | initialize hidden state
67 | : param batch_size: x_input.shape[1]
68 | : return: zeroed hidden state and cell state
69 | '''
70 |
71 | return (torch.zeros(self.num_layers, batch_size, self.hidden_size,
72 | device=self.linear.weight.device),
73 | torch.zeros(self.num_layers, batch_size, self.hidden_size,
74 | device=self.linear.weight.device))
75 |
76 |
77 | class LSTMDecoder(nn.Module):
78 | ''' Decodes hidden state output by encoder '''
79 |
80 | def __init__(self, input_size, hidden_size, output_size, num_layers=1):
81 |
82 | '''
83 | : param input_size: the number of features in the input X
84 | : param hidden_size: the number of features in the hidden state h
85 | : param num_layers: number of recurrent layers (i.e., 2 means there are
86 | : 2 stacked LSTMs)
87 | '''
88 |
89 | super(LSTMDecoder, self).__init__()
90 | self.input_size = input_size
91 | self.hidden_size = hidden_size
92 | self.num_layers = num_layers
93 |
94 | self.lstm = nn.LSTM(input_size=input_size,
95 | hidden_size=hidden_size,
96 | num_layers=num_layers)
97 |
98 | self.linear = nn.Linear(hidden_size, output_size)
99 |
100 | def forward(self, x_input, encoder_hidden_states):
101 |
102 | '''
103 | : param x_input: should be 2D (batch_size, input_size)
104 | : param encoder_hidden_states: hidden states
105 | : return output, hidden: output gives all the hidden states in the sequence;
106 | : hidden gives the hidden state and cell state for the last
107 | : element in the sequence
108 |
109 | '''
110 |
111 | lstm_out, self.hidden = self.lstm(x_input.unsqueeze(0), encoder_hidden_states)
112 | output = self.linear(lstm_out.squeeze(0))
113 |
114 | return output, self.hidden
115 |
116 |
117 | class LSTMForecasting(pl.LightningModule):
118 | def __init__(
119 | self,
120 | n_encoder_inputs,
121 | n_decoder_inputs,
122 | h_channels=512,
123 | out_channels=1,
124 | lr=1e-5,
125 | dropout=0.1,
126 | teacher_forcing_ratio=1.
127 | ):
128 | super().__init__()
129 |
130 | self.save_hyperparameters()
131 |
132 | self.n_encoder_inputs = n_encoder_inputs
133 | self.n_decoder_inputs = n_decoder_inputs
134 | self.lr = lr
135 | self.dropout = dropout
136 | self.teacher_forcing_ratio = teacher_forcing_ratio
137 |
138 | self.encoder = LSTMEncoder(input_size=n_encoder_inputs,
139 | hidden_size=h_channels)
140 |
141 | self.decoder = LSTMDecoder(input_size=n_decoder_inputs,
142 | hidden_size=h_channels,
143 | output_size=out_channels)
144 |
145 | self.do = nn.Dropout(p=self.dropout)
146 |
147 | def forward(self, x, teacher_forcing_ratio=1):
148 |
149 | src, trg = x
150 | src = src[:, :, :self.n_encoder_inputs]
151 | trg = trg[:, :, :self.n_decoder_inputs]
152 |
153 | batch_size, seq_len, _ = src.shape
154 | _, horizon, _ = trg.shape
155 |
156 | outputs = [] #torch.zeros(horizon, batch_size, trg.shape[2])
157 | en_hidden = self.encoder.init_hidden(batch_size)
158 |
159 | en_output, en_hidden = self.encoder(x=src, hidden_state=en_hidden)
160 |
161 | # decoder with teacher forcing
162 | de_hidden = en_hidden
163 | de_output = trg[:, 0, :] # shape: (batch_size, input_size)
164 |
165 |         # use teacher forcing with probability teacher_forcing_ratio (the argument, so validation/test can pass 0)
166 |         if random.random() < teacher_forcing_ratio:
167 | for t in range(horizon):
168 | de_input = trg[:, t, :]
169 | de_output, de_hidden = self.decoder(de_input, de_hidden)
170 | outputs.append(de_output)
171 | else:
172 | for t in range(horizon):
173 | de_input = de_output
174 | de_output, de_hidden = self.decoder(de_input, de_hidden)
175 | outputs.append(de_output)
176 |
177 |         outputs = torch.stack(outputs, dim=1)  # shape: [batch_size, horizon, out_channels]
178 | return outputs
179 |
180 | def training_step(self, batch, batch_idx):
181 | src, trg_in, trg_out = batch
182 |
183 |         y_hat = self((src, trg_in), teacher_forcing_ratio=self.teacher_forcing_ratio)  # there is an out_channels dimension at the end
184 |
185 | y_hat = y_hat.view(-1)
186 | y = trg_out.view(-1)
187 |
188 | loss = smape_loss(y_hat, y)
189 |
190 | self.log("train_loss", loss)
191 |
192 | return loss
193 |
194 | def validation_step(self, batch, batch_idx):
195 | src, trg_in, trg_out = batch
196 |
197 | y_hat = self((src, trg_in), teacher_forcing_ratio=0)
198 |
199 | y_hat = y_hat.view(-1)
200 | y = trg_out.view(-1)
201 |
202 | loss = smape_loss(y_hat, y)
203 |
204 | self.log("valid_loss", loss)
205 |
206 | return loss
207 |
208 | def test_step(self, batch, batch_idx):
209 | src, trg_in, trg_out = batch
210 |
211 | y_hat = self((src, trg_in), teacher_forcing_ratio=0)
212 |
213 | y_hat = y_hat.view(-1)
214 | y = trg_out.view(-1)
215 |
216 | loss = smape_loss(y_hat, y)
217 |
218 | self.log("test_loss", loss)
219 |
220 | return loss
221 |
222 | def configure_optimizers(self):
223 | optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
224 | scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
225 | optimizer, patience=10, factor=0.1
226 | )
227 | return {
228 | "optimizer": optimizer,
229 | "lr_scheduler": scheduler,
230 | "monitor": "valid_loss",
231 | }
232 |
233 |
234 | if __name__ == "__main__":
235 | n_classes = 100
236 |
237 | source = torch.rand(size=(32, 16, 9))
238 | target_in = torch.rand(size=(32, 16, 8))
239 | target_out = torch.rand(size=(32, 16, 1))
240 |
241 |     ts = LSTMForecasting(n_encoder_inputs=9, n_decoder_inputs=8)
242 |
243 | pred = ts((source, target_in))
244 |
245 | print(pred.size())
246 |
247 | ts.training_step((source, target_in, target_out), batch_idx=1)
--------------------------------------------------------------------------------
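Both models share the same SMAPE training loss; a small worked example of its value on toy tensors (the numbers are illustrative):

    import torch

    def smape_loss(y_pred, target):
        # same definition as in models/lstm.py and models/transformer.py
        loss = 2 * (y_pred - target).abs() / (y_pred.abs() + target.abs() + 1e-8)
        return loss.mean()

    y_pred = torch.tensor([10.0, 0.0, 5.0])
    target = torch.tensor([10.0, 1.0, 4.0])
    print(smape_loss(y_pred, target))  # (0 + 2 + 2/9) / 3 ≈ 0.7407
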
/train.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import json
4 | import random
5 | import time
6 | import copy
7 | import glob
8 |
9 | import numpy as np
10 | import pandas as pd
11 | import pytorch_lightning as pl
12 | import torch
13 | from torch.utils.data import DataLoader
14 | from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
15 | from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger
16 |
17 | from data_utils import Dataset
18 | from models.transformer import TransformerForecasting
19 | from models.lstm import LSTMForecasting
20 |
21 |
22 | def train(
23 | data_csv_path: str,
24 | feature_target_names_path: str,
25 | output_json_path: str,
26 | model_name: str,
27 | log_dir: str,
28 | model_dir: str,
29 | auxiliary_feat: list,
30 | use_periodic_encoder: bool,
31 | use_periodic_as_feat: bool,
32 | seq_len: int = 120,
33 | horizon: int = 30,
34 | batch_size: int = 64,
35 | epochs: int = 200,
36 | lr: float = 0.001,
37 | gpu: int = 3,
38 | ):
39 |
40 |     device = 1 if torch.cuda.is_available() else None  # unused; the Trainer below selects the device via gpus=[gpu]
41 |
42 | data = pd.read_csv(data_csv_path)
43 |
44 | with open(feature_target_names_path) as f:
45 | feature_target_names = json.load(f)
46 |
47 | data_train = data[~data[feature_target_names["target"]].isna()]
48 |
49 | grp_by_train = data_train.groupby(by=feature_target_names["group_by_key"])
50 |
51 | groups = list(grp_by_train.groups)
52 |
53 | full_groups = [
54 | grp for grp in groups if grp_by_train.get_group(grp).shape[0] > 2 * horizon
55 | ]
56 |
57 | train_data = Dataset(
58 | groups=full_groups,
59 | grp_by=grp_by_train,
60 | split="train",
61 | features=copy.copy(auxiliary_feat),
62 | target=feature_target_names["target"],
63 | seq_len=seq_len,
64 | horizon=horizon,
65 | use_periodic_as_feat=use_periodic_as_feat
66 | )
67 |
68 | val_data = Dataset(
69 | groups=full_groups,
70 | grp_by=grp_by_train,
71 | split="val",
72 | features=copy.copy(auxiliary_feat),
73 | target=feature_target_names["target"],
74 | seq_len=seq_len,
75 | horizon=horizon,
76 | use_periodic_as_feat=use_periodic_as_feat
77 | )
78 |
79 | in_channels = len(auxiliary_feat) + use_periodic_as_feat + 1
80 | assert in_channels == train_data[0][0].size(1) - 1 == val_data[0][0].size(1) - 1
81 |     print(f"in_channels - {in_channels}")
82 |
83 | train_loader = DataLoader(
84 | train_data,
85 | batch_size=batch_size,
86 | num_workers=10,
87 | shuffle=True,
88 | )
89 | val_loader = DataLoader(
90 | val_data,
91 | batch_size=batch_size,
92 | num_workers=10,
93 | shuffle=False,
94 | )
95 |
96 | if model_name == 'transformer':
97 | model = TransformerForecasting(
98 | n_encoder_inputs=in_channels,
99 | n_decoder_inputs=in_channels,
100 | h_channels=512,
101 | out_channels=1,
102 | lr=lr,
103 | dropout=0.1,
104 | use_periodic_encoder=use_periodic_encoder,
105 | )
106 |
107 | elif model_name == 'lstm':
108 | model = LSTMForecasting(
109 | n_encoder_inputs=in_channels,
110 | n_decoder_inputs=in_channels,
111 | h_channels=512,
112 | out_channels=1,
113 | lr=lr,
114 | dropout=0.1,)
115 | else:
116 | raise NotImplementedError
117 |
118 | tensorboard_logger = TensorBoardLogger(
119 | save_dir=log_dir,
120 | name='tensorboard_log',
121 | )
122 |
123 | csv_logger = CSVLogger(
124 | save_dir=log_dir,
125 | name='csv_log',
126 | )
127 |
128 | checkpoint_callback = ModelCheckpoint(
129 | monitor="valid_loss",
130 | mode="min",
131 | dirpath=model_dir,
132 |         filename='{epoch}-{valid_loss:.5f}',
133 | )
134 |
135 | earlystop_callback = EarlyStopping(
136 | monitor='valid_loss',
137 | min_delta=0.00,
138 | mode='min',
139 | patience=20,
140 | verbose=True,
141 | )
142 |
143 | trainer = pl.Trainer(
144 | max_epochs=epochs,
145 | gpus=[gpu],
146 |         progress_bar_refresh_rate=1,
147 | logger=[tensorboard_logger, csv_logger],
148 | callbacks=[checkpoint_callback, earlystop_callback],
149 | )
150 |
151 | trainer.fit(model, train_loader, val_loader)
152 |
153 | result_val = trainer.test(test_dataloaders=val_loader)
154 |
155 | output_json = {
156 | "model_name": model_name,
157 | "epochs": epochs,
158 | "batch_size": batch_size,
159 | "seq_len": seq_len,
160 | "horizon": horizon,
161 | "val_loss": result_val[0]["test_loss"],
162 | "best_model_path": checkpoint_callback.best_model_path,
163 | }
164 |
165 | if output_json_path is not None:
166 | with open(output_json_path, "w") as f:
167 | json.dump(output_json, f, indent=4)
168 |
169 | return output_json
170 |
171 |
172 | if __name__ == "__main__":
173 |
174 | import argparse
175 |
176 | parser = argparse.ArgumentParser()
177 | parser.add_argument("--model_name", type=str, required=True)
178 | parser.add_argument("--data_name", type=str, required=True)
179 |
180 | parser.add_argument("--data_csv_path", default="data/")
181 | parser.add_argument("--feature_target_names_path", default="data/")
182 | parser.add_argument("--result_dir", default="results/")
183 |
184 | parser.add_argument("--auxiliary_feat", type=str, default="", help="Default: not using auxiliary features")
185 | parser.add_argument("--use_periodic_encoder", action="store_true", help="Default: False")
186 | parser.add_argument("--use_periodic_as_feat", action="store_true", help="Default: False")
187 |
188 | parser.add_argument("--seq_len", type=int, default=120)
189 | parser.add_argument("--horizon", type=int, default=30)
190 | parser.add_argument("--batch_size", type=int, default=64)
191 | parser.add_argument("--epochs", type=int, default=600)
192 | parser.add_argument("--lr", type=float, default=1e-5)
193 | parser.add_argument("--gpu", type=int, default=3)
194 |
195 | args = parser.parse_args()
196 | for arg in vars(args):
197 | print(f'{arg} - {getattr(args, arg)}')
198 |
199 | auxiliary_feat = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
200 | auxiliary_feat = [auxiliary_feat[int(i)] for i in list(args.auxiliary_feat)]
201 | print(f'auxiliary_feat - {auxiliary_feat}')
202 |
203 | if args.model_name == "lstm":
204 |         assert not args.use_periodic_encoder, "cannot use periodic encoder in LSTM"
205 |
206 | """ checking if the setting has been trained """
207 | fname = "{}_aux{}_penc{}_pfeat{}_{}_*".format(
208 | args.model_name,
209 | args.auxiliary_feat,
210 | 1 if args.use_periodic_encoder else 0,
211 | 1 if args.use_periodic_as_feat else 0,
212 | args.data_name)
213 |
214 | fname = glob.glob(f"{args.result_dir}/{fname}")
215 | if len(fname) > 0:
216 | print(f'This setting has been trained for {len(fname)} time(s).')
217 | sys.exit(0)
218 |
219 | model_name = "{}_aux{}_penc{}_pfeat{}_{}_{}".format(
220 | args.model_name,
221 | args.auxiliary_feat,
222 | 1 if args.use_periodic_encoder else 0,
223 | 1 if args.use_periodic_as_feat else 0,
224 | args.data_name,
225 | int(time.time()))
226 | print(f'model_name - {model_name}')
227 |
228 | data_csv_path = os.path.join(args.data_csv_path, f"preprocess_data_{args.data_name}.csv")
229 | feature_target_names_path = os.path.join(args.feature_target_names_path, f"config_{args.data_name}.json")
230 | result_dir = os.path.join(args.result_dir, model_name)
231 | log_dir = os.path.join(result_dir, "logs")
232 | model_dir = os.path.join(result_dir, "models")
233 | output_json_path = os.path.join(result_dir, "trained_config.json")
234 |
235 | if not os.path.exists(result_dir):
236 | os.makedirs(result_dir)
237 | else:
238 |         print(f'{result_dir} already exists! Please change the model name.')
239 | sys.exit(0)
240 |
241 | print(f'data_csv_path - {data_csv_path}')
242 | print(f'feature_target_names_path - {feature_target_names_path}')
243 | print(f'result_dir - {result_dir}')
244 | print(f'log_dir - {log_dir}')
245 | print(f'model_dir - {model_dir}')
246 | print(f'output_json_path - {output_json_path}')
247 |
248 | train(
249 | data_csv_path=data_csv_path,
250 | feature_target_names_path=feature_target_names_path,
251 | output_json_path=output_json_path,
252 | model_name=args.model_name,
253 | log_dir=log_dir,
254 | model_dir=model_dir,
255 | auxiliary_feat=auxiliary_feat,
256 | use_periodic_encoder=args.use_periodic_encoder,
257 | use_periodic_as_feat=args.use_periodic_as_feat,
258 | seq_len=args.seq_len,
259 | horizon=args.horizon,
260 | batch_size=args.batch_size,
261 | epochs=args.epochs,
262 | lr=args.lr,
263 | gpu=args.gpu,
264 | )
--------------------------------------------------------------------------------
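To make the run-naming convention concrete, a minimal sketch of how train.py encodes the settings into the directory name and how test.py recovers them (the values and timestamp are illustrative; test.py parses the aux digits with a character loop, the split below is just an equivalent shorthand):

    import time

    # train.py side: encode the settings into the run name
    model_name = "{}_aux{}_penc{}_pfeat{}_{}_{}".format(
        "transformer", "35", 1, 0, "p7_10000", int(time.time())
    )
    print(model_name)  # e.g. transformer_aux35_penc1_pfeat0_p7_10000_1650000000

    # test.py side: recover the settings from the name
    aux_digits = model_name[model_name.find("aux") + 3:].split("_")[0]      # '35'
    use_periodic_encoder = int(model_name[model_name.find("penc") + 4])     # 1
    use_periodic_as_feat = int(model_name[model_name.find("pfeat") + 5])    # 0
    data_name = "_".join(model_name.split("_")[-3:-1])                      # 'p7_10000'
    print(aux_digits, use_periodic_encoder, use_periodic_as_feat, data_name)
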
/test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from typing import Optional
4 | import numpy as np
5 | import pandas as pd
6 | import torch
7 | from sklearn.metrics import mean_absolute_error
8 | from tqdm import tqdm
9 | import copy
10 | import random
11 | import matplotlib.pyplot as plt
12 |
13 | from models.transformer import TransformerForecasting
14 | from models.lstm import LSTMForecasting
15 | from data_utils import split_df, Dataset
16 |
17 |
18 | def smape(true, pred):
19 | """
20 | Symmetric mean absolute percentage error
21 | :param true:
22 | :param pred:
23 | :return:
24 | """
25 | true = np.array(true)
26 | pred = np.array(pred)
27 |
28 | smape_val = (
29 | 100
30 | / pred.size
31 | * np.sum(2 * (np.abs(true - pred)) / (np.abs(pred) + np.abs(true) + 1e-8))
32 | )
33 |
34 | return smape_val
35 |
36 |
37 | def evaluate_regression(true, pred):
38 | """
39 | eval mae + smape
40 | :param true:
41 | :param pred:
42 | :return:
43 | """
44 |
45 | return {"smape": smape(true, pred), "mae": mean_absolute_error(true, pred)}
46 |
47 |
48 | def test(
49 | data_csv_path: str,
50 | feature_target_names_path: str,
51 | model_name: str,
52 | model_path: str,
53 | test_json_path: str,
54 | seq_len: int = 120,
55 | horizon: int = 30,
56 | data_for_visualization_path: Optional[str] = None,
57 | image_path: Optional[str] = None,
58 | num_test: Optional[int] = 100,
59 | gpu: int = 1
60 | ):
61 |
62 | auxiliary_feat = ''
63 | for i in range(model_name.find('aux') + 3, len(model_name)):
64 | if model_name[i] != '_':
65 | auxiliary_feat += model_name[i]
66 | else:
67 | break
68 |
69 | auxiliary_feats = ["day_of_week", "day_of_month", "day_of_year", "month", "week_of_year", "year"]
70 | auxiliary_feat = [auxiliary_feats[int(i)] for i in list(auxiliary_feat)]
71 |
72 | use_periodic_encoder = int(model_name[model_name.find('penc') + 4])
73 | use_periodic_as_feat = int(model_name[model_name.find('pfeat') + 5])
74 | print("data_csv_path - ", data_csv_path)
75 | print("auxiliary_feat - ", auxiliary_feat)
76 | print("use_periodic_encoder - ", use_periodic_encoder)
77 | print("use_periodic_as_feat - ", use_periodic_as_feat)
78 |
79 | data = pd.read_csv(data_csv_path)
80 |
81 | with open(feature_target_names_path) as f:
82 | feature_target_names = json.load(f)
83 | target = feature_target_names["target"]
84 |
85 | data_train = data[~data[target].isna()]
86 | grp_by_train = data_train.groupby(by=feature_target_names["group_by_key"])
87 | groups = list(grp_by_train.groups)
88 |
89 | full_groups = [
90 | grp for grp in groups if grp_by_train.get_group(grp).shape[0] > horizon
91 | ]
92 |
93 | test_data = Dataset(
94 | groups=full_groups,
95 | grp_by=grp_by_train,
96 | split="test",
97 | features=copy.copy(auxiliary_feat),
98 | target=feature_target_names["target"],
99 | seq_len=seq_len,
100 | horizon=horizon,
101 | use_periodic_as_feat=use_periodic_as_feat
102 | )
103 |
104 | in_channels = len(auxiliary_feat) + use_periodic_as_feat + 1
105 | assert in_channels == test_data[0][0].size(1) - 1
106 |     print(f"in_channels - {in_channels}")
107 |
108 | if 'transformer' in model_name:
109 | model = TransformerForecasting(
110 | n_encoder_inputs=in_channels,
111 | n_decoder_inputs=in_channels,
112 | h_channels=512,
113 | out_channels=1,
114 | use_periodic_encoder=use_periodic_encoder,)
115 |
116 | elif 'lstm' in model_name:
117 | model = LSTMForecasting(
118 | n_encoder_inputs=in_channels,
119 | n_decoder_inputs=in_channels,
120 | h_channels=512,
121 | out_channels=1,)
122 | else:
123 | raise NotImplementedError
124 |
125 | device = torch.device(f'cuda:{gpu}' if torch.cuda.is_available() else 'cpu')
126 | model.load_state_dict(torch.load(model_path, map_location=device)["state_dict"], strict=False)
127 |
128 | model.eval()
129 |
130 | gt = []
131 | baseline_last_known_values = []
132 | neural_predictions = []
133 |
134 | data_for_visualization = []
135 |
136 | random.seed(1234)
137 | test_idx = random.sample([i for i in range(len(full_groups))], num_test)
138 |
139 | for i, group in tqdm(enumerate(full_groups)):
140 |
141 | if i not in test_idx:
142 | continue
143 |
144 | time_series_data = {"history": [], "ground_truth": [], "prediction": []}
145 |
146 | df = grp_by_train.get_group(group)
147 | src, trg = split_df(df, split="test")
148 |
149 | time_series_data["history"] = src[target].tolist()[-seq_len:]
150 | time_series_data["ground_truth"] = trg[target].tolist()
151 |
152 | last_known_value = src[target].values[-1]
153 |
154 | trg["last_known_value"] = last_known_value
155 |
156 | gt += trg[target].tolist()
157 | baseline_last_known_values += trg["last_known_value"].tolist()
158 |
159 | src, trg_in, _ = test_data[i]
160 | src, trg_in = src.unsqueeze(0), trg_in.unsqueeze(0) # src/trg_in: [1, seq_len/horizon, channels]
161 |
162 | with torch.no_grad():
163 | prediction = model((src, trg_in[:, :1, :]))
164 | for j in range(1, horizon):
165 | last_prediction = prediction[0, -1]
166 | trg_in[:, j, -2] = last_prediction
167 | prediction = model((src, trg_in[:, : (j + 1), :])) # using the prediction as the input
168 |
169 | trg[target + "_predicted"] = (prediction.squeeze().numpy()).tolist()
170 | neural_predictions += trg[target + "_predicted"].tolist()
171 | time_series_data["prediction"] = trg[target + "_predicted"].tolist()
172 |
173 | data_for_visualization.append(time_series_data)
174 |
175 | baseline_eval = evaluate_regression(gt, baseline_last_known_values)
176 | model_eval = evaluate_regression(gt, neural_predictions)
177 |
178 | eval_dict = {
179 | "Baseline_MAE": baseline_eval["mae"],
180 | "Baseline_SMAPE": baseline_eval["smape"],
181 | "Model_MAE": model_eval["mae"],
182 | "Model_SMAPE": model_eval["smape"],
183 | }
184 |
185 | for k, v in eval_dict.items():
186 | print(k, round(v, 5))
187 |
188 | if test_json_path is not None:
189 | with open(test_json_path, "w") as f:
190 | json.dump(eval_dict, f, indent=4)
191 |
192 | if data_for_visualization_path is not None:
193 | with open(data_for_visualization_path, "w") as f:
194 | json.dump(data_for_visualization, f, indent=4)
195 |
196 | # visualization
197 | if image_path is not None:
198 |
199 | for i, sample in enumerate(data_for_visualization[:50]):
200 |
201 | hist_size = len(sample["history"])
202 | gt_size = len(sample["ground_truth"])
203 |
204 | plt.figure()
205 | plt.plot(range(hist_size), sample["history"], label="History")
206 | plt.plot(range(hist_size, hist_size + gt_size), sample["ground_truth"], label="Ground Truth")
207 | plt.plot(range(hist_size, hist_size + gt_size), sample["prediction"], label="Prediction")
208 |
209 | plt.xlabel("Time")
210 | plt.ylabel("Time Series")
211 | plt.legend()
212 | plt.savefig(os.path.join(image_path, f"{i}.png"))
213 | plt.close()
214 |
215 | return eval_dict
216 |
217 |
218 | if __name__ == "__main__":
219 | import argparse
220 |
221 | parser = argparse.ArgumentParser()
222 | parser.add_argument("--model_name", required=True)
223 | parser.add_argument("--num_test", type=int, required=True)
224 |
225 | parser.add_argument("--data_csv_path", default="data/")
226 | parser.add_argument("--feature_target_names_path", default="data/")
227 | parser.add_argument("--result_dir", default="results/")
228 |
229 | parser.add_argument("--seq_len", type=int, default=120)
230 | parser.add_argument("--horizon", type=int, default=30)
231 | parser.add_argument("--gpu", type=int, default=1)
232 |
233 | args = parser.parse_args()
234 |
235 | data_name = '_'.join(args.model_name.split('_')[-3:-1])
236 | data_csv_path = os.path.join(args.data_csv_path, f"preprocess_data_{data_name}.csv")
237 | feature_target_names_path = os.path.join(args.feature_target_names_path, f"config_{data_name}.json")
238 | result_dir = os.path.join(args.result_dir, args.model_name)
239 | log_dir = os.path.join(result_dir, "logs")
240 | model_dir = os.path.join(result_dir, "models")
241 | trained_json_path = os.path.join(result_dir, "trained_config.json")
242 |
243 | """ get the model path with trained config file """
244 | if not os.path.exists(trained_json_path):
245 | model_path = os.path.join(model_dir, args.model_name + '.ckpt')
246 | else:
247 | with open(trained_json_path) as f:
248 | model_json = json.load(f)
249 | model_path = model_json["best_model_path"]
250 |
251 | """ output the evaluation results and visualization config """
252 | output_dir = os.path.join(args.result_dir, args.model_name, "outputs")
253 | if not os.path.exists(output_dir):
254 | os.makedirs(output_dir)
255 |
256 | test_json_path = os.path.join(output_dir, "test.json")
257 | data_for_visualization_path = os.path.join(args.result_dir, args.model_name, "visualization.json")
258 | image_path = os.path.join(output_dir, "images")
259 | if not os.path.exists(image_path):
260 | os.makedirs(image_path)
261 |
262 | """ test """
263 | test(
264 | data_csv_path=data_csv_path,
265 | feature_target_names_path=feature_target_names_path,
266 | model_name=args.model_name,
267 | model_path=model_path,
268 | test_json_path=test_json_path,
269 |         seq_len=args.seq_len,
270 |         horizon=args.horizon,
271 |         data_for_visualization_path=data_for_visualization_path,
272 |         image_path=image_path,
273 |         num_test=args.num_test,
274 | gpu=args.gpu,
275 | )
--------------------------------------------------------------------------------
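The evaluation loop above decodes autoregressively: the trained model is re-run with its own previous prediction written back into the decoder input. A stripped-down sketch of that pattern (the channel at index -2 is the one test.py overwrites; the exact channel layout comes from data_utils.Dataset, which is outside this excerpt):

    import torch

    def autoregressive_decode(model, src, trg_in, horizon):
        # src: [1, seq_len, channels], trg_in: [1, horizon, channels]
        with torch.no_grad():
            prediction = model((src, trg_in[:, :1, :]))
            for j in range(1, horizon):
                trg_in[:, j, -2] = prediction[0, -1]            # feed the last prediction back in
                prediction = model((src, trg_in[:, : j + 1, :]))
        return prediction  # [1, horizon, 1]
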
/viz.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 3,
6 | "id": "d793dbff-59b0-48fb-84d9-a4dcfb89e762",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "from tqdm import tqdm\n",
11 | "import matplotlib.pyplot as plt\n",
12 | "from generate_time_series import generate_df"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": 5,
18 | "id": "770be6c8-70e0-4f30-b1a5-0cad0b6ffd24",
19 | "metadata": {},
20 | "outputs": [
21 | {
22 | "name": "stdout",
23 | "output_type": "stream",
24 | "text": [
25 | "1.4.9\n",
26 | "1.9.1+cu102\n"
27 | ]
28 | }
29 | ],
30 | "source": [
31 | "import pytorch_lightning\n",
32 | "import torch\n",
33 | "print(pytorch_lightning.__version__)\n",
34 | "print(torch.__version__)"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 20,
40 | "id": "b04e7070-5935-4f7b-b8cb-b4b4b37f2d7f",
41 | "metadata": {},
42 | "outputs": [
43 | {
44 | "data": {
45 | "text/plain": [
46 | "[]"
47 | ]
48 | },
49 | "execution_count": 20,
50 | "metadata": {},
51 | "output_type": "execute_result"
52 | }
53 | ],
54 | "source": [
55 | "auxiliary_feat = [\"day_of_week\", \"day_of_month\", \"day_of_year\", \"month\", \"week_of_year\", \"year\"]\n",
56 | "auxiliary_feat = [auxiliary_feat[int(i)] for i in list(\"\")]\n",
57 | "auxiliary_feat"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": 15,
63 | "id": "d07110c4-2cdc-4263-be8c-bc1007d520a1",
64 | "metadata": {},
65 | "outputs": [
66 | {
67 | "name": "stdout",
68 | "output_type": "stream",
69 | "text": [
70 | "0\n",
71 | "1\n",
72 | "2\n",
73 | "3\n",
74 | "4\n",
75 | "5\n",
76 | "6\n",
77 | "0\n",
78 | "1\n",
79 | "2\n",
80 | "3\n",
81 | "4\n",
82 | "5\n",
83 | "6\n",
84 | "0\n"
85 | ]
86 | },
87 | {
88 | "data": {
89 | "image/png": "<base64-encoded matplotlib PNG output omitted>"
ZFW0v9hOieAIUCMiVSKShX/A7TmLYwpLRAT/PexWY8y3rY5nIcaYLxtj1hljKvH/m75sjEnYM1ZjzBWgS0S2BJ7aC7RYGNL1XAT2iEhuYL/YSwIPbgc8Bzwa+PlR4FkLY7kuEbkP+EPg140xY1bHMx9jzPvGmHJjTGXgeOsGdgb26ajYJhEEBoOeAF7EfyD9zBhzytqo5nUH8Gn8Z9fHAl8PWB1UCvld4EcicgLYAXzN2nDmF7hy+WfgXeB9/MdswrREEJGfAO8AW0SkW0Q+C3wdqBeRs/hnt3zdyhhDzRPv3wAFQEPgWHvK0iAD5ok1Pp+VuFdBSimlloNtrgiUUkqFp4lAKaVsThOBUkrZnCYCpZSyOU0ESillc5oIlFLK5jQRKKWUzf3/2hhCFcpdTIwAAAAASUVORK5CYII=\n",
90 | "text/plain": [
91 | ""
92 | ]
93 | },
94 | "metadata": {
95 | "needs_background": "light"
96 | },
97 | "output_type": "display_data"
98 | },
99 | {
100 | "data": {
101 | "text/plain": [
102 | "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]"
103 | ]
104 | },
105 | "execution_count": 15,
106 | "metadata": {},
107 | "output_type": "execute_result"
108 | }
109 | ],
110 | "source": [
111 | "import matplotlib.pyplot as plt\n",
112 | "import numpy as np\n",
113 | "\n",
114 | "x = [i for i in range(15)]  # 15 time steps (two full weekly cycles)\n",
115 | "y = []\n",
116 | "for i in x:\n",
117 | " y.append(np.cos(i * 2 * np.pi / 7))  # cosine with period 7 (a weekly cycle)\n",
118 | " print(i % 7)  # day-of-week index\n",
119 | " \n",
120 | "fig = plt.figure()\n",
121 | "plt.plot(x, y)\n",
122 | "plt.show()\n",
123 | "x\n",
124 | " "
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": 3,
130 | "id": "615d6751-27f3-4560-a5cc-031b0ec512fb",
131 | "metadata": {},
132 | "outputs": [
133 | {
134 | "data": {
135 | "text/plain": [
136 | "0.10578089509666766"
137 | ]
138 | },
139 | "execution_count": 3,
140 | "metadata": {},
141 | "output_type": "execute_result"
142 | }
143 | ],
144 | "source": [
145 | "import random\n",
146 | "\n",
147 | "random.random()"
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": null,
153 | "id": "2e570e4d-31ec-40dd-9330-e28816097010",
154 | "metadata": {},
155 | "outputs": [],
156 | "source": []
157 | }
158 | ],
159 | "metadata": {
160 | "kernelspec": {
161 | "display_name": "Python 3",
162 | "language": "python",
163 | "name": "python3"
164 | },
165 | "language_info": {
166 | "codemirror_mode": {
167 | "name": "ipython",
168 | "version": 3
169 | },
170 | "file_extension": ".py",
171 | "mimetype": "text/x-python",
172 | "name": "python",
173 | "nbconvert_exporter": "python",
174 | "pygments_lexer": "ipython3",
175 | "version": "3.8.5"
176 | }
177 | },
178 | "nbformat": 4,
179 | "nbformat_minor": 5
180 | }
181 |
--------------------------------------------------------------------------------