├── .gitignore ├── DSANet ├── README.md ├── batch_jobs │ ├── b_ele_168_12.sh │ ├── b_ele_168_24.sh │ ├── b_ele_168_3.sh │ ├── b_ele_168_36.sh │ ├── b_ele_168_36_c.sh │ ├── b_ele_168_6.sh │ ├── b_pow_168_12.sh │ ├── b_pow_168_24.sh │ ├── b_pow_168_3.sh │ ├── b_pow_168_36.sh │ ├── b_pow_168_36_c.sh │ ├── b_pow_168_36_load.sh │ ├── b_pow_168_36_load_c.sh │ ├── b_pow_168_36_price.sh │ ├── b_pow_168_36_price_c.sh │ ├── b_pow_168_36_solar.sh │ ├── b_pow_168_36_solar_c.sh │ ├── b_pow_168_36_wind.sh │ ├── b_pow_168_36_wind_c.sh │ ├── b_pow_168_6.sh │ ├── batch_job_DSANet.sh │ ├── batch_job_DSANet_128_12.sh │ ├── batch_job_DSANet_128_12_test.sh │ ├── batch_job_DSANet_128_24.sh │ ├── batch_job_DSANet_128_24_test.sh │ ├── batch_job_DSANet_128_3.sh │ ├── batch_job_DSANet_128_36.sh │ ├── batch_job_DSANet_128_36_test.sh │ ├── batch_job_DSANet_128_3_test.sh │ ├── batch_job_DSANet_128_6.sh │ ├── batch_job_DSANet_128_6_test.sh │ ├── batch_job_DSANet_power_128_12.sh │ ├── batch_job_DSANet_power_128_12_test.sh │ ├── batch_job_DSANet_power_128_24.sh │ ├── batch_job_DSANet_power_128_24_test.sh │ ├── batch_job_DSANet_power_128_3.sh │ ├── batch_job_DSANet_power_128_36.sh │ ├── batch_job_DSANet_power_128_36_test.sh │ ├── batch_job_DSANet_power_128_3_test.sh │ ├── batch_job_DSANet_power_128_6.sh │ ├── batch_job_DSANet_power_128_6_test.sh │ ├── batch_job_DSANet_power_168_12.sh │ ├── batch_job_DSANet_power_168_12_test.sh │ ├── batch_job_DSANet_power_168_24.sh │ ├── batch_job_DSANet_power_168_24_test.sh │ ├── batch_job_DSANet_power_168_2_test.sh │ ├── batch_job_DSANet_power_168_3.sh │ ├── batch_job_DSANet_power_168_36.sh │ ├── batch_job_DSANet_power_168_36_test.sh │ ├── batch_job_DSANet_power_168_6.sh │ ├── batch_job_DSANet_power_168_6_test.sh │ ├── batch_job__hopt_DSANet_36.sh │ ├── batch_job_hopt_DSANet_power_36.sh │ ├── batch_job_hopt_DSANet_power_36_t.sh │ ├── bj_ele_168_12.sh │ ├── bj_ele_168_24.sh │ ├── bj_ele_168_3.sh │ ├── bj_ele_168_36.sh │ ├── bj_ele_168_6.sh │ ├── bj_hopt_ele_36.sh │ ├── bj_hopt_power_36.sh │ ├── bj_pow_168_36.sh │ ├── hopt_batch_job_DSANet_power_168_36_full.sh │ ├── opt_b_ele_168_36.sh │ ├── opt_b_pow_168_36.sh │ ├── opt_b_pow_168_36_price.sh │ ├── opt_b_pow_168_36_solar.sh │ ├── opt_b_pow_168_36_wind.sh │ ├── test_b_pow_168_36.sh │ ├── test_b_pow_168_36_load.sh │ ├── test_b_pow_168_36_price.sh │ ├── test_b_pow_168_36_solar.sh │ ├── test_b_pow_168_36_wind.sh │ └── test_runs.sh ├── data │ └── README.md ├── dataset.py ├── datautil.py ├── docs │ ├── DSANet-model-structure.png │ ├── ablation_CORR.png │ ├── ablation_MAE.png │ ├── ablation_RRSE.png │ ├── exp_results_window_128.png │ ├── exp_results_window_32.png │ └── exp_results_window_64.png ├── dsanet │ ├── Layers.py │ ├── Modules.py │ ├── SubLayers.py │ └── __init__.py ├── hopt_gpu_trainer.py ├── hopt_gpu_trainer_electricity.py ├── hopt_gpu_trainer_power.py ├── hopt_gpu_trainer_power_full.py ├── model.py ├── new_hopt_gpu_trainer_electricity.py ├── new_hopt_gpu_trainer_power.py ├── requirements.txt ├── requirements_old.txt ├── single_cpu_trainer.py ├── single_gpu_trainer.py ├── single_gpu_trainer_electricity.py ├── single_gpu_trainer_power.py └── test_electricity.py ├── DeepAR ├── LICENSE ├── README.md ├── batch jobs │ ├── batch_job_hpo_elect_36.sh │ ├── batch_job_hpo_power36.sh │ ├── batch_train12.sh │ ├── batch_train24.sh │ ├── batch_train3.sh │ ├── batch_train36.sh │ ├── batch_train6.sh │ ├── batch_train_power12.sh │ ├── batch_train_power24.sh │ ├── batch_train_power3.sh │ ├── batch_train_power36.sh │ ├── batch_train_power6.sh │ ├── 
batch_train_power_load.sh │ ├── batch_train_power_non_exogeneous.sh │ ├── batch_train_power_price.sh │ ├── batch_train_power_solar.sh │ └── batch_train_power_wind.sh ├── conda_env.yml ├── dataloader.py ├── evaluate.py ├── experiments │ ├── base_model │ │ ├── best.pth.tar │ │ ├── metrics_test_best_weights.json │ │ └── params.json │ └── param_search │ │ └── params.json ├── model │ ├── LSTM.py │ └── net.py ├── preprocess_elect.py ├── preprocess_power_system.py ├── pytorchtools.py ├── requirements.txt ├── search_hyperparams_custom.py ├── train.py └── utils.py ├── DeepTCN ├── README.md ├── batch_jobs │ ├── batch_job_tcn_elect_12.sh │ ├── batch_job_tcn_elect_24.sh │ ├── batch_job_tcn_elect_3.sh │ ├── batch_job_tcn_elect_6.sh │ ├── batch_job_tcn_elect_hop_36_evaluate.sh │ ├── batch_job_tcn_hop.sh │ ├── batch_job_tcn_hop_elect_36.sh │ ├── batch_job_tcn_hop_power_36.sh │ ├── batch_job_tcn_power_12.sh │ ├── batch_job_tcn_power_24.sh │ ├── batch_job_tcn_power_3.sh │ ├── batch_job_tcn_power_36.sh │ ├── batch_job_tcn_power_36_load.sh │ ├── batch_job_tcn_power_36_nonexogeneous.sh │ ├── batch_job_tcn_power_36_price.sh │ ├── batch_job_tcn_power_36_solar.sh │ ├── batch_job_tcn_power_36_wind.sh │ ├── batch_job_tcn_power_6.sh │ └── batch_job_tcn_power_hop_36_evaluate.sh └── electricity │ ├── NewTCNQuantile │ ├── ec_feature_preprocess.ipynb │ ├── ec_feature_preprocess.py │ ├── ec_feature_preprocess_nonexogeneous.py │ ├── ec_probabilistic_forecasting.py │ ├── ec_probabilistic_forecasting_evaluate.py │ ├── ec_probabilistic_hpo.py │ ├── nnHelper.py │ ├── nnModels.py │ ├── nnTrainer.py │ └── pytorchtools.py │ ├── basicPreprocess.R │ └── readme.md ├── FFNN └── gluonts_benchmarks.py ├── LICENSE ├── LSTNet ├── README.md ├── batch_jobs │ ├── batch_job_electricity_train_test_save_evaluate.sh │ ├── batch_job_lstnet_hpo_elect.sh │ ├── batch_job_lstnet_hpo_power.sh │ ├── batch_job_power_system_exogeneous.sh │ ├── batch_job_power_system_fields.sh │ └── batch_job_power_system_test_train_test_evaluation.sh ├── electricity.sh ├── lstnet.ipynb ├── lstnet_datautil.py ├── lstnet_model.py ├── lstnet_plot.py ├── lstnet_util.py └── main.py ├── Metrics └── metrics.py ├── Naive └── naive benchmarks.ipynb ├── README.md ├── datasets ├── README.md ├── csv │ ├── electricity.csv.gz │ ├── europe_power_system.csv.gz │ ├── europe_power_system_exogeneous.csv.gz │ ├── europe_power_system_load.csv.gz │ ├── europe_power_system_price.csv.gz │ ├── europe_power_system_solar.csv.gz │ └── europe_power_system_wind.csv.gz ├── dataset_europe.ipynb └── txt │ ├── electricity.txt.gz │ ├── europe_power_system.txt.gz │ ├── europe_power_system_exogeneous.txt.gz │ ├── europe_power_system_load.txt.gz │ ├── europe_power_system_price.txt.gz │ ├── europe_power_system_solar.txt.gz │ └── europe_power_system_wind.txt.gz ├── results ├── coverage_test.jpg └── stat_test.jpg └── util ├── Msglog.py ├── README.md └── model_util.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | *.pyc 7 | *.pyo 8 | 9 | nohup.out 10 | .DS_Store 11 | *.npy 12 | *.csv 13 | *.ipynb_checkpoints 14 | -------------------------------------------------------------------------------- /DSANet/README.md: -------------------------------------------------------------------------------- 1 | #DSANet 2 | 3 | ## Modifications to the initial implementation by https://github.com/bighuang624/DSANet: 4 | - hyper-parameter optimization with 
[hyperopt](https://github.com/hyperopt/hyperopt) 5 | 6 | To run the experiments, see the [batch_jobs](batch_jobs) folder. 7 | 8 | ------------------------------------------------------------------------ 9 | 10 | # Dual Self-Attention Network for Multivariate Time Series Forecasting 11 | 12 | This project is the PyTorch implementation of the paper "[DSANet: Dual Self-Attention Network for Multivariate Time Series Forecasting](https://dl.acm.org/citation.cfm?doid=3357384.3358132)", in which we propose a dual self-attention network (DSANet) for multivariate time series forecasting. The network architecture is illustrated in the following figure, and more details about the effect of each component can be found in the paper. 13 | 14 | ![](https://raw.githubusercontent.com/bighuang624/DSANet/master/docs/DSANet-model-structure.png) 15 | 16 | ## Requirements 17 | 18 | * Python 3.5 or above 19 | * PyTorch 1.1 or above 20 | * pytorch-lightning 21 | 22 | ## How to run 23 | 24 | You need to prepare the dataset first. Check [here](https://github.com/bighuang624/DSANet/blob/master/data/README.md). 25 | 26 | ```bash 27 | # clone project 28 | git clone https://github.com/bighuang624/DSANet.git 29 | 30 | # install dependencies 31 | cd DSANet 32 | pip install -r requirements.txt 33 | 34 | # run 35 | python single_cpu_trainer.py --data_name {data_name} --n_multiv {n_multiv} 36 | ``` 37 | 38 | **Notice:** At present, there are some bugs (presumably left over from an older version of pytorch-lightning) that prevent our code from running correctly on GPUs. For now, run the code on the CPU as shown above. 39 | 40 | ## Citation 41 | 42 | If our code is helpful for your research, please cite our paper: 43 | 44 | ``` 45 | @inproceedings{Huang2019DSANet, 46 | author = {Huang, Siteng and Wang, Donglin and Wu, Xuehan and Tang, Ao}, 47 | title = {DSANet: Dual Self-Attention Network for Multivariate Time Series Forecasting}, 48 | booktitle = {The 28th ACM International Conference on Information and Knowledge Management (CIKM 2019)}, 49 | month = {November}, 50 | year = {2019}, 51 | address = {Beijing, China} 52 | } 53 | ``` 54 | 55 | ## Acknowledgement 56 | 57 | Part of the code is heavily borrowed from [jadore801120/attention-is-all-you-need-pytorch](https://github.com/jadore801120/attention-is-all-you-need-pytorch). 58 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_ele_168_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_12.txt 11 | #SBATCH --error=je_ele_168_12.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 12 --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!"
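# ----------------------------------------------------------------------
# Evaluation-only sketch: the *_test.sh jobs in this folder pass
# --test_only True to single_cpu_trainer.py to rerun just the test phase
# against a previously trained model. Assuming single_gpu_trainer.py
# accepts the same flag (an assumption, not verified here), the training
# run above could be followed by:
#
#   srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 12 \
#       --calendar False --batch_size 32 --test_only True \
#       --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756
# ----------------------------------------------------------------------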
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_ele_168_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_24.txt 11 | #SBATCH --error=je_ele_168_24.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 24 --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_ele_168_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_3.txt 11 | #SBATCH --error=je_ele_168_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 3 --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_ele_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_36.txt 11 | #SBATCH --error=je_ele_168_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 36 --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_ele_168_36_c.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_36c 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_36_c.txt 11 | #SBATCH --error=je_ele_168_36_c.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 36 --calendar True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_ele_168_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_6.txt 11 | #SBATCH --error=je_ele_168_6.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 6 --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_12.txt 11 | #SBATCH --error=je_pow_168_12.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 12 --powerset all --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_24.txt 11 | #SBATCH --error=je_pow_168_24.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 24 --powerset all --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_3.txt 11 | #SBATCH --error=je_pow_168_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 3 --powerset all --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36.txt 11 | #SBATCH --error=je_pow_168_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset all --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_c.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_36c 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_c.txt 11 | #SBATCH --error=je_pow_168_36_c.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset all --calendar True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_load.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836l 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_load.txt 11 | #SBATCH --error=je_pow_168_36_load.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --learning_rate 0.001 --local 5 --d_model 100 --drop_prob 0.2 --powerset load --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_load_c.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836l 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_load_c.txt 11 | #SBATCH --error=je_pow_168_36_load_c.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset load --calendar True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_price.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836p 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_price.txt 11 | #SBATCH --error=je_pow_168_36_price.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset price --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_price_c.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836p 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_price_c.txt 11 | #SBATCH --error=je_pow_168_36_price_c.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset price --calendar True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_solar.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836s 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_solar.txt 11 | #SBATCH --error=je_pow_168_36_solar.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset solar --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_solar_c.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836s 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_solar_c.txt 11 | #SBATCH --error=je_pow_168_36_solar_c.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset solar --calendar True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_wind.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836w 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_wind.txt 11 | #SBATCH --error=je_pow_168_36_wind.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset wind --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_36_wind_c.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836w 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_wind_c.txt 11 | #SBATCH --error=je_pow_168_36_wind_c.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset wind --calendar True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/b_pow_168_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_6.txt 11 | #SBATCH --error=je_pow_168_6.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 6 --powerset all --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsanet 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=0:10:00 9 | #SBATCH --gres=gpu:v100:1,nvme:32 10 | #SBATCH --output=job_out_ep.txt 11 | #SBATCH --error=job_err_ep.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | #cd DeepAR 18 | 19 | #cd ./DSANet-master/ 20 | 21 | echo "Installing requirements ..." 22 | pip install -r 'requirements.txt' --user -q --no-cache-dir 23 | echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | #srun python3 ec_feature_preprocess_custom.py 27 | echo "Data loaded." 28 | 29 | echo "Start running ... " 30 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 36 31 | echo "Finished running!" 32 | 33 | seff $SLURM_JOBID 34 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsanet_128_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_12.txt 11 | #SBATCH --error=job_err_ep_electricity_128_12.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements_old.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 12 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_12_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsat128_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_12_test.txt 11 | #SBATCH --error=job_err_ep_electricity_128_12_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 12 --batch_size 64 --test_only True --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsanet_128_24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_24.txt 11 | #SBATCH --error=job_err_ep_electricity_128_24.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 24 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_24_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsat128_24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_24_test.txt 11 | #SBATCH --error=job_err_ep_electricity_128_24_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 24 --batch_size 64 --test_only True --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae128_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=5:00:00 9 | #SBATCH --gres=gpu:v100:4 10 | #SBATCH --output=jo_ele_128_3.txt 11 | #SBATCH --error=je_ele_128_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer_electricity.py --data_name electricity --n_multiv 321 --window 128 --horizon 3 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsanet_128_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_36.txt 11 | #SBATCH --error=job_err_ep_electricity_128_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 36 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_36_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsat128_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_36_test.txt 11 | #SBATCH --error=job_err_ep_electricity_128_36_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 36 --batch_size 64 --test_only True --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_3_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsat128_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_3_test.txt 11 | #SBATCH --error=job_err_ep_electricity_128_3_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 3 --batch_size 64 --test_only True --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsa128_6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_6.txt 11 | #SBATCH --error=job_err_ep_electricity_128_6.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 6 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_128_6_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsat128_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_electricity_128_6_test.txt 11 | #SBATCH --error=job_err_ep_electricity_128_6_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_cpu_trainer.py --data_name electricity --n_multiv 321 --window 128 --horizon 6 --batch_size 64 --test_only True --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_12.txt 11 | #SBATCH --error=job_err_ep_power_128_12.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 128 --horizon 12 --batch_size 64 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_12_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=1:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_12_test.txt 11 | #SBATCH --error=job_err_ep_power_128_12_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 185 --window 128 --horizon 12 --batch_size 64 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_24.txt 11 | #SBATCH --error=job_err_ep_power_128_24.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 128 --horizon 24 --batch_size 64 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 
24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_24_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=1:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_24_test.txt 11 | #SBATCH --error=job_err_ep_power_128_24_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 185 --window 128 --horizon 24 --batch_size 64 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_3.txt 11 | #SBATCH --error=job_err_ep_power_128_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 128 --horizon 3 --batch_size 64 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_36.txt 11 | #SBATCH --error=job_err_ep_power_128_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 128 --horizon 36 --batch_size 64 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 
24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_36_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dspt12836 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=2:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_36_test.txt 11 | #SBATCH --error=job_err_ep_power_128_36_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 185 --window 128 --horizon 36 --batch_size 64 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_3_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=1:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_3_test.txt 11 | #SBATCH --error=job_err_ep_power_128_3_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 185 --window 128 --horizon 3 --batch_size 64 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_6.txt 11 | #SBATCH --error=job_err_ep_power_128_6.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 128 --horizon 6 --batch_size 64 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 
24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_128_6_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap_128_6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=1:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_128_6_test.txt 11 | #SBATCH --error=job_err_ep_power_128_6_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 185 --window 128 --horizon 6 --batch_size 64 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16812 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=16G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_12.txt 11 | #SBATCH --error=job_err_ep_power_168_12.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 12 --batch_size 128 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_12_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsapt16812 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_12_test.txt 11 | #SBATCH --error=job_err_ep_power_168_12_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 12 --batch_size 128 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 
24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16824 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=16G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_24.txt 11 | #SBATCH --error=job_err_ep_power_168_24.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 24 --batch_size 128 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_24_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsapt16824 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_24_test.txt 11 | #SBATCH --error=job_err_ep_power_168_24_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 24 --batch_size 128 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_2_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsapt1683 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_3_test.txt 11 | #SBATCH --error=job_err_ep_power_168_3_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 3 --batch_size 128 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 
24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap1683 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=16G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_3.txt 11 | #SBATCH --error=job_err_ep_power_168_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 3 --batch_size 128 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=16G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_36.txt 11 | #SBATCH --error=job_err_ep_power_168_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 36 --batch_size 128 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_36_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsapt16836 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_36_test.txt 11 | #SBATCH --error=job_err_ep_power_168_36_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 36 --batch_size 128 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 
24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap1686 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=16G 8 | #SBATCH --time=15:50:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_6.txt 11 | #SBATCH --error=job_err_ep_power_168_6.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 6 --batch_size 128 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_DSANet_power_168_6_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsapt1686 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=0:15:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_ep_power_168_6_test.txt 11 | #SBATCH --error=job_err_ep_power_168_6_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 single_cpu_trainer.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 6 --batch_size 128 --test_only True --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job__hopt_DSANet_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsaehopt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=48:00:00 9 | #SBATCH --gres=gpu:v100:4 10 | #SBATCH --output=job_out_ep_4_electricity_168_36_hopt_f2.txt 11 | #SBATCH --error=job_err_ep_4_electricity_168_36_hopt_f2.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | export NCCL_DEBUG=INFO 21 | export PYTHONFAULTHANDLER=1 22 | 23 | echo "Start running ... " 24 | srun python3 hopt_gpu_trainer_electricity.py --data_name electricity --window 168 --n_multiv 321 --horizon 36 --split_train 0.1104014598540146 --split_validation 0.028284671532846715 --split_test 0.028284671532846715 25 | echo "Finished running!" 
26 | 27 | seff $SLURM_JOBID 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_hopt_DSANet_power_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap4hopt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=48:59:00 9 | #SBATCH --gres=gpu:v100:4 10 | #SBATCH --output=job_out_ep_4_power_168_36_hopt_f4.txt 11 | #SBATCH --error=job_err_ep_4_power_168_36_hopt_f4.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | #cd apex 20 | #pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ 21 | #cd .. 22 | echo "Requirements installed." 23 | 24 | #export NCCL_DEBUG=INFO 25 | #export PYTHONFAULTHANDLER=1 26 | 27 | echo "Start running ... " 28 | srun python3 hopt_gpu_trainer_power.py --data_name europe_power_system --window 168 --n_multiv 183 --horizon 36 --split_train 0.11267605633802817 --split_validation 0.02910798122065728 --split_test 0.02910798122065728 29 | echo "Finished running!" 30 | 31 | seff $SLURM_JOBID 32 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/batch_job_hopt_DSANet_power_36_t.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsaphopt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=6G 8 | #SBATCH --time=0:05:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=job_out_ep_power_168_36_hopt_t.txt 11 | #SBATCH --error=job_err_ep_power_168_36_hopt_t.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.2.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | 21 | echo "Start running ... " 22 | srun python3 hopt_gpu_trainer.py --data_name europe_power_system --window 168 --n_multiv 183 --horizon 36 --split_train 0.11267605633802817 --split_validation 0.02910798122065728 --split_test 0.02910798122065728 23 | echo "Finished running!" 24 | 25 | seff $SLURM_JOBID 26 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_ele_168_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_12.txt 11 | #SBATCH --error=je_ele_168_12.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... 
" 23 | srun python3 single_gpu_trainer_electricity.py --data_name electricity --n_multiv 321 --window 168 --horizon 12 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_ele_168_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_24_calendar.txt 11 | #SBATCH --error=je_ele_168_24_calendar.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer_electricity.py --data_name electricity --n_multiv 327 --window 168 --horizon 24 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_ele_168_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_3.txt 11 | #SBATCH --error=je_ele_168_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer_electricity.py --data_name electricity --n_multiv 321 --window 168 --horizon 3 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_ele_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_36_calendar.txt 11 | #SBATCH --error=je_ele_168_36_calendar.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... 
" 23 | srun python3 single_gpu_trainer_electricity.py --data_name electricity --n_multiv 327 --window 168 --horizon 36 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_ele_168_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=12G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_6.txt 11 | #SBATCH --error=je_ele_168_6.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer_electricity.py --data_name electricity --n_multiv 321 --window 168 --horizon 6 --batch_size 64 --split_train 0.6003649635036497 --split_validation 0.19981751824817517 --split_test 0.19981751824817517 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_hopt_ele_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsaehopt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=128G 8 | #SBATCH --time=72:00:00 9 | #SBATCH --gres=gpu:v100:4 10 | #SBATCH --output=jo_ele_36_hopt.txt 11 | #SBATCH --error=je_ele_36_hopt.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | #export NCCL_DEBUG=INFO 21 | #export PYTHONFAULTHANDLER=1 22 | export PYTHONWARNINGS="ignore" 23 | 24 | echo "Start running ... " 25 | srun python3 new_hopt_gpu_trainer_electricity.py --data_name electricity --window 168 --horizon 36 --split_train 0.1104014598540146 --split_validation 0.028284671532846715 --split_test 0.028284671532846715 26 | echo "Finished running!" 27 | 28 | seff $SLURM_JOBID 29 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_hopt_power_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsaphopt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=128G 8 | #SBATCH --time=72:00:00 9 | #SBATCH --gres=gpu:v100:4 10 | #SBATCH --output=jo_power_36_hopt.txt 11 | #SBATCH --error=je_power_36_hopt.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | #cd apex 20 | #pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ 21 | #cd .. 22 | echo "Requirements installed." 
23 | 24 | #export NCCL_DEBUG=INFO 25 | #export PYTHONFAULTHANDLER=1 26 | export PYTHONWARNINGS="ignore" 27 | 28 | echo "Start running ... " 29 | srun python3 new_hopt_gpu_trainer_power.py --data_name europe_power_system --window 168 --horizon 36 --split_train 0.11267605633802817 --split_validation 0.02910798122065728 --split_test 0.02910798122065728 30 | echo "Finished running!" 31 | 32 | seff $SLURM_JOBID 33 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/bj_pow_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_calendar.txt 11 | #SBATCH --error=je_pow_168_36_calendar.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer_power.py --data_name europe_power_system --n_multiv 189 --window 168 --horizon 36 --batch_size 128 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/hopt_batch_job_DSANet_power_168_36_full.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=16G 8 | #SBATCH --time=72:00:00 9 | #SBATCH --gres=gpu:v100:4 10 | #SBATCH --output=h_job_out_ep_power_168_36_hopt_full.txt 11 | #SBATCH --error=h_job_err_ep_power_168_36_hopt_full.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | echo "Installing requirements ..." 18 | pip install -r 'requirements.txt' --user -q --no-cache-dir 19 | echo "Requirements installed." 20 | #export NCCL_DEBUG=INFO 21 | #export PYTHONFAULTHANDLER=1 22 | 23 | echo "Start running ... " 24 | srun python3 hopt_gpu_trainer_power_full.py --data_name europe_power_system --n_multiv 183 --window 168 --horizon 36 --batch_size 128 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 25 | echo "Finished running!" 26 | 27 | seff $SLURM_JOBID 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/opt_b_ele_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsae168_36opt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_ele_168_36_opt.txt 11 | #SBATCH --error=je_ele_168_36_opt.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 
19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name electricity --window 168 --horizon 36 --learning_rate 0.0001 --local 5 --d_model 200 --drop_prob 0.3 --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/opt_b_pow_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_360 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_opt.txt 11 | #SBATCH --error=je_pow_168_36_opt.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --learning_rate 0.0001 --local 5 --d_model 200 --drop_prob 0.3 --powerset all --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/opt_b_pow_168_36_price.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836popt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_price_opt.txt 11 | #SBATCH --error=je_pow_168_36_price_opt.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --learning_rate 0.0001 --local 5 --d_model 200 --drop_prob 0.3 --powerset price --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 
25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/opt_b_pow_168_36_solar.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836sopt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_solar_opt.txt 11 | #SBATCH --error=je_pow_168_36_solar_opt.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --learning_rate 0.0001 --local 5 --d_model 200 --drop_prob 0.3 --powerset solar --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/opt_b_pow_168_36_wind.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836wopt 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_wind_opt.txt 11 | #SBATCH --error=je_pow_168_36_wind_opt.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --learning_rate 0.0001 --local 5 --d_model 200 --drop_prob 0.3 --powerset wind --calendar False --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/test_b_pow_168_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap168_36t 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=32G 8 | #SBATCH --time=00:15:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_test.txt 11 | #SBATCH --error=je_pow_168_36_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... 
" 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset all --calendar False --test_only True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/test_b_pow_168_36_load.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836l 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_load.txt 11 | #SBATCH --error=je_pow_168_36_load.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset load --calendar False --test_only True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/test_b_pow_168_36_price.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836p 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_price.txt 11 | #SBATCH --error=je_pow_168_36_price.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset price --calendar False --test_only True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/test_b_pow_168_36_solar.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836s 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_solar.txt 11 | #SBATCH --error=je_pow_168_36_solar.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... 
" 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset solar --calendar False --test_only True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/test_b_pow_168_36_wind.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsap16836w 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=64G 8 | #SBATCH --time=15:00:00 9 | #SBATCH --gres=gpu:v100:2 10 | #SBATCH --output=jo_pow_168_36_wind.txt 11 | #SBATCH --error=je_pow_168_36_wind.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 single_gpu_trainer.py --data_name europe_power_system --window 168 --horizon 36 --powerset wind --calendar False --test_only True --batch_size 32 --split_train 0.7004694835680751 --split_validation 0.14929577464788732 --split_test 0.15023474178403756 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | 28 | -------------------------------------------------------------------------------- /DSANet/batch_jobs/test_runs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=dsatest2 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=128G 8 | #SBATCH --time=4:45:00 9 | #SBATCH --gres=gpu:v100:4 10 | #SBATCH --output=jo_test_runs_ele_mode.txt 11 | #SBATCH --error=je_test_runs_ele_mode.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load pytorch/1.3.0 15 | module list 16 | 17 | 18 | echo "Installing requirements ..." 19 | pip install -r 'requirements.txt' --user -q --no-cache-dir 20 | echo "Requirements installed." 21 | 22 | echo "Start running ... " 23 | srun python3 test_electricity.py 24 | echo "Finished running!" 25 | 26 | seff $SLURM_JOBID 27 | -------------------------------------------------------------------------------- /DSANet/data/README.md: -------------------------------------------------------------------------------- 1 | ## Data 2 | 3 | Due to the requirements of the cooperative enterprise, the dataset we used in this paper cannot be available temporarily. 4 | 5 | You can try other multivariate time series datasets in this way: 6 | 7 | 1. Download the dataset in .txt file. 8 | 2. Split the dataset into training, validation and test set. 9 | 3. Name the three files `{data_name}_train.txt`, `{data_name}_validation.txt`, `{data_name}_test.txt`. 10 | 4. Place the three files in `./data/{data_name}` by default. 11 | 12 | The data organization in the .txt file is the same as that in [laiguokun/multivariate-time-series-data](https://github.com/laiguokun/multivariate-time-series-data). 
13 | -------------------------------------------------------------------------------- /DSANet/docs/DSANet-model-structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/docs/DSANet-model-structure.png -------------------------------------------------------------------------------- /DSANet/docs/ablation_CORR.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/docs/ablation_CORR.png -------------------------------------------------------------------------------- /DSANet/docs/ablation_MAE.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/docs/ablation_MAE.png -------------------------------------------------------------------------------- /DSANet/docs/ablation_RRSE.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/docs/ablation_RRSE.png -------------------------------------------------------------------------------- /DSANet/docs/exp_results_window_128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/docs/exp_results_window_128.png -------------------------------------------------------------------------------- /DSANet/docs/exp_results_window_32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/docs/exp_results_window_32.png -------------------------------------------------------------------------------- /DSANet/docs/exp_results_window_64.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/docs/exp_results_window_64.png -------------------------------------------------------------------------------- /DSANet/dsanet/Layers.py: -------------------------------------------------------------------------------- 1 | """ Define the Layers """ 2 | import torch.nn as nn 3 | from dsanet.SubLayers import MultiHeadAttention, PositionwiseFeedForward 4 | 5 | 6 | class EncoderLayer(nn.Module): 7 | """ Compose with two layers """ 8 | 9 | def __init__(self, hparams): 10 | super(EncoderLayer, self).__init__() 11 | self.slf_attn = MultiHeadAttention(hparams) 12 | self.pos_ffn = PositionwiseFeedForward(hparams) 13 | 14 | def forward(self, enc_input): 15 | enc_output, enc_slf_attn = self.slf_attn( 16 | enc_input, enc_input, enc_input) 17 | 18 | enc_output = self.pos_ffn(enc_output) 19 | 20 | return enc_output, enc_slf_attn 21 | 22 | 23 | class DecoderLayer(nn.Module): 24 | """ Compose with three layers """ 25 | 26 | def __init__(self, hparams): 27 | super(DecoderLayer, self).__init__() 28 | self.slf_attn = MultiHeadAttention(hparams) 29 | self.enc_attn = MultiHeadAttention(hparams) 30 | 
self.pos_ffn = PositionwiseFeedForward(hparams) 31 | 32 | def forward(self, dec_input, enc_output, non_pad_mask=None, slf_attn_mask=None, dec_enc_attn_mask=None): 33 | dec_output, dec_slf_attn = self.slf_attn( 34 | dec_input, dec_input, dec_input, mask=slf_attn_mask) 35 | 36 | dec_output, dec_enc_attn = self.enc_attn( 37 | dec_output, enc_output, enc_output, mask=dec_enc_attn_mask) 38 | 39 | dec_output = self.pos_ffn(dec_output) 40 | 41 | return dec_output, dec_slf_attn, dec_enc_attn 42 | -------------------------------------------------------------------------------- /DSANet/dsanet/Modules.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | 5 | 6 | class ScaledDotProductAttention(nn.Module): 7 | """ Scaled Dot-Product Attention """ 8 | 9 | def __init__(self, hparams): # temperature, attn_dropout=0.1): 10 | super().__init__() 11 | self.hparams = hparams 12 | self.temperature = np.power(hparams.d_k, 0.5) 13 | self.drop_prob = hparams.drop_prob 14 | self.dropout = nn.Dropout(hparams.drop_prob) 15 | self.softmax = nn.Softmax(dim=2) 16 | 17 | def forward(self, q, k, v): 18 | 19 | attn = torch.bmm(q, k.transpose(1, 2)) 20 | attn = attn / self.temperature 21 | 22 | attn = self.softmax(attn) 23 | if self.hparams.mcdropout == 'True': 24 | attn = nn.functional.dropout(attn, p=self.drop_prob, training=True) 25 | else: 26 | attn = self.dropout(attn) 27 | #attn = self.dropout(attn) 28 | output = torch.bmm(attn, v) 29 | 30 | return output, attn 31 | -------------------------------------------------------------------------------- /DSANet/dsanet/SubLayers.py: -------------------------------------------------------------------------------- 1 | """ Define the sublayers in encoder/decoder layer """ 2 | import numpy as np 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | from dsanet.Modules import ScaledDotProductAttention 6 | 7 | 8 | class MultiHeadAttention(nn.Module): 9 | """ Multi-Head Attention module """ 10 | 11 | def __init__(self, hparams): 12 | super().__init__() 13 | self.hparams = hparams 14 | self.n_head = hparams.n_head 15 | self.d_k = hparams.d_k 16 | self.d_v = hparams.d_v 17 | self.drop_prob = hparams.drop_prob 18 | self.w_qs = nn.Linear(hparams.d_model, hparams.n_head * hparams.d_k) 19 | self.w_ks = nn.Linear(hparams.d_model, hparams.n_head * hparams.d_k) 20 | self.w_vs = nn.Linear(hparams.d_model, hparams.n_head * hparams.d_v) 21 | nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (hparams.d_model + hparams.d_k))) 22 | nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (hparams.d_model + hparams.d_k))) 23 | nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (hparams.d_model + hparams.d_v))) 24 | 25 | self.attention = ScaledDotProductAttention(hparams) 26 | self.layer_norm = nn.LayerNorm(hparams.d_model) 27 | 28 | self.fc = nn.Linear(hparams.n_head * hparams.d_v, hparams.d_model) 29 | nn.init.xavier_normal_(self.fc.weight) 30 | 31 | self.dropout = nn.Dropout(hparams.drop_prob) 32 | 33 | def forward(self, q, k, v): 34 | d_k, d_v, n_head = self.d_k, self.d_v, self.n_head 35 | 36 | sz_b, len_q, _ = q.size() 37 | sz_b, len_k, _ = k.size() 38 | sz_b, len_v, _ = v.size() 39 | 40 | residual = q 41 | 42 | q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) 43 | k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) 44 | v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) 45 | 46 | q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) 47 | k = k.permute(2, 0, 1, 
3).contiguous().view(-1, len_k, d_k) 48 | v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) 49 | 50 | output, attn = self.attention(q, k, v) 51 | 52 | output = output.view(n_head, sz_b, len_q, d_v) 53 | output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) 54 | 55 | if self.hparams.mcdropout == 'True': 56 | output = nn.functional.dropout(self.fc(output), p=self.drop_prob, training=True) 57 | else: 58 | output = self.dropout(self.fc(output)) 59 | 60 | #output = self.dropout(self.fc(output)) 61 | output = self.layer_norm(output + residual) 62 | 63 | return output, attn 64 | 65 | 66 | class PositionwiseFeedForward(nn.Module): 67 | """ A two-feed-forward-layer module """ 68 | #d_model, d_inner, dropout=dropout 69 | def __init__(self, hparams): 70 | super().__init__() 71 | self.hparams = hparams 72 | self.w_1 = nn.Conv1d(hparams.d_model, hparams.d_inner, 1) 73 | self.w_2 = nn.Conv1d(hparams.d_inner, hparams.d_model, 1) 74 | self.layer_norm = nn.LayerNorm(hparams.d_model) 75 | self.dropout = nn.Dropout(hparams.drop_prob) 76 | self.drop_prob = hparams.drop_prob 77 | 78 | def forward(self, x): 79 | residual = x 80 | output = x.transpose(1, 2) 81 | output = self.w_2(F.relu(self.w_1(output))) 82 | output = output.transpose(1, 2) 83 | if self.hparams.mcdropout == 'True': 84 | output = nn.functional.dropout(output, p=self.drop_prob, training=True) 85 | else: 86 | output = self.dropout(output) 87 | #output = self.dropout(output) 88 | output = self.layer_norm(output + residual) 89 | return output 90 | -------------------------------------------------------------------------------- /DSANet/dsanet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DSANet/dsanet/__init__.py -------------------------------------------------------------------------------- /DSANet/requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn>=0.20.2 2 | tqdm>=4.35.0 3 | numpy>=1.16.4 4 | tensorboard>=1.14 5 | future>=0.17.1 6 | torch>=1.3.0 7 | torchvision>=0.4.0 8 | pandas>=0.20.3 9 | pytorch-lightning>=0.7 10 | hyperopt 11 | -------------------------------------------------------------------------------- /DSANet/requirements_old.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==0.20.2 2 | tqdm==4.35.0 3 | numpy==1.16.4 4 | torch>=1.1.0 5 | torchvision>=0.3.0 6 | pandas>=0.20.3 7 | test-tube==0.6.9 8 | pytorch-lightning==0.5.0 9 | -------------------------------------------------------------------------------- /DSANet/single_gpu_trainer_electricity.py: -------------------------------------------------------------------------------- 1 | """ 2 | Runs a model on a single node on 4 GPUs. 
3 | """ 4 | import os 5 | from argparse import ArgumentParser 6 | import pytorch_lightning as pl 7 | 8 | import numpy as np 9 | import pandas as pd 10 | from datetime import datetime 11 | 12 | import torch 13 | 14 | from pytorch_lightning.callbacks import EarlyStopping 15 | from pytorch_lightning.loggers import TestTubeLogger 16 | 17 | import csv 18 | 19 | from model import DSANet 20 | 21 | SEED = 7 22 | torch.manual_seed(SEED) 23 | np.random.seed(SEED) 24 | 25 | 26 | def main(hparams): 27 | """ 28 | Main training routine specific for this project 29 | """ 30 | # ------------------------ 31 | # 1 INIT LIGHTNING MODEL 32 | # ------------------------ 33 | 34 | print('loading model...') 35 | model = DSANet(hparams) 36 | print('model built') 37 | 38 | # ------------------------ 39 | # 2 INIT TEST TUBE EXP 40 | # ------------------------ 41 | logger = TestTubeLogger("tb_logs_v2", name="my_dsanet_ele") 42 | 43 | # ------------------------ 44 | # 3 DEFINE CALLBACKS 45 | # ------------------------ 46 | early_stop_callback = EarlyStopping( 47 | monitor='val_loss', 48 | patience=25, 49 | verbose=False, 50 | mode='min' 51 | ) 52 | 53 | # ------------------------ 54 | # 4 INIT TRAINER 55 | # ------------------------ 56 | trainer = pl.Trainer( 57 | gpus=2, 58 | distributed_backend='dp', 59 | logger=logger, 60 | early_stop_callback=early_stop_callback, 61 | show_progress_bar=False, 62 | profiler=True, 63 | ) 64 | 65 | # ------------------------ 66 | # 5 START TRAINING 67 | # ------------------------ 68 | st_time = datetime.now() 69 | result = trainer.fit(model) 70 | print(result) 71 | eval_time = str(datetime.now() - st_time) 72 | print(f"Train time: {eval_time}") 73 | 74 | st_time = datetime.now() 75 | result = trainer.test() 76 | eval_time = str(datetime.now() - st_time) 77 | print(f"Test time: {eval_time}") 78 | print(result) 79 | 80 | if __name__ == '__main__': 81 | 82 | root_dir = os.path.dirname(os.path.realpath(__file__)) 83 | demo_log_dir = os.path.join(root_dir, 'dsanet_logs') 84 | checkpoint_dir = os.path.join(demo_log_dir, 'model_weights') 85 | test_tube_dir = os.path.join(demo_log_dir, 'test_tube_data') 86 | 87 | # although we user hyperOptParser, we are using it only as argparse right now 88 | parent_parser = ArgumentParser( add_help=False) 89 | 90 | # gpu args 91 | parent_parser.add_argument('--test_tube_save_path', type=str, default=test_tube_dir, help='where to save logs') 92 | parent_parser.add_argument('--model_save_path', type=str, default=checkpoint_dir, help='where to save model') 93 | 94 | # allow model to overwrite or extend args 95 | parser = DSANet.add_model_specific_args(parent_parser, root_dir) 96 | hyperparams = parser.parse_args() 97 | 98 | print(f'RUNNING ON GPU') 99 | main(hyperparams) -------------------------------------------------------------------------------- /DSANet/single_gpu_trainer_power.py: -------------------------------------------------------------------------------- 1 | """ 2 | Runs a model on a single node on 4 GPUs. 
3 | """ 4 | import os 5 | from argparse import ArgumentParser 6 | import pytorch_lightning as pl 7 | 8 | import numpy as np 9 | import pandas as pd 10 | from datetime import datetime 11 | 12 | import torch 13 | 14 | from pytorch_lightning.callbacks import EarlyStopping 15 | from pytorch_lightning.loggers import TestTubeLogger 16 | 17 | import csv 18 | 19 | from model import DSANet 20 | 21 | SEED = 7 22 | torch.manual_seed(SEED) 23 | np.random.seed(SEED) 24 | 25 | 26 | def main(hparams): 27 | """ 28 | Main training routine specific for this project 29 | """ 30 | # ------------------------ 31 | # 1 INIT LIGHTNING MODEL 32 | # ------------------------ 33 | 34 | print('loading model...') 35 | model = DSANet(hparams) 36 | print('model built') 37 | 38 | # ------------------------ 39 | # 2 INIT TEST TUBE EXP 40 | # ------------------------ 41 | logger = TestTubeLogger("tb_logs_v2", name="my_dsanet_pow") 42 | 43 | # ------------------------ 44 | # 3 DEFINE CALLBACKS 45 | # ------------------------ 46 | early_stop_callback = EarlyStopping( 47 | monitor='val_loss', 48 | patience=25, 49 | verbose=False, 50 | mode='min' 51 | ) 52 | 53 | # ------------------------ 54 | # 4 INIT TRAINER 55 | # ------------------------ 56 | trainer = pl.Trainer( 57 | gpus=2, 58 | distributed_backend='dp', 59 | logger=logger, 60 | early_stop_callback=early_stop_callback, 61 | show_progress_bar=False, 62 | profiler=True, 63 | ) 64 | 65 | # ------------------------ 66 | # 5 START TRAINING 67 | # ------------------------ 68 | st_time = datetime.now() 69 | result = trainer.fit(model) 70 | print(result) 71 | eval_time = str(datetime.now() - st_time) 72 | print(f"Train time: {eval_time}") 73 | 74 | st_time = datetime.now() 75 | result = trainer.test() 76 | eval_time = str(datetime.now() - st_time) 77 | print(f"Test time: {eval_time}") 78 | print(result) 79 | 80 | if __name__ == '__main__': 81 | 82 | root_dir = os.path.dirname(os.path.realpath(__file__)) 83 | demo_log_dir = os.path.join(root_dir, 'dsanet_logs') 84 | checkpoint_dir = os.path.join(demo_log_dir, 'model_weights') 85 | test_tube_dir = os.path.join(demo_log_dir, 'test_tube_data') 86 | 87 | # although we user hyperOptParser, we are using it only as argparse right now 88 | parent_parser = ArgumentParser( add_help=False) 89 | 90 | # gpu args 91 | parent_parser.add_argument('--test_tube_save_path', type=str, default=test_tube_dir, help='where to save logs') 92 | parent_parser.add_argument('--model_save_path', type=str, default=checkpoint_dir, help='where to save model') 93 | 94 | # allow model to overwrite or extend args 95 | parser = DSANet.add_model_specific_args(parent_parser, root_dir) 96 | hyperparams = parser.parse_args() 97 | 98 | print(f'RUNNING ON GPU') 99 | main(hyperparams) -------------------------------------------------------------------------------- /DSANet/test_electricity.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import csv 4 | import pytorch_lightning as pl 5 | import torch 6 | from model import DSANet 7 | from datetime import datetime 8 | 9 | 10 | out_file = '/scratch/project_2002244/DSANet/save/test_runs_electricity_final_v2.csv' 11 | ckpt_load_path = '/scratch/project_2002244/DSANet/tb_logs_v2' 12 | path_list = [os.path.join(dirpath, filename) for dirpath, _, filenames in os.walk(ckpt_load_path) for filename 13 | in filenames if filename.endswith('.ckpt')] 14 | 15 | for filename in path_list: 16 | model = DSANet.load_from_checkpoint(filename) 17 | trainer = 
pl.Trainer(resume_from_checkpoint=filename) 18 | 19 | if model.hparams.n_multiv == 321 or model.hparams.n_multiv == 327: 20 | print('we have electricity data') 21 | else: 22 | continue 23 | 24 | if hasattr(model.hparams, 'mcdropout'): 25 | print("we have mcdropout") 26 | else: 27 | print("we set mcdropout to False") 28 | setattr(model.hparams, 'mcdropout', 'False') 29 | 30 | if hasattr(model.hparams, 'powerset'): 31 | print("we have powerset") 32 | else: 33 | print("we set powerset to all") 34 | setattr(model.hparams, 'powerset', 'all') 35 | 36 | if hasattr(model.hparams, 'calendar'): 37 | print("we have calendar") 38 | else: 39 | if model.hparams.n_multiv == 189 or model.hparams.n_multiv == 327: 40 | print("we set calendar to True") 41 | setattr(model.hparams, 'calendar', 'True') 42 | else: 43 | print("we set calendar to False") 44 | setattr(model.hparams, 'calendar', 'False') 45 | print(f'data: {model.hparams.data_name}, horizon: {model.hparams.horizon}, window: {model.hparams.window}, powerset: {model.hparams.powerset}, calendar: {model.hparams.calendar}, {filename}') 46 | try: 47 | st_time = datetime.now() 48 | print(f'Start the test...{st_time}') 49 | trainer.test(model) 50 | result = model.test_results 51 | eval_time = str(datetime.now() - st_time) 52 | print(f"Test time: {eval_time}") 53 | print(result) 54 | of_connection = open(out_file, 'a') 55 | writer = csv.writer(of_connection) 56 | writer.writerow([model.hparams.data_name, model.hparams.horizon, model.hparams.window, model.hparams.powerset, model.hparams.calendar, result]) 57 | torch.cuda.empty_cache() 58 | of_connection.close() 59 | except Exception as e: 60 | print(f"we got an exception...: {e}") 61 | pass 62 | 63 | 64 | -------------------------------------------------------------------------------- /DeepAR/README.md: -------------------------------------------------------------------------------- 1 | #DeepAR 2 | 3 | ## Modifications to the initial implementation by https://github.com/zhykoties/TimeSeries: 4 | - file search_hyperparams_custom.py to run hyper-parameter optimization with [hyperopt](https://github.com/hyperopt/hyperopt) 5 | - early stopping for training in train.py with pytorchtools.py 6 | - open power system dataset preprocessing in preprocess_power_system.py 7 | - saving predictions in evaluate.py 8 | - new parser arguments were added 9 | 10 | To run the experiment please see [batch jobs](batch_jobs) folder. 11 | 12 | ------------------------------------------------------------------ 13 | 14 | # List of Implementations: 15 | Currently, the reimplementation of the DeepAR paper(DeepAR: Probabilistic Forecasting with Autoregressive Recurrent Networks https://arxiv.org/abs/1704.04110) is available in PyTorch. More papers will be coming soon. 16 | 17 | ## Authors: 18 | * **Yunkai Zhang**() - *University of California, Santa Barbara* 19 | 20 | * **Qiao Jiang** - *Brown University* 21 | 22 | ## To run: 23 | 24 | 25 | 1. Install all dependencies listed in requirements.txt. Note that the model has only been tested in the versions shown in the text file. 26 | 27 | 1. Download the dataset and preprocess the data: 28 | 29 | ```bash 30 | python preprocess_elect.py 31 | ``` 32 | 1. Start training: 33 | 34 | ```bash 35 | python train.py 36 | ``` 37 | 38 | - If you want to perform ancestral sampling, 39 | 40 | ```bash 41 | python train.py --sampling 42 | ``` 43 | - If you do not want to do normalization during evaluation, 44 | 45 | 46 | ```bash 47 | python train.py --relative-metrics 48 | ``` 49 | 1. 
Evaluate a set of saved model weights: 50 | 51 | ```bash 52 | python evaluate.py 53 | ``` 54 | 1. Perform hyperparameter search: 55 | 56 | ```bash 57 | python search_params.py 58 | ``` 59 | 60 | ## Results 61 | 62 | The model is evaluated on the electricity dataset, which contains the electricity consumption of 370 households from 2011 to 2014. Under hourly frequency, we use the first week of September 2014 as the test set and all time steps prior to that as the train set. Following the experiment design in DeepAR, the window size is chosen to be 192, where the last 24 steps form the forecasting horizon. History (the number of time steps since the beginning of each household), month of the year, day of the week, and hour of the day are used as time covariates. Note that some households started at different times, so we only use windows that contain non-missing values. 63 | 64 | Under a Gaussian likelihood, we use the Adam optimizer with early stopping to train the model for 20 epochs. The same set of hyperparameters is used as outlined in the paper. The weights with the best ND value are selected, giving __ND = 0.06349__, RMSE = 0.452, rou90 = 0.034 and rou50 = 0.063. 65 | 66 | Sample results on electricity. The top 10 plots are sampled from the test set with the highest 10% ND values, whereas the bottom 10 plots are sampled from the rest of the test set. 67 | 68 | ![Sample results on electricity. The top 10 plots are sampled from the test set with the highest 10% ND values, whereas the bottom 10 plots are sampled from the rest of the test set.](./experiments/base_model/figures/best_ND.png) 69 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_job_hpo_elect_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=da_hop_elect_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=02:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_elect_hop_test.txt 11 | #SBATCH --error=job_err_elect_hop_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load python-data 15 | module list 16 | 17 | #cd DeepAR 18 | 19 | cd .. 20 | 21 | echo "Installing requirements ..." 22 | 23 | pip3 install -r 'requirements.txt' -q --user 24 | pip3 install hyperopt -q --user 25 | 26 | echo "requirements installed." 27 | 28 | seff $SLURM_JOBID 29 | 30 | 31 | echo 'load the data' 32 | 33 | python3 preprocess_elect.py --dataset='elect' \ 34 | --data-folder='data' \ 35 | --model-name='base_model_hop_36_3' \ 36 | --file-name='electricity.csv' \ 37 | --hop 38 | 39 | echo "data loaded." 40 | echo "Search hyperparameters ..." 41 | 42 | python3 search_hyperparams_custom.py --sampling \ 43 | --dataset='elect' \ 44 | --model-name='base_model_hop_36_3' \ 45 | --evals=30 46 | 47 | echo 'Load train data' 48 | 49 | python3 preprocess_elect.py --dataset='elect' \ 50 | --data-folder='data' \ 51 | --model-name='base_model_hop_36_3' \ 52 | --file-name='electricity.csv' 53 | 54 | # echo 'Train data loaded' 55 | 56 | # echo 'Training ...'
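# The dict in the next comment line appears to be the best configuration returned by the
# hyperopt search run via search_hyperparams_custom.py above; it is kept here for reference
# when re-running the commented-out train.py step by hand.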
57 | #{'batch_size': 128.0, 'learning_rate': 0.005, 'lstm_dropout': 0.30000000000000004, 'lstm_hidden_dim': 256.0} 58 | 59 | # python3 train.py --sampling \ 60 | # --dataset='elect' \ 61 | # --data-folder='base_model_hop_36_3' \ 62 | # --model-name='base_model_hop_36_3' \ 63 | # --restore-file='epoch_19' 64 | 65 | 66 | 67 | echo 'Load test data' 68 | 69 | python3 preprocess_elect.py --dataset='elect' \ 70 | --data-folder='data' \ 71 | --model-name='base_model_hop_36_3' \ 72 | --file-name='electricity.csv' \ 73 | --test 74 | 75 | echo 'Test data loaded' 76 | 77 | echo 'Testing ...' 78 | 79 | python3 evaluate.py --sampling \ 80 | --dataset='elect' \ 81 | --data-folder='base_model_hop_36_3' \ 82 | --model-name='base_model_hop_36_3' \ 83 | --save-file \ 84 | --restore-file='best' 85 | 86 | 87 | echo "Finished running!" 88 | 89 | seff $SLURM_JOBID 90 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_job_hpo_power36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=da_hop_power_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=8G 8 | #SBATCH --time=02:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_hop_test.txt 11 | #SBATCH --error=job_err_power_hop_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load python-data 15 | module list 16 | 17 | #cd DeepAR 18 | 19 | cd .. 20 | 21 | echo "Installing requirements ..." 22 | 23 | pip3 install -r 'requirements.txt' -q --user 24 | pip3 install hyperopt -q --user 25 | 26 | echo "requirements installed." 27 | 28 | echo 'load the data' 29 | 30 | python3 preprocess_power_system.py --dataset='elect' \ 31 | --data-folder='data' \ 32 | --model-name='power_system_hop_36_3' \ 33 | --file-name='europe_power_system.csv' \ 34 | --hop 35 | 36 | echo "data loaded." 37 | echo "Search hyperparameters ..." 38 | 39 | python3 search_hyperparams_custom.py --sampling \ 40 | --dataset='elect' \ 41 | --model-name='power_system_hop_36_3' \ 42 | --evals=50 43 | 44 | 45 | 46 | #load the data 47 | 48 | python3 preprocess_elect.py --dataset='elect' \ 49 | --data-folder='data' \ 50 | --model-name='power_system_hop_36_3' \ 51 | --file-name='europe_power_system.csv' 52 | 53 | echo 'Training ...' 54 | 55 | #{'batch_size': 512.0, 'learning_rate': 0.005, 'lstm_dropout': 0.2, 'lstm_hidden_dim': 256.0} 56 | 57 | python3 train.py --sampling \ 58 | --dataset='elect' \ 59 | --data-folder='power_system_hop_36_3' \ 60 | --model-name='power_system_hop_36_3' \ 61 | --restore-file='epoch_25' 62 | 63 | echo 'Testing ... ' 64 | 65 | #python3 preprocess_elect.py --dataset='elect' \ 66 | # --data-folder='data' \ 67 | # --model-name='power_system_hop_36_3' \ 68 | # --file-name='europe_power_system.csv' \ 69 | # --test 70 | 71 | 72 | #python3 evaluate.py --sampling \ 73 | # --dataset='elect' \ 74 | # --data-folder='power_system_hop_36_3' \ 75 | # --model-name='power_system_hop_36_3' \ 76 | # --save-file \ 77 | # --restore-file='best' 78 | 79 | echo "Finished running!" 
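The HPO jobs above drive search_hyperparams_custom.py with [hyperopt](https://github.com/hyperopt/hyperopt). As a rough illustration of how such a search is wired up (the actual space and objective live in search_hyperparams_custom.py and may differ; `train_and_validate` below is a hypothetical helper, not part of this repo):

```python
from hyperopt import Trials, fmin, hp, tpe

# Illustrative search space, shaped after the tuned values quoted in these job scripts,
# e.g. {'batch_size': 128, 'learning_rate': 0.005, 'lstm_dropout': 0.3, 'lstm_hidden_dim': 256}.
space = {
    'batch_size':      hp.choice('batch_size', [64, 128, 256, 512]),
    'learning_rate':   hp.choice('learning_rate', [1e-3, 5e-3, 1e-2]),
    'lstm_dropout':    hp.quniform('lstm_dropout', 0.1, 0.5, 0.1),
    'lstm_hidden_dim': hp.choice('lstm_hidden_dim', [40, 128, 256]),
}

def objective(params):
    # Train DeepAR with `params` and return the validation loss (lower is better).
    return train_and_validate(params)  # hypothetical helper, stands in for the repo's training call

trials = Trials()
best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=30, trials=trials)
print(best)  # best configuration found (hp.choice entries are reported as indices)
```

The `--evals=30` / `--evals=50` arguments passed to search_hyperparams_custom.py in these scripts play the role of `max_evals` here.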
80 | 81 | seff $SLURM_JOBID 82 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_train_elect12_4 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_evaluate_12.txt 15 | #SBATCH --error=job_err_elect_evaluate_12.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data/3.7.3-1 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | # python3 preprocess_elect.py --dataset='elect' \ 32 | # --data-folder='data' \ 33 | # --model-name='base_model_12' \ 34 | # --file-name='electricity.csv' \ 35 | # --test 36 | 37 | 38 | ## Train and test the rest of the horizons 39 | # python3 train.py --sampling \ 40 | # --dataset='elect' \ 41 | # --data-folder='base_model_12' \ 42 | # --model-name='base_model_12' \ 43 | # --restore-file='epoch_47' 44 | 45 | python3 evaluate.py --sampling \ 46 | --dataset='elect' \ 47 | --data-folder='base_model_12' \ 48 | --model-name='base_model_12' \ 49 | --save-file \ 50 | --restore-file='epoch_57' 51 | 52 | echo "Adios el patron!" 53 | 54 | # This script will print some usage statistics to the 55 | # end of the standard out file 56 | # Use that to improve your resource request estimate 57 | # on later jobs. 58 | seff $SLURM_JOBID 59 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_evaluate_elect24_2 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_evaluate24_2.txt 15 | #SBATCH --error=job_err_elect_evaluate24_2.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | # python3 preprocess_elect.py --dataset='elect' \ 32 | # --data-folder='data' \ 33 | # --model-name='base_model_24' \ 34 | # --file-name='electricity.csv' \ 35 | # --test 36 | 37 | ## Train and test the rest of the horizons 38 | # python3 train.py --sampling \ 39 | # --dataset='elect' \ 40 | # --data-folder='base_model_24' \ 41 | # --model-name='base_model_24' \ 42 | # --restore-file='epoch_28' 43 | 44 | # ## Evaluate the models 45 | python3 evaluate.py --sampling \ 46 | --dataset='elect' \ 47 | --data-folder='base_model_24' \ 48 | --model-name='base_model_24' \ 49 | --save-file \ 50 | --restore-file='epoch_32' 51 | 52 | echo "Adios el patron!" 
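The `--sampling` flag passed to train.py and evaluate.py in these DeepAR jobs enables the ancestral-sampling mode mentioned in the DeepAR README above: the network is unrolled one step at a time over the forecast horizon, a value is drawn from the predicted Gaussian at each step, and that draw is fed back as the next input. A minimal sketch of the idea (the `one_step_model` wrapper is hypothetical; the repo's real logic lives in model/net.py and evaluate.py):

```python
import torch

def ancestral_sample(one_step_model, last_obs, covariates, horizon, n_samples=200):
    """Monte-Carlo forecast: sample a full trajectory from the model n_samples times.

    one_step_model(x_t, cov_t, state) -> (mu, sigma, state) is assumed to wrap one
    recurrent step of the network; last_obs holds the last observed value(s).
    """
    trajectories = []
    for _ in range(n_samples):
        x_t, state = last_obs.clone(), None
        steps = []
        for t in range(horizon):
            mu, sigma, state = one_step_model(x_t, covariates[t], state)
            x_t = torch.normal(mu, sigma)  # draw from the predicted Gaussian
            steps.append(x_t)              # the draw becomes the next input
        trajectories.append(torch.stack(steps))
    return torch.stack(trajectories)       # (n_samples, horizon, ...); quantiles are read off here
```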
53 | 54 | # This script will print some usage statistics to the 55 | # end of the standard out file 56 | # Use that to improve your resource request estimate 57 | # on later jobs. 58 | seff $SLURM_JOBID 59 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_train_elect3_4 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_evaluate_3.txt 15 | #SBATCH --error=job_err_elect_evaluate_3.txt 16 | 17 | module load gcc/8.3.0 cuda/10.1.168 18 | module load python-data 19 | 20 | # module purge 21 | # module load python-env/3.7.4-ml 22 | 23 | cd .. 24 | 25 | echo "Hola el patron!" 26 | 27 | pip3 install -r 'requirements.txt' -q --user 28 | 29 | # load the data 30 | # python3 preprocess_elect.py --dataset='elect' \ 31 | # --data-folder='data' \ 32 | # --model-name='base_model_3' \ 33 | # --file-name='electricity.csv' \ 34 | # --test 35 | 36 | 37 | ## Train and test the rest of the horizons 38 | # python3 train.py --sampling \ 39 | # --dataset='elect' \ 40 | # --data-folder='base_model_3' \ 41 | # --model-name='base_model_3' \ 42 | # --restore-file='epoch_7' 43 | 44 | python3 evaluate.py --sampling \ 45 | --dataset='elect' \ 46 | --data-folder='base_model_3' \ 47 | --model-name='base_model_3' \ 48 | --save-file \ 49 | --restore-file='epoch_6' 50 | 51 | echo "Adios el patron!" 52 | 53 | # This script will print some usage statistics to the 54 | # end of the standard out file 55 | # Use that to improve your resource request estimate 56 | # on later jobs. 57 | seff $SLURM_JOBID 58 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=da_train_36 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_evaluate_36.txt 15 | #SBATCH --error=job_err_elect_evaluate_36.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | 32 | # python3 preprocess_elect.py --dataset='elect' \ 33 | # --data-folder='data' \ 34 | # --model-name='base_model_36' \ 35 | # --file-name='electricity.csv' \ 36 | # --test 37 | 38 | 39 | ## Train and test the rest of the horizons 40 | # python3 train.py --sampling \ 41 | # --dataset='elect' \ 42 | # --data-folder='base_model_36' \ 43 | # --model-name='base_model_36' \ 44 | # --restore-file='epoch_43' 45 | 46 | 47 | 48 | ## Evaluate the models 49 | python3 evaluate.py --sampling \ 50 | --dataset='elect' \ 51 | --data-folder='base_model_36' \ 52 | --model-name='base_model_36' \ 53 | --save-file \ 54 | --restore-file='best' 55 | 56 | echo "Adios el patron!" 
57 | 58 | # This script will print some usage statistics to the 59 | # end of the standard out file 60 | # Use that to improve your resource request estimate 61 | # on later jobs. 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_train_elect6_4 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_evaluate_6.txt 15 | #SBATCH --error=job_err_elect_evaluate_6.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # # load the data 31 | # python3 preprocess_elect.py --dataset='elect' \ 32 | # --data-folder='data' \ 33 | # --model-name='base_model_6' \ 34 | # --file-name='electricity.csv' \ 35 | # --test 36 | 37 | 38 | ## Train and test the rest of the horizons 39 | # python3 train.py --sampling \ 40 | # --dataset='elect' \ 41 | # --data-folder='base_model_6' \ 42 | # --model-name='base_model_6' \ 43 | # --restore-file='epoch_21' 44 | 45 | 46 | python3 evaluate.py --sampling \ 47 | --dataset='elect' \ 48 | --data-folder='base_model_6' \ 49 | --model-name='base_model_6' \ 50 | --save-file \ 51 | --restore-file='epoch_10' 52 | 53 | echo "Adios el patron!" 54 | 55 | # This script will print some usage statistics to the 56 | # end of the standard out file 57 | # Use that to improve your resource request estimate 58 | # on later jobs. 59 | seff $SLURM_JOBID 60 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_train_power12_3 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_power_evaluate_12.txt 15 | #SBATCH --error=job_err_elect_power_evaluate_12.txt 16 | 17 | module load gcc/8.3.0 cuda/10.1.168 18 | module load python-data 19 | 20 | # module purge 21 | # module load python-env/3.7.4-ml 22 | 23 | cd .. 24 | 25 | echo "Hola el patron!" 
26 | 27 | pip3 install -r 'requirements.txt' -q --user 28 | 29 | # load the data 30 | python3 preprocess_power_system.py --dataset='elect' \ 31 | --data-folder='data' \ 32 | --model-name='power_system_12' \ 33 | --file-name='europe_power_system.csv' \ 34 | --test 35 | 36 | 37 | # ## Train and test the rest of the horizons 38 | # python3 train.py --sampling \ 39 | # --dataset='elect' \ 40 | # --data-folder='power_system_12' \ 41 | # --model-name='power_system_12' \ 42 | # --restore-file='epoch_63' 43 | 44 | # ## Evaluate the models 45 | python3 evaluate.py --sampling \ 46 | --dataset='elect' \ 47 | --data-folder='power_system_12' \ 48 | --model-name='power_system_12' \ 49 | --save-file \ 50 | --restore-file='epoch_67' 51 | 52 | echo "Adios el patron!" 53 | 54 | # This script will print some usage statistics to the 55 | # end of the standard out file 56 | # Use that to improve your resource request estimate 57 | # on later jobs. 58 | seff $SLURM_JOBID 59 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=dr_power24 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=8G 12 | #SBATCH --time=02:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power242_eval.txt 15 | #SBATCH --error=job_err_power242_eval.txt 16 | 17 | module load gcc/8.3.0 cuda/10.1.168 18 | module load python-data 19 | # module purge 20 | # module load python-env/3.7.4-ml 21 | 22 | cd .. 23 | 24 | echo "Hola el patron!" 25 | 26 | pip3 install -r 'requirements.txt' -q --user 27 | 28 | # load the data 29 | # comment test for training and uncomment for testing 30 | # python3 preprocess_power_system.py --dataset='elect' \ 31 | # --data-folder='data' \ 32 | # --model-name='power_system_24_2' \ 33 | # --file-name='europe_power_system.csv' 34 | 35 | # ## Train and test the rest of the horizons 36 | # python3 train.py --sampling \ 37 | # --dataset='elect' \ 38 | # --data-folder='power_system_24_2' \ 39 | # --model-name='power_system_24_2'\ 40 | # --restore-file='epoch_27' 41 | 42 | 43 | python3 preprocess_power_system.py --dataset='elect' \ 44 | --data-folder='data' \ 45 | --model-name='power_system_24_2' \ 46 | --file-name='europe_power_system.csv' \ 47 | --test 48 | 49 | ## Evaluate the models 50 | python3 evaluate.py --sampling \ 51 | --dataset='elect' \ 52 | --data-folder='power_system_24_2' \ 53 | --model-name='power_system_24_2' \ 54 | --save-file \ 55 | --restore-file='best' 56 | 57 | echo "Adios el patron!" 58 | 59 | # This script will print some usage statistics to the 60 | # end of the standard out file 61 | # Use that to improve your resource request estimate 62 | # on later jobs. 
63 | seff $SLURM_JOBID -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_train_power3_4 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=02:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_power_evaluate3.txt 15 | #SBATCH --error=job_err_elect_power_evaluate3.txt 16 | 17 | module load gcc/8.3.0 cuda/10.1.168 18 | module load python-data 19 | 20 | # module purge 21 | # module load python-env/3.7.4-ml 22 | 23 | cd .. 24 | 25 | echo "Hola el patron!" 26 | 27 | pip3 install -r 'requirements.txt' -q --user 28 | 29 | # load the data 30 | python3 preprocess_power_system.py --dataset='elect' \ 31 | --data-folder='data' \ 32 | --model-name='power_system_3' \ 33 | --file-name='europe_power_system.csv' 34 | --test 35 | 36 | 37 | ## Train and test the rest of the horizons 38 | # python3 train.py --sampling \ 39 | # --dataset='elect' \ 40 | # --data-folder='power_system_3' \ 41 | # --model-name='power_system_3' \ 42 | # --restore-file='epoch_27' 43 | 44 | python3 evaluate.py --sampling \ 45 | --dataset='elect' \ 46 | --data-folder='power_system_3' \ 47 | --model-name='power_system_3' \ 48 | --save-file \ 49 | --restore-file='epoch_15' 50 | 51 | echo "Adios el patron!" 52 | 53 | # This script will print some usage statistics to the 54 | # end of the standard out file 55 | # Use that to improve your resource request estimate 56 | # on later jobs. 57 | seff $SLURM_JOBID 58 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_evaluate_power36 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_evaluate_power36.txt 15 | #SBATCH --error=job_err_evaluate_power36.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | python3 preprocess_power_system.py --dataset='elect' \ 32 | --data-folder='data' \ 33 | --model-name='power_system_36' \ 34 | --file-name='europe_power_system.csv' \ 35 | --test 36 | 37 | ## Train and test the rest of the horizons 38 | # python3 train.py --sampling \ 39 | # --dataset='elect' \ 40 | # --data-folder='power_system_36' \ 41 | # --model-name='power_system_36' 42 | 43 | ## Evaluate the models 44 | python3 evaluate.py --sampling \ 45 | --dataset='elect' \ 46 | --data-folder='power_system_36' \ 47 | --model-name='power_system_36' \ 48 | --save-file \ 49 | --restore-file='best' 50 | 51 | echo "Adios el patron!" 52 | 53 | # This script will print some usage statistics to the 54 | # end of the standard out file 55 | # Use that to improve your resource request estimate 56 | # on later jobs. 
57 | seff $SLURM_JOBID -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_train_power6_3 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_power_evaluate_6.txt 15 | #SBATCH --error=job_err_elect_power_evaluate_6.txt 16 | 17 | module load gcc/8.3.0 cuda/10.1.168 18 | module load python-data 19 | 20 | # module purge 21 | # module load python-env/3.7.4-ml 22 | 23 | cd .. 24 | 25 | echo "Hola el patron!" 26 | 27 | pip3 install -r 'requirements.txt' -q --user 28 | 29 | # load the data 30 | python3 preprocess_power_system.py --dataset='elect' \ 31 | --data-folder='data' \ 32 | --model-name='power_system_6' \ 33 | --file-name='europe_power_system.csv' \ 34 | --test 35 | 36 | 37 | ## Train and test the rest of the horizons 38 | # python3 train.py --sampling \ 39 | # --dataset='elect' \ 40 | # --data-folder='power_system_6' \ 41 | # --model-name='power_system_6' \ 42 | # --restore-file='epoch_26' 43 | 44 | python3 evaluate.py --sampling \ 45 | --dataset='elect' \ 46 | --data-folder='power_system_6' \ 47 | --model-name='power_system_6' \ 48 | --save-file \ 49 | --restore-file='epoch_45' 50 | 51 | echo "Adios el patron!" 52 | 53 | # This script will print some usage statistics to the 54 | # end of the standard out file 55 | # Use that to improve your resource request estimate 56 | # on later jobs. 57 | seff $SLURM_JOBID 58 | -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power_load.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_power36_load 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=06:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power36_load.txt 15 | #SBATCH --error=job_err_power36_load.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 
27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | python3 preprocess_power_system.py --dataset='elect' \ 32 | --data-folder='data' \ 33 | --model-name='power_system_load_36' \ 34 | --file-name='europe_power_system_load.csv' \ 35 | # --test \ 36 | 37 | # Train and test the rest of the horizons 38 | python3 train.py --sampling \ 39 | --dataset='elect' \ 40 | --data-folder='power_system_load_36' \ 41 | --model-name='power_system_load_36' 42 | 43 | # load the data 44 | python3 preprocess_power_system.py --dataset='elect' \ 45 | --data-folder='data' \ 46 | --model-name='power_system_load_36' \ 47 | --file-name='europe_power_system_load.csv' \ 48 | --test \ 49 | 50 | ## Evaluate the models 51 | python3 evaluate.py --sampling \ 52 | --dataset='elect' \ 53 | --data-folder='power_system_load_36' \ 54 | --model-name='power_system_load_36' \ 55 | --save-file \ 56 | --restore-file='best' 57 | 58 | echo "Adios el patron!" 59 | 60 | # This script will print some usage statistics to the 61 | # end of the standard out file 62 | # Use that to improve your resource request estimate 63 | # on later jobs. 64 | seff $SLURM_JOBID -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power_non_exogeneous.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=deepar_power36_nonex 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=1-06:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power36_nonex.txt 15 | #SBATCH --error=job_err_power36_nonex.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | python3 preprocess_power_system.py --dataset='elect' \ 32 | --data-folder='data' \ 33 | --model-name='power_system_non_exogeneous_36' \ 34 | --file-name='europe_power_system.csv' \ 35 | --num_covariates=1 36 | # --test \ 37 | 38 | 39 | echo "Training ..." 40 | 41 | 42 | # Train and test the rest of the horizons 43 | python3 train.py --sampling \ 44 | --dataset='elect' \ 45 | --data-folder='power_system_non_exogeneous_36' \ 46 | --model-name='power_system_non_exogeneous_36' 47 | 48 | echo "Loading the test data ..." 49 | 50 | 51 | # load the data 52 | python3 preprocess_power_system.py --dataset='elect' \ 53 | --data-folder='data' \ 54 | --model-name='power_system_non_exogeneous_36' \ 55 | --file-name='europe_power_system.csv' \ 56 | --test \ 57 | --num_covariates=1 58 | 59 | echo "Testing ..." 60 | 61 | ## Evaluate the models 62 | python3 evaluate.py --sampling \ 63 | --dataset='elect' \ 64 | --data-folder='power_system_non_exogeneous_36' \ 65 | --model-name='power_system_non_exogeneous_36' \ 66 | --save-file \ 67 | --restore-file='best' 68 | 69 | echo "Adios el patron!" 70 | 71 | # This script will print some usage statistics to the 72 | # end of the standard out file 73 | # Use that to improve your resource request estimate 74 | # on later jobs. 
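The `--num_covariates=1` job above is the non-exogenous ablation: it reduces the covariate set to a single feature, whereas the full setup described in the DeepAR README feeds history/age, month of the year, day of the week and hour of the day. A rough sketch of how such a covariate matrix is typically built (illustrative only, not the exact code in preprocess_power_system.py):

```python
import numpy as np
import pandas as pd

def gen_covariates(times: pd.DatetimeIndex, num_covariates: int = 4) -> np.ndarray:
    """Build a (len(times), num_covariates) matrix of time covariates."""
    cov = np.zeros((len(times), 4))
    cov[:, 0] = np.arange(len(times))   # "age": steps since the start of the series
    cov[:, 1] = times.weekday           # day of week
    cov[:, 2] = times.hour              # hour of day
    cov[:, 3] = times.month             # month of year
    # standardise the calendar columns so they are on a comparable scale
    cov[:, 1:] = (cov[:, 1:] - cov[:, 1:].mean(axis=0)) / (cov[:, 1:].std(axis=0) + 1e-8)
    return cov[:, :num_covariates]      # num_covariates=1 keeps only the first column

cov = gen_covariates(pd.date_range('2015-01-01', periods=168 + 36, freq='H'))
```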
75 | seff $SLURM_JOBID -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power_price.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=dr_p36_price 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=10:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power36_price.txt 15 | #SBATCH --error=job_err_power36_price.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | python3 preprocess_power_system.py --dataset='elect' \ 32 | --data-folder='data' \ 33 | --model-name='power_system_price_36' \ 34 | --file-name='europe_power_system_price.csv' \ 35 | # --test \ 36 | 37 | # Train and test the rest of the horizons 38 | python3 train.py --sampling \ 39 | --dataset='elect' \ 40 | --data-folder='power_system_price_36' \ 41 | --model-name='power_system_price_36' 42 | 43 | # load the data 44 | python3 preprocess_power_system.py --dataset='elect' \ 45 | --data-folder='data' \ 46 | --model-name='power_system_price_36' \ 47 | --file-name='europe_power_system_price.csv' \ 48 | --test \ 49 | 50 | ## Evaluate the models 51 | python3 evaluate.py --sampling \ 52 | --dataset='elect' \ 53 | --data-folder='power_system_price_36' \ 54 | --model-name='power_system_price_36' \ 55 | --save-file \ 56 | --restore-file='best' 57 | 58 | echo "Adios el patron!" 59 | 60 | # This script will print some usage statistics to the 61 | # end of the standard out file 62 | # Use that to improve your resource request estimate 63 | # on later jobs. 64 | seff $SLURM_JOBID -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power_solar.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=dr_p36_solar 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=10:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power36_solar.txt 15 | #SBATCH --error=job_err_power36_solar.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 
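The `*_load`, `*_price`, `*_solar` and `*_wind` jobs in this folder train on column subsets of the full Open Power System file (europe_power_system_load.csv and friends under datasets/csv, presumably produced via datasets/dataset_europe.ipynb). One way such a subset could be derived, shown here only as a sketch:

```python
import pandas as pd

# Keep only the wind-related columns of the full dataset and write them to a new CSV;
# the file paths are illustrative and mirror the --file-name arguments used above.
df = pd.read_csv('data/europe_power_system.csv', index_col=0)
wind_cols = [c for c in df.columns if 'wind' in c.lower()]
df[wind_cols].to_csv('data/europe_power_system_wind.csv')
```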
27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | python3 preprocess_power_system.py --dataset='elect' \ 32 | --data-folder='data' \ 33 | --model-name='power_system_solar_36' \ 34 | --file-name='europe_power_system_solar.csv' \ 35 | # --test \ 36 | 37 | # Train and test the rest of the horizons 38 | python3 train.py --sampling \ 39 | --dataset='elect' \ 40 | --data-folder='power_system_solar_36' \ 41 | --model-name='power_system_solar_36' 42 | 43 | # load the data 44 | python3 preprocess_power_system.py --dataset='elect' \ 45 | --data-folder='data' \ 46 | --model-name='power_system_solar_36' \ 47 | --file-name='europe_power_system_solar.csv' \ 48 | --test \ 49 | 50 | ## Evaluate the models 51 | python3 evaluate.py --sampling \ 52 | --dataset='elect' \ 53 | --data-folder='power_system_solar_36' \ 54 | --model-name='power_system_solar_36' \ 55 | --save-file \ 56 | --restore-file='best' 57 | 58 | echo "Adios el patron!" 59 | 60 | # This script will print some usage statistics to the 61 | # end of the standard out file 62 | # Use that to improve your resource request estimate 63 | # on later jobs. 64 | seff $SLURM_JOBID -------------------------------------------------------------------------------- /DeepAR/batch jobs/batch_train_power_wind.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --job-name=dr_p36_wind 7 | #SBATCH --account=Project_2002244 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=16G 12 | #SBATCH --time=10:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power36_wind.txt 15 | #SBATCH --error=job_err_power36_wind.txt 16 | 17 | 18 | # module purge 19 | # module load python-env/3.7.4-ml 20 | 21 | module load gcc/8.3.0 cuda/10.1.168 22 | module load python-data 23 | 24 | cd .. 25 | 26 | echo "Hola el patron!" 27 | 28 | pip3 install -r 'requirements.txt' -q --user 29 | 30 | # load the data 31 | python3 preprocess_power_system.py --dataset='elect' \ 32 | --data-folder='data' \ 33 | --model-name='power_system_wind_36' \ 34 | --file-name='europe_power_system_wind.csv' \ 35 | # --test \ 36 | 37 | # Train and test the rest of the horizons 38 | python3 train.py --sampling \ 39 | --dataset='elect' \ 40 | --data-folder='power_system_wind_36' \ 41 | --model-name='power_system_wind_36' 42 | 43 | # load the data 44 | python3 preprocess_power_system.py --dataset='elect' \ 45 | --data-folder='data' \ 46 | --model-name='power_system_wind_36' \ 47 | --file-name='europe_power_system_wind.csv' \ 48 | --test \ 49 | 50 | ## Evaluate the models 51 | python3 evaluate.py --sampling \ 52 | --dataset='elect' \ 53 | --data-folder='power_system_wind_36' \ 54 | --model-name='power_system_wind_36' \ 55 | --save-file \ 56 | --restore-file='best' 57 | 58 | echo "Adios el patron!" 59 | 60 | # This script will print some usage statistics to the 61 | # end of the standard out file 62 | # Use that to improve your resource request estimate 63 | # on later jobs. 
64 | seff $SLURM_JOBID -------------------------------------------------------------------------------- /DeepAR/conda_env.yml: -------------------------------------------------------------------------------- 1 | name: python-data-3.7.3-1 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - hyperopt 7 | - pip 8 | - pip: 9 | - torch==0.4.1 10 | - torchvision==0.2.1 11 | - tqdm==4.26.0 12 | 13 | -------------------------------------------------------------------------------- /DeepAR/dataloader.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import numpy as np 3 | import torch 4 | import os 5 | import logging 6 | from torch.utils.data import DataLoader, Dataset, Sampler 7 | 8 | logger = logging.getLogger('DeepAR.Data') 9 | 10 | class TrainDataset(Dataset): 11 | def __init__(self, data_path, data_name, num_class): 12 | self.data = np.load(os.path.join(data_path, f'train_data_{data_name}.npy')) 13 | self.label = np.load(os.path.join(data_path, f'train_label_{data_name}.npy')) 14 | self.train_len = self.data.shape[0] 15 | logger.info(f'train_len: {self.train_len}') 16 | logger.info(f'building datasets from {data_path}...') 17 | 18 | def __len__(self): 19 | return self.train_len 20 | 21 | def __getitem__(self, index): 22 | return (self.data[index,:,:-1],int(self.data[index,0,-1]), self.label[index]) 23 | 24 | class TestDataset(Dataset): 25 | def __init__(self, data_path, data_name, num_class): 26 | self.data = np.load(os.path.join(data_path, f'test_data_{data_name}.npy')) 27 | self.v = np.load(os.path.join(data_path, f'test_v_{data_name}.npy')) 28 | self.label = np.load(os.path.join(data_path, f'test_label_{data_name}.npy')) 29 | self.test_len = self.data.shape[0] 30 | logger.info(f'test_len: {self.test_len}') 31 | logger.info(f'building datasets from {data_path}...') 32 | 33 | def __len__(self): 34 | return self.test_len 35 | 36 | def __getitem__(self, index): 37 | return (self.data[index,:,:-1],int(self.data[index,0,-1]),self.v[index],self.label[index]) 38 | 39 | class WeightedSampler(Sampler): 40 | def __init__(self, data_path, data_name, replacement=True): 41 | v = np.load(os.path.join(data_path, f'train_v_{data_name}.npy')) 42 | self.weights = torch.as_tensor(np.abs(v[:,0])/np.sum(np.abs(v[:,0])), dtype=torch.double) 43 | logger.info(f'weights: {self.weights}') 44 | self.num_samples = self.weights.shape[0] 45 | logger.info(f'num samples: {self.num_samples}') 46 | self.replacement = replacement 47 | 48 | def __iter__(self): 49 | return iter(torch.multinomial(self.weights, self.num_samples, self.replacement).tolist()) 50 | 51 | def __len__(self): 52 | return self.num_samples -------------------------------------------------------------------------------- /DeepAR/experiments/base_model/best.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/DeepAR/experiments/base_model/best.pth.tar -------------------------------------------------------------------------------- /DeepAR/experiments/base_model/metrics_test_best_weights.json: -------------------------------------------------------------------------------- 1 | { 2 | "ND": 0.06349205537841585, 3 | "RMSE": 0.4518856872267067, 4 | "test_loss": 5.225288391113281, 5 | "rou90": 0.03436976639371784, 6 | "rou50": 0.06343928840627182 7 | } 
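The JSON above records the test metrics of the base DeepAR model (ND, RMSE, rou90, rou50), matching the figures quoted in the DeepAR README. For reference, a minimal numpy sketch of the usual definitions behind these numbers (the repo's own implementation may normalise slightly differently):

```python
import numpy as np

def nd(pred, target):
    """Normalized deviation: total absolute error over total absolute target."""
    return np.sum(np.abs(pred - target)) / np.sum(np.abs(target))

def nrmse(pred, target):
    """Root mean squared error normalised by the mean absolute target."""
    return np.sqrt(np.mean((pred - target) ** 2)) / np.mean(np.abs(target))

def rho_risk(pred_q, target, q):
    """Quantile (rho) risk; rou50 and rou90 correspond to q = 0.5 and q = 0.9."""
    diff = target - pred_q
    return 2 * np.sum(np.where(diff >= 0, q * diff, (q - 1) * diff)) / np.sum(np.abs(target))
```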
-------------------------------------------------------------------------------- /DeepAR/experiments/base_model/params.json: -------------------------------------------------------------------------------- 1 | { 2 | "learning_rate": 1e-3, 3 | "batch_size": 64, 4 | "lstm_layers": 3, 5 | "num_epochs": 20, 6 | "train_window": 192, 7 | "test_window": 192, 8 | "predict_start": 168, 9 | "test_predict_start": 168, 10 | "predict_steps": 24, 11 | "num_class": 370, 12 | "cov_dim": 4, 13 | "lstm_hidden_dim": 40, 14 | "embedding_dim": 20, 15 | "sample_times": 200, 16 | "lstm_dropout": 0.1, 17 | "predict_batch": 256 18 | } 19 | -------------------------------------------------------------------------------- /DeepAR/experiments/param_search/params.json: -------------------------------------------------------------------------------- 1 | { 2 | "learning_rate": 1e-3, 3 | "batch_size": 64, 4 | "lstm_layers": 3, 5 | "num_epochs": 20, 6 | "train_window": 192, 7 | "test_window": 192, 8 | "predict_start": 168, 9 | "test_predict_start": 168, 10 | "predict_steps": 24, 11 | "num_class": 370, 12 | "cov_dim": 4, 13 | "lstm_hidden_dim": 40, 14 | "embedding_dim": 20, 15 | "sample_times": 200, 16 | "lstm_dropout": 0.1, 17 | "predict_batch": 256 18 | } 19 | -------------------------------------------------------------------------------- /DeepAR/model/LSTM.py: -------------------------------------------------------------------------------- 1 | ''' 2 | https://github.com/keitakurita/Better_LSTM_PyTorch/blob/master/better_lstm/model.py 3 | Add to net.py: self.lstm = LSTM(1+params.cov_dim+params.embedding_dim, params.lstm_hidden_dim, params.lstm_layers, bias = True, 4 | batch_first = False, dropout = params.lstm_dropout) 5 | ''' 6 | 7 | import torch 8 | import torch.nn as nn 9 | from torch.nn.utils.rnn import PackedSequence 10 | from typing import * 11 | 12 | 13 | class VariationalDropout(nn.Module): 14 | """ 15 | Applies the same dropout mask across the temporal dimension 16 | See https://arxiv.org/abs/1512.05287 for more details. 17 | Note that this is not applied to the recurrent activations in the LSTM like the above paper. 18 | Instead, it is applied to the inputs and outputs of the recurrent layer. 
19 | """ 20 | def __init__(self, dropout: float, batch_first: Optional[bool]=False): 21 | super().__init__() 22 | self.dropout = dropout 23 | self.batch_first = batch_first 24 | 25 | def forward(self, x: torch.Tensor) -> torch.Tensor: 26 | if not self.training or self.dropout <= 0.: 27 | return x 28 | 29 | is_packed = isinstance(x, PackedSequence) 30 | if is_packed: 31 | x, batch_sizes = x 32 | max_batch_size = int(batch_sizes[0]) 33 | else: 34 | batch_sizes = None 35 | max_batch_size = x.size(0) 36 | 37 | # Drop same mask across entire sequence 38 | if self.batch_first: 39 | m = x.new_empty(max_batch_size, 1, x.size(2), requires_grad=False).bernoulli_(1 - self.dropout) 40 | else: 41 | m = x.new_empty(1, max_batch_size, x.size(2), requires_grad=False).bernoulli_(1 - self.dropout) 42 | x = x.masked_fill(m == 0, 0) / (1 - self.dropout) 43 | 44 | if is_packed: 45 | return PackedSequence(x, batch_sizes) 46 | else: 47 | return x 48 | 49 | class LSTM(nn.LSTM): 50 | def __init__(self, *args, dropouti: float=0., 51 | dropoutw: float=0., dropouto: float=0., 52 | batch_first=True, unit_forget_bias=True, **kwargs): 53 | super().__init__(*args, **kwargs, batch_first=batch_first) 54 | self.unit_forget_bias = unit_forget_bias 55 | self.dropoutw = dropoutw 56 | self.input_drop = VariationalDropout(dropouti, 57 | batch_first=batch_first) 58 | self.output_drop = VariationalDropout(dropouto, 59 | batch_first=batch_first) 60 | self._init_weights() 61 | 62 | def _init_weights(self): 63 | """ 64 | Use orthogonal init for recurrent layers, xavier uniform for input layers 65 | Bias is 0 except for forget gate 66 | """ 67 | for name, param in self.named_parameters(): 68 | if "weight_hh" in name: 69 | nn.init.orthogonal_(param.data) 70 | elif "weight_ih" in name: 71 | nn.init.xavier_uniform_(param.data) 72 | elif "bias" in name and self.unit_forget_bias: 73 | nn.init.zeros_(param.data) 74 | param.data[self.hidden_size:2 * self.hidden_size] = 1 75 | 76 | def _drop_weights(self): 77 | for name, param in self.named_parameters(): 78 | if "weight_hh" in name: 79 | getattr(self, name).data = \ 80 | torch.nn.functional.dropout(param.data, p=self.dropoutw, 81 | training=self.training).contiguous() 82 | 83 | def forward(self, input, hx=None): 84 | self._drop_weights() 85 | input = self.input_drop(input) 86 | seq, state = super().forward(input, hx=hx) 87 | return self.output_drop(seq), state -------------------------------------------------------------------------------- /DeepAR/pytorchtools.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import os 4 | 5 | class EarlyStopping: 6 | """Early stops the training if validation loss doesn't improve after a given patience.""" 7 | def __init__(self, patience=7, verbose=False, delta=0.0001, folder='./experiments'): 8 | """ 9 | Args: 10 | patience (int): How long to wait after last time validation loss improved. 11 | Default: 7 12 | verbose (bool): If True, prints a message for each validation loss improvement. 13 | Default: False 14 | delta (float): Minimum change in the monitored quantity to qualify as an improvement. 
15 | Default: 0 16 | """ 17 | self.patience = patience 18 | self.verbose = verbose 19 | self.counter = 0 20 | self.best_score = None 21 | self.early_stop = False 22 | self.val_loss_min = np.Inf 23 | self.delta = delta 24 | self.folder = folder 25 | 26 | def __call__(self, val_loss, model): 27 | 28 | score = val_loss 29 | 30 | if self.best_score is None: 31 | self.best_score = score 32 | self.save_checkpoint(val_loss, model) 33 | elif score < self.best_score + self.delta: 34 | self.counter += 1 35 | print(f'EarlyStopping counter: {self.counter} out of {self.patience}') 36 | if self.counter >= self.patience: 37 | self.early_stop = True 38 | else: 39 | self.best_score = score 40 | self.save_checkpoint(val_loss, model) 41 | self.counter = 0 42 | 43 | def save_checkpoint(self, val_loss, model): 44 | '''Saves model when validation loss decrease.''' 45 | if self.verbose: 46 | print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...') 47 | torch.save(model.state_dict(), os.path.join(self.folder, 'checkpoint.pt')) 48 | self.val_loss_min = val_loss -------------------------------------------------------------------------------- /DeepAR/requirements.txt: -------------------------------------------------------------------------------- 1 | #numpy==1.16.0 2 | #pandas==0.25.0 3 | #scipy==1.1.0 4 | #torch==0.4.1 5 | torch==0.4.1.post2 6 | torchvision==0.2.1 7 | tqdm==4.26.0 8 | #mlflow 9 | #matplotlib==3.1.1 10 | #pickle 11 | #json -------------------------------------------------------------------------------- /DeepTCN/README.md: -------------------------------------------------------------------------------- 1 | #DeepTCN 2 | 3 | ## Modifications to the initial implementation by https://github.com/oneday88/deepTCN: 4 | - hyper-parameter optimization in ec_probabilistic_hpo.py file with [hyperopt](https://github.com/hyperopt/hyperopt) 5 | - early stopping for training in nnTrainer.py with pytorchtools.py 6 | - ec_feature_preprocess_nonexogeneous.py file 7 | - ec_probabilistic_forecasting_evaluate.py file 8 | - ArgumentParser was added 9 | 10 | To run the experiment please see [batch jobs](batch_jobs) folder. 11 | 12 | ----------------------------------------------------------------- 13 | 14 | ### Probabilistic Forecasting with Temporal Convolutional Neural Network 15 | Source codes for the paper "probabilistic forecasting with temporal convolutional neural network" 16 | #### Electricity 17 | ##### Data preprocessing 18 | * Download the dataset from UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014 19 | * Run "R CMD BATCH electricity/basicPreprocess.R" to generate "modelData.csv" for model training. 20 | ##### Point forecasting 21 | * python3 electricity/ecPointModel/ec_feature_preprocess.py 22 | * python3 electricity/ecPointModel/ECPointHuber.py 23 | ##### Probabilistic forecasting 24 | * python3 electricity/NewTCNQuantile/ec_feature_preprocess.py 25 | * python3 electricity/NewTCNQuantile/ec_probabilistic_forecasting.py 26 | ### Traffic 27 | ##### Data preprocessing 28 | * Download the dataset from UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/PEMS-SF 29 | * Run "R CMD BATCH traffic/basicPreprocess.R" to generate "traffic.csv". 
30 | * python3 traffic/traffic_feature_preprocess.py to generate the "tensor_prepare.pkl" for the model training 31 | ##### Point forecasting 32 | * python3 traffic/point/traffic_point_forecasting.py 33 | ##### Quantile regression 34 | * python3 traffic/quantile/traffic_quantile_forecasting.py 35 | 36 | ##### Probabilistic forecasting 37 | ### Parts 38 | ##### Data preprocessing 39 | ##### Probabilistic forecasting 40 | 41 | ### Reference Paper 42 | [Probabilistic forecasting with temporal convolutional neural network](https://arxiv.org/abs/1906.04397) 43 | 44 | KDD 2019 ,Workshop on Mining and Learning from Time Series, 2019 45 | 46 | ### Kind remind 47 | The total project will be refined in the next months. Also, you can achieve better results if you do better data preprocessing like scaling. 48 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_elect_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=06:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_elect_12_2.txt 11 | #SBATCH --error=job_err_elect_12_2.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='electricity.csv' \ 28 | --pickle-name='electricity_12_2.pkl' \ 29 | --horizon=12 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='elect_12_2.pkl' \ 35 | --pickle-name='electricity_12_2.pkl' \ 36 | --dim=321 \ 37 | --horizon=12 \ 38 | --gpu 39 | echo "Finished running!" 40 | 41 | 42 | echo "Loading data ... " 43 | python3 ec_feature_preprocess.py --data-folder='data' \ 44 | --file-name='electricity.csv' \ 45 | --pickle-name='electricity_12_2.pkl' \ 46 | --horizon=12 \ 47 | --test 48 | echo "Data loaded." 49 | 50 | echo "Start running ... " 51 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 52 | --model-name='elect_12_2.pkl' \ 53 | --pickle-name='electricity_12_2.pkl' \ 54 | --dim=321 \ 55 | --horizon=12 \ 56 | --gpu 57 | echo "Finished running!" 58 | 59 | seff $SLURM_JOBID 60 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_elect_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=06:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_elect_24.txt 11 | #SBATCH --error=job_err_elect_24.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... 
" 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='electricity.csv' \ 28 | --pickle-name='electricity_24.pkl' \ 29 | --horizon=24 30 | #--test 31 | 32 | echo "Data loaded." 33 | 34 | echo "Start running ... " 35 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 36 | --model-name='elect_24.pkl' \ 37 | --pickle-name='electricity_24.pkl' \ 38 | --dim=321 \ 39 | --horizon=24 \ 40 | --gpu 41 | echo "Finished running!" 42 | 43 | seff $SLURM_JOBID 44 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_elect_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=10:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_elect_3_2.txt 11 | #SBATCH --error=job_err_elect_3_2.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='electricity.csv' \ 28 | --pickle-name='electricity_3_2.pkl' \ 29 | --horizon=3 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='elect_3_2.pkl' \ 35 | --pickle-name='electricity_3_2.pkl' \ 36 | --dim=321 \ 37 | --horizon=3 \ 38 | --gpu 39 | echo "Finished running!" 40 | 41 | 42 | echo "Loading data ... " 43 | python3 ec_feature_preprocess.py --data-folder='data' \ 44 | --file-name='electricity.csv' \ 45 | --pickle-name='electricity_3_2.pkl' \ 46 | --horizon=3 \ 47 | --test 48 | echo "Data loaded." 49 | 50 | echo "Start running ... " 51 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 52 | --model-name='elect_3_2.pkl' \ 53 | --pickle-name='electricity_3_2.pkl' \ 54 | --dim=321 \ 55 | --horizon=3 \ 56 | --gpu 57 | echo "Finished running!" 58 | 59 | 60 | 61 | seff $SLURM_JOBID 62 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_elect_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=08:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_elect_6_2.txt 11 | #SBATCH --error=job_err_elect_6_2.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='electricity.csv' \ 28 | --pickle-name='electricity_6_2.pkl' \ 29 | --horizon=6 30 | echo "Data loaded." 31 | 32 | echo "Start running ... 
" 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='elect_6_2.pkl' \ 35 | --pickle-name='electricity_6_2.pkl' \ 36 | --dim=321 \ 37 | --horizon=6 \ 38 | --gpu 39 | echo "Finished running!" 40 | 41 | echo "Loading data ... " 42 | python3 ec_feature_preprocess.py --data-folder='data' \ 43 | --file-name='electricity.csv' \ 44 | --pickle-name='electricity_6_2.pkl' \ 45 | --horizon=6 \ 46 | --test 47 | echo "Data loaded." 48 | 49 | echo "Start running ... " 50 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 51 | --model-name='elect_6_2.pkl' \ 52 | --pickle-name='electricity_6_2.pkl' \ 53 | --dim=321 \ 54 | --horizon=6 \ 55 | --gpu 56 | echo "Finished running!" 57 | 58 | 59 | seff $SLURM_JOBID 60 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_elect_hop_36_evaluate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_e36_evaluate 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=06:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_elect_hop_36_train_evaluate.txt 11 | #SBATCH --error=job_err_elect_hop_36_train_evaluate.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | gpuseff $SLURM_JOBID 26 | 27 | 28 | echo "Loading data for training ... " 29 | 30 | python3 ec_feature_preprocess.py --data-folder='data' \ 31 | --file-name='electricity.csv' \ 32 | --pickle-name='electricity_hop_36.pkl' \ 33 | --horizon=36 34 | echo "Training data loaded." 35 | 36 | echo "Start running training... " 37 | # Optimization parameters {'batch_size': 1024, 'dropout': 0.2, 'learning_rate': 0.01, 'units': 128} 38 | 39 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 40 | --model-name='elect_hop_36.pkl' \ 41 | --pickle-name='electricity_hop_36.pkl' \ 42 | --dim=321 \ 43 | --horizon=36 \ 44 | --gpu \ 45 | --batch_size=1024 \ 46 | --units=128 \ 47 | --learning_rate=0.01 \ 48 | --dropout=0.2 49 | echo "Finished training!" 50 | 51 | 52 | echo "Loading testing data ... " 53 | python3 ec_feature_preprocess.py --data-folder='data' \ 54 | --file-name='electricity.csv' \ 55 | --pickle-name='electricity_hop_36.pkl' \ 56 | --horizon=36 \ 57 | --test 58 | 59 | echo "Testing data loaded." 60 | 61 | echo "Start running testing... " 62 | 63 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 64 | --model-name='elect_hop_36.pkl' \ 65 | --pickle-name='electricity_hop_36.pkl' \ 66 | --dim=321 \ 67 | --horizon=36 \ 68 | --gpu 69 | echo "Finished running!" 
70 | 71 | seff $SLURM_JOBID 72 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_hop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_hop 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=10:00:00 9 | #SBATCH --gres=gpu:v100:1,nvme:32 10 | #SBATCH --output=job_out_ep.txt 11 | #SBATCH --error=job_err_ep.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | module load python-data 15 | module load mxnet 16 | module list 17 | 18 | #cd DeepAR 19 | 20 | cd ../electricity/NewTCNQuantile 21 | 22 | #echo "Installing requirements ..." 23 | #pip install -r 'requirements.txt' 24 | #--user -q --no-cache-dir 25 | #echo "Requirements installed." 26 | 27 | echo "Loading data ... " 28 | #srun python3 ec_feature_preprocess_custom.py 29 | echo "Data loaded." 30 | 31 | echo "Start running ... " 32 | srun python3 ECPointHuber_HOP.py --dataset="feature_prepare_new2.pkl" \ 33 | --save-folder="./save" \ 34 | --save-file="hop_train_results.csv" \ 35 | --hop-file="hyper_parameter_search.pkl" \ 36 | --evals=100 \ 37 | --gpu 38 | echo "Finished running!" 39 | 40 | seff $SLURM_JOBID 41 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_hop_elect_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_e36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=03:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_elect_hop_36_4.txt 11 | #SBATCH --error=job_err_elect_hop_36_4.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | gpuseff $SLURM_JOBID 26 | 27 | echo "Loading data ... " 28 | python3 ec_feature_preprocess.py --data-folder='data' \ 29 | --file-name='electricity.csv' \ 30 | --pickle-name='electricity_hop_36_3.pkl' \ 31 | --horizon=36 \ 32 | --hop 33 | echo "Data loaded." 34 | 35 | echo "Start running ... " 36 | 37 | python3 ec_probabilistic_hpo.py --data-folder='data' \ 38 | --model-name='elect_hop_36_3.pkl' \ 39 | --pickle-name='electricity_hop_36_3.pkl' \ 40 | --dim=321 \ 41 | --horizon=36 \ 42 | --save-folder="./save" \ 43 | --save-file="elect_hop_36_3_results.csv" \ 44 | --hop-file="elect_hop_hyper_parameter_search_3.pkl" \ 45 | --evals=100 \ 46 | --epochs=100 \ 47 | --patience=5 \ 48 | --gpu 49 | echo "Finished running!" 50 | 51 | 52 | 53 | echo "Loading data for training ... " 54 | 55 | python3 ec_feature_preprocess.py --data-folder='data' \ 56 | --file-name='electricity.csv' \ 57 | --pickle-name='electricity_hop_36_3.pkl' \ 58 | --horizon=36 59 | echo "Training data loaded." 60 | 61 | echo "Start running training... 
" 62 | 63 | #{'batch_size': 1024.0, 'dropout': 0.2, 'learning_rate': 0.01, 'units': 256.0} 64 | 65 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 66 | --model-name='elect_hop_36_3.pkl' \ 67 | --pickle-name='electricity_hop_36_3.pkl' \ 68 | --dim=321 \ 69 | --horizon=36 \ 70 | --gpu \ 71 | --batch_size=1024 \ 72 | --units=256 \ 73 | --learning_rate=0.01 \ 74 | --dropout=0.2 75 | echo "Finished training!" 76 | 77 | 78 | echo "Loading testing data ... " 79 | python3 ec_feature_preprocess.py --data-folder='data' \ 80 | --file-name='electricity.csv' \ 81 | --pickle-name='electricity_hop_36_3.pkl' \ 82 | --horizon=36 \ 83 | --test 84 | 85 | echo "Testing data loaded." 86 | 87 | echo "Start running testing... " 88 | 89 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 90 | --model-name='elect_hop_36_3.pkl' \ 91 | --pickle-name='electricity_hop_36_3.pkl' \ 92 | --dim=321 \ 93 | --horizon=36 \ 94 | --gpu 95 | echo "Finished running!" 96 | 97 | 98 | 99 | seff $SLURM_JOBID 100 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p12 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=04:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_12_3.txt 11 | #SBATCH --error=job_err_power_12_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system.csv' \ 28 | --pickle-name='power_system_12_3.pkl' \ 29 | --horizon=12 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_12_3.pkl' \ 35 | --pickle-name='power_system_12_3.pkl' \ 36 | --dim=183 \ 37 | --horizon=12 \ 38 | --patience=25 \ 39 | --gpu 40 | 41 | 42 | echo "Finished running training!" 43 | 44 | echo "Loading data ... " 45 | python3 ec_feature_preprocess.py --data-folder='data' \ 46 | --file-name='europe_power_system.csv' \ 47 | --pickle-name='power_system_12_3.pkl' \ 48 | --horizon=12 \ 49 | --test 50 | echo "Data loaded." 51 | 52 | echo "Start running ... " 53 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 54 | --model-name='power_12_3.pkl' \ 55 | --pickle-name='power_system_12_3.pkl' \ 56 | --dim=183 \ 57 | --horizon=12 \ 58 | --gpu 59 | 60 | echo "Finished running!" 
61 | 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p24 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=04:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_24_3.txt 11 | #SBATCH --error=job_err_power_24_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system.csv' \ 28 | --pickle-name='power_system_24_3.pkl' \ 29 | --horizon=24 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_24_3.pkl' \ 35 | --pickle-name='power_system_24_3.pkl' \ 36 | --dim=183 \ 37 | --horizon=24 \ 38 | --patience=25 \ 39 | --gpu 40 | 41 | 42 | echo "Finished running training!" 43 | 44 | echo "Loading data ... " 45 | python3 ec_feature_preprocess.py --data-folder='data' \ 46 | --file-name='europe_power_system.csv' \ 47 | --pickle-name='power_system_24_3.pkl' \ 48 | --horizon=24 \ 49 | --test 50 | echo "Data loaded." 51 | 52 | echo "Start running ... " 53 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 54 | --model-name='power_24_3.pkl' \ 55 | --pickle-name='power_system_24_3.pkl' \ 56 | --dim=183 \ 57 | --horizon=24 \ 58 | --gpu 59 | 60 | echo "Finished running!" 61 | 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p3 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=04:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_33.txt 11 | #SBATCH --error=job_err_power_33.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system.csv' \ 28 | --pickle-name='power_system_3_3.pkl' \ 29 | --horizon=3 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_3_3.pkl' \ 35 | --pickle-name='power_system_3_3.pkl' \ 36 | --dim=183 \ 37 | --horizon=3 \ 38 | --patience=50 \ 39 | --gpu 40 | echo "Finished running training!" 41 | 42 | echo "Loading data ... 
" 43 | python3 ec_feature_preprocess.py --data-folder='data' \ 44 | --file-name='europe_power_system.csv' \ 45 | --pickle-name='power_system_3_3.pkl' \ 46 | --horizon=3 \ 47 | --test 48 | echo "Data loaded." 49 | 50 | echo "Start running ... " 51 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 52 | --model-name='power_3_3.pkl' \ 53 | --pickle-name='power_system_3_3.pkl' \ 54 | --dim=183 \ 55 | --horizon=3 \ 56 | --gpu 57 | 58 | 59 | seff $SLURM_JOBID 60 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_36.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=06:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_36.txt 11 | #SBATCH --error=job_err_power_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system.csv' \ 28 | --pickle-name='power_system_36.pkl' \ 29 | --horizon=36 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_36.pkl' \ 35 | --pickle-name='power_system_36.pkl' \ 36 | --dim=183 \ 37 | --horizon=36 \ 38 | --gpu 39 | echo "Finished running!" 40 | 41 | seff $SLURM_JOBID 42 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_36_load.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p_load_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=02:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_load_36.txt 11 | #SBATCH --error=job_err_power_load_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system_load.csv' \ 28 | --pickle-name='power_system_load_36.pkl' \ 29 | --horizon=36 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_load_36.pkl' \ 35 | --pickle-name='power_system_load_36.pkl' \ 36 | --dim=59 \ 37 | --horizon=36 \ 38 | --patience=25 \ 39 | --gpu 40 | 41 | 42 | echo "Finished running training!" 43 | 44 | echo "Loading data ... " 45 | python3 ec_feature_preprocess.py --data-folder='data' \ 46 | --file-name='europe_power_system_load.csv' \ 47 | --pickle-name='power_system_load_36.pkl' \ 48 | --horizon=36 \ 49 | --test 50 | echo "Data loaded." 51 | 52 | echo "Start running ... 
" 53 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 54 | --model-name='power_load_36.pkl' \ 55 | --pickle-name='power_system_load_36.pkl' \ 56 | --dim=59 \ 57 | --horizon=36 \ 58 | --gpu 59 | 60 | echo "Finished running!" 61 | 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_36_nonexogeneous.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p_nonex_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=02:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_nonex_36_test.txt 11 | #SBATCH --error=job_err_power_nonex_36_test.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | gpuseff $SLURM_JOBID 26 | 27 | echo "Loading data ... " 28 | python3 ec_feature_preprocess_nonexogeneous.py --data-folder='data' \ 29 | --file-name='europe_power_system.csv' \ 30 | --pickle-name='power_system_nonex_36.pkl' \ 31 | --horizon=36 32 | echo "Data loaded." 33 | 34 | echo "Start running ... " 35 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 36 | --model-name='power_nonex_36.pkl' \ 37 | --pickle-name='power_system_nonex_36.pkl' \ 38 | --dim=183 \ 39 | --horizon=36 \ 40 | --patience=25 \ 41 | --gpu 42 | 43 | 44 | echo "Finished running training!" 45 | 46 | echo "Loading data ... " 47 | python3 ec_feature_preprocess_nonexogeneous.py --data-folder='data' \ 48 | --file-name='europe_power_system.csv' \ 49 | --pickle-name='power_system_nonex_36.pkl' \ 50 | --horizon=36 \ 51 | --test 52 | echo "Data loaded." 53 | 54 | echo "Start running ... " 55 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 56 | --model-name='power_nonex_36.pkl' \ 57 | --pickle-name='power_system_nonex_36.pkl' \ 58 | --dim=183 \ 59 | --horizon=36 \ 60 | --gpu 61 | 62 | echo "Finished running!" 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_36_price.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p_price_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=02:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_price_36.txt 11 | #SBATCH --error=job_err_power_price_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system_price.csv' \ 28 | --pickle-name='power_system_price_36.pkl' \ 29 | --horizon=36 30 | echo "Data loaded." 31 | 32 | echo "Start running ... 
" 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_price_36.pkl' \ 35 | --pickle-name='power_system_price_36.pkl' \ 36 | --dim=31 \ 37 | --horizon=36 \ 38 | --patience=25 \ 39 | --gpu 40 | 41 | 42 | echo "Finished running training!" 43 | 44 | echo "Loading data ... " 45 | python3 ec_feature_preprocess.py --data-folder='data' \ 46 | --file-name='europe_power_system_price.csv' \ 47 | --pickle-name='power_system_price_36.pkl' \ 48 | --horizon=36 \ 49 | --test 50 | echo "Data loaded." 51 | 52 | echo "Start running ... " 53 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 54 | --model-name='power_price_36.pkl' \ 55 | --pickle-name='power_system_price_36.pkl' \ 56 | --dim=31 \ 57 | --horizon=36 \ 58 | --gpu 59 | 60 | echo "Finished running!" 61 | 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_36_solar.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p_solar_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=02:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_solar_36_new.txt 11 | #SBATCH --error=job_err_power_solar_36_new.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system_solar.csv' \ 28 | --pickle-name='power_system_solar_36.pkl' \ 29 | --horizon=36 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_solar_36.pkl' \ 35 | --pickle-name='power_system_solar_36.pkl' \ 36 | --dim=36 \ 37 | --horizon=36 \ 38 | --patience=25 \ 39 | --gpu 40 | 41 | 42 | echo "Finished running training!" 43 | 44 | echo "Loading data ... " 45 | python3 ec_feature_preprocess.py --data-folder='data' \ 46 | --file-name='europe_power_system_solar.csv' \ 47 | --pickle-name='power_system_solar_36.pkl' \ 48 | --horizon=36 \ 49 | --test 50 | echo "Data loaded." 51 | 52 | echo "Start running ... " 53 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 54 | --model-name='power_solar_36.pkl' \ 55 | --pickle-name='power_system_solar_36.pkl' \ 56 | --dim=36 \ 57 | --horizon=36 \ 58 | --gpu 59 | 60 | echo "Finished running!" 
61 | 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_36_wind.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p_wind_36 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=02:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_wind_36.txt 11 | #SBATCH --error=job_err_power_wind_36.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system_wind.csv' \ 28 | --pickle-name='power_system_wind_36.pkl' \ 29 | --horizon=36 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_wind_36.pkl' \ 35 | --pickle-name='power_system_wind_36.pkl' \ 36 | --dim=57 \ 37 | --horizon=36 \ 38 | --patience=25 \ 39 | --gpu 40 | 41 | 42 | echo "Finished running training!" 43 | 44 | echo "Loading data ... " 45 | python3 ec_feature_preprocess.py --data-folder='data' \ 46 | --file-name='europe_power_system_wind.csv' \ 47 | --pickle-name='power_system_wind_36.pkl' \ 48 | --horizon=36 \ 49 | --test 50 | echo "Data loaded." 51 | 52 | echo "Start running ... " 53 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 54 | --model-name='power_wind_36.pkl' \ 55 | --pickle-name='power_system_wind_36.pkl' \ 56 | --dim=57 \ 57 | --horizon=36 \ 58 | --gpu 59 | 60 | echo "Finished running!" 61 | 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p6 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=04:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_6_3.txt 11 | #SBATCH --error=job_err_power_6_3.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | echo "Loading data ... " 26 | python3 ec_feature_preprocess.py --data-folder='data' \ 27 | --file-name='europe_power_system.csv' \ 28 | --pickle-name='power_system_6_3.pkl' \ 29 | --horizon=6 30 | echo "Data loaded." 31 | 32 | echo "Start running ... " 33 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 34 | --model-name='power_6_3.pkl' \ 35 | --pickle-name='power_system_6_3.pkl' \ 36 | --dim=183 \ 37 | --horizon=6 \ 38 | --patience=25 \ 39 | --gpu 40 | 41 | 42 | echo "Finished running training!" 43 | 44 | echo "Loading data ... 
" 45 | python3 ec_feature_preprocess.py --data-folder='data' \ 46 | --file-name='europe_power_system.csv' \ 47 | --pickle-name='power_system_6_3.pkl' \ 48 | --horizon=6 \ 49 | --test 50 | echo "Data loaded." 51 | 52 | echo "Start running ... " 53 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 54 | --model-name='power_6_3.pkl' \ 55 | --pickle-name='power_system_6_3.pkl' \ 56 | --dim=183 \ 57 | --horizon=6 \ 58 | --gpu 59 | 60 | echo "Finished running!" 61 | 62 | seff $SLURM_JOBID 63 | -------------------------------------------------------------------------------- /DeepTCN/batch_jobs/batch_job_tcn_power_hop_36_evaluate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=tcn_p36_evaluate 3 | #SBATCH --account=Project_2002244 4 | #SBATCH --partition=gpu 5 | #SBATCH --ntasks=1 6 | #SBATCH --cpus-per-task=4 7 | #SBATCH --mem=4G 8 | #SBATCH --time=06:00:00 9 | #SBATCH --gres=gpu:v100:1 10 | #SBATCH --output=job_out_power_hop_36_train_evaluate.txt 11 | #SBATCH --error=job_err_power_hop_36_train_evaluate.txt 12 | 13 | module load gcc/8.3.0 cuda/10.1.168 14 | #module load python-data 15 | module load mxnet 16 | 17 | 18 | cd ../electricity/NewTCNQuantile 19 | 20 | #echo "Installing requirements ..." 21 | #pip install -r 'requirements.txt' 22 | #--user -q --no-cache-dir 23 | #echo "Requirements installed." 24 | 25 | 26 | echo "Loading data for training ... " 27 | 28 | python3 ec_feature_preprocess.py --data-folder='data' \ 29 | --file-name='europe_power_system.csv' \ 30 | --pickle-name='power_system_hop_36.pkl' \ 31 | --horizon=36 32 | echo "Training data loaded." 33 | 34 | echo "Start running training... " 35 | 36 | python3 ec_probabilistic_forecasting.py --data-folder='data' \ 37 | --model-name='power_hop_36.pkl' \ 38 | --pickle-name='power_system_hop_36.pkl' \ 39 | --dim=183 \ 40 | --horizon=36 \ 41 | --gpu \ 42 | --batch_size=128\ 43 | --units=200 \ 44 | --learning_rate=0.005 \ 45 | --dropout=0.3 46 | echo "Finished training!" 47 | 48 | 49 | echo "Loading testing data ... " 50 | python3 ec_feature_preprocess.py --data-folder='data' \ 51 | --file-name='europe_power_system.csv' \ 52 | --pickle-name='power_system_hop_36.pkl' \ 53 | --horizon=36 \ 54 | --test 55 | 56 | echo "Testing data loaded." 57 | 58 | echo "Start running testing... " 59 | #Optimization parameters {'batch_size': 128, 'dropout': 0.30000000000000004, 'learning_rate': 0.005, 'units': 200} 60 | 61 | python3 ec_probabilistic_forecasting_evaluate.py --data-folder='data' \ 62 | --model-name='power_hop_36.pkl' \ 63 | --pickle-name='power_system_hop_36.pkl' \ 64 | --dim=183 \ 65 | --horizon=36 \ 66 | --gpu 67 | echo "Finished running!" 68 | 69 | seff $SLURM_JOBID 70 | -------------------------------------------------------------------------------- /DeepTCN/electricity/NewTCNQuantile/pytorchtools.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | # import torch 4 | import pickle 5 | 6 | class EarlyStopping: 7 | """Early stops the training if validation loss doesn't improve after a given patience.""" 8 | def __init__(self, patience=7, verbose=False, delta=0, model_name='checkpoint.pkl'): 9 | """ 10 | Args: 11 | patience (int): How long to wait after last time validation loss improved. 12 | Default: 7 13 | verbose (bool): If True, prints a message for each validation loss improvement. 
14 | Default: False 15 | delta (float): Minimum change in the monitored quantity to qualify as an improvement. 16 | Default: 0 17 | """ 18 | self.patience = patience 19 | self.verbose = verbose 20 | self.counter = 0 21 | self.best_score = None 22 | self.early_stop = False 23 | self.val_loss_min = np.Inf 24 | self.delta = delta 25 | self.model_name=model_name 26 | 27 | def __call__(self, val_loss, model): 28 | 29 | score = -val_loss 30 | 31 | if self.best_score is None: 32 | self.best_score = score 33 | self.save_checkpoint(val_loss, model) 34 | elif score < self.best_score + self.delta: 35 | self.counter += 1 36 | print(f'EarlyStopping counter: {self.counter} out of {self.patience}') 37 | if self.counter >= self.patience: 38 | self.early_stop = True 39 | else: 40 | self.best_score = score 41 | self.save_checkpoint(val_loss, model) 42 | self.counter = 0 43 | 44 | def save_checkpoint(self, val_loss, model): 45 | '''Saves model when validation loss decrease.''' 46 | if self.verbose: 47 | print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...') 48 | pickle.dump(model, open(os.path.join('./save', self.model_name), 'wb')) 49 | #torch.save(model.state_dict(), 'checkpoint.pt') 50 | self.val_loss_min = val_loss 51 | -------------------------------------------------------------------------------- /DeepTCN/electricity/basicPreprocess.R: -------------------------------------------------------------------------------- 1 | library(lubridate) 2 | library(data.table) 3 | 4 | dirPath <- '/home/jdroot/Oneday/Order/Electricty' 5 | dt <- fread(file.path(dirPath,'LD2011_2014.txt'), header=T, sep=';') 6 | timeIndex <- seq(ymd_hms('2011-01-01 00:00:00'),ymd_hms('2014-12-31 23:00:00'), by = 'hour') 7 | 8 | ##############################################3 9 | ### by aggregating blocks of 4 columns, to obtain T = 26, 304 10 | ##############################################3 11 | dt[,V1:=NULL] 12 | aggList <- sapply(dt, function(x){ 13 | x <- as.numeric(sub(",", ".", x, fixed = TRUE)) 14 | x <- matrix(x, nrow=4) 15 | subResult <- colSums(x) 16 | subResult 17 | }) 18 | 19 | timeIndex <- timeIndex[8761:35064] 20 | aggList <- aggList[8761:35064,] 21 | modelData <- t(aggList) 22 | 23 | write.table(modelData, file='modelData.csv', sep=',', col.names=F) 24 | save(timeIndex, aggList, file='basicPreprocess.rda') 25 | -------------------------------------------------------------------------------- /DeepTCN/electricity/readme.md: -------------------------------------------------------------------------------- 1 | ### Probabilistic Forecasting with Temporal Convolutional Neural Network 2 | Source codes for the paper "probabilistic forecasting with temporal convolutional neural network" 3 | #### Electricity 4 | ##### Data preprocessing 5 | * Download the dataset from UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014 6 | * Run "R CMD BATCH electricity/basicPreprocess.R" to generate "modelData.csv" for model training. 
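* If R is not available, a rough pandas equivalent of the aggregation done by basicPreprocess.R is sketched below (illustrative only: it assumes the raw LD2011_2014.txt file sits in the working directory, and calendar-hour resampling may group the very first 15-minute readings slightly differently than the positional blocks of four used in the R script).

```python
import pandas as pd

# LD2011_2014.txt: semicolon-separated, comma used as the decimal mark,
# first column holds the 15-minute timestamps
df = pd.read_csv("LD2011_2014.txt", sep=";", decimal=",",
                 index_col=0, parse_dates=True)

# Sum each client's four 15-minute readings into hourly values
hourly = df.resample("1H").sum()

# Keep 2012-2014 (26,304 hourly steps), matching timeIndex[8761:35064] in the R script
hourly = hourly.loc["2012-01-01":"2014-12-31 23:00:00"]

# Transpose so that each row is one client, then write modelData.csv as the R script does
hourly.T.to_csv("modelData.csv", header=False)
```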
7 | ##### Point forecasting 8 | * python3 electricity/ecPointModel/ec_feature_preprocess.py 9 | * python3 electricity/ecPointModel/ECPointHuber.py 10 | ##### Probabilistic forecasting 11 | * python3 electricity/NewTCNQuantile/ec_feature_preprocess.py 12 | * python3 electricity/NewTCNQuantile/ec_probabilistic_forecasting.py 13 | ### Traffic 14 | ##### Data preprocessing 15 | * Download the dataset from UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/PEMS-SF 16 | * Run "R CMD BATCH traffic/basicPreprocess.R" to generate "traffic.csv". 17 | * Run "python3 traffic/traffic_feature_preprocess.py" to generate "tensor_prepare.pkl" for model training. 18 | ##### Point forecasting 19 | * python3 traffic/point/traffic_point_forecasting.py 20 | ##### Quantile regression 21 | * python3 traffic/quantile/traffic_quantile_forecasting.py 22 | 23 | ##### Probabilistic forecasting 24 | ### Parts 25 | ##### Data preprocessing 26 | ##### Probabilistic forecasting 27 | 28 | ### Reference Paper 29 | [Probabilistic forecasting with temporal convolutional neural network](https://arxiv.org/abs/1906.04397) 30 | 31 | KDD 2019, Workshop on Mining and Learning from Time Series, 2019 32 | 33 | ### Kind reminder 34 | The whole project will be refined over the coming months. You can also achieve better results with more careful data preprocessing, such as scaling. 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 aleksei-mashlakov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /LSTNet/batch_jobs/batch_job_lstnet_hpo_elect.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --account=Project_2002244 7 | #SBATCH --job-name=lstnet_hop_elect_36 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=20G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_elect_hop_36_test.txt 15 | #SBATCH --error=job_err_elect_hop_36_test.txt 16 | 17 | # 32 GB 18 | 19 | # module purge 20 | # module load python-env/3.7.4-ml 21 | # module list 22 | 23 | module purge 24 | module load python-data/3.7.3-1 25 | module load tensorflow/2.0.0 26 | 27 | cd .. 28 | 29 | echo "Hola el patron!" 30 | gpuseff $SLURM_JOBID 31 | 32 | #pip3 install --upgrade pip3 --user 33 | #pip3 install hyperopt --user -q 34 | #pip3 install -r 'requirements.txt' -q --user 35 | #srun python3 preprocess_elect_custom.py 36 | 37 | python3 main.py --data="data/electricity.txt" \ 38 | --horizon=36 \ 39 | --save="save/electricity_hop_36_2/electricity_hop_36" \ 40 | --epochs=100 \ 41 | --optimize \ 42 | --evals=100 \ 43 | --test \ 44 | --predict="testingdata" \ 45 | --savehistory \ 46 | --plot \ 47 | --series-to-plot='5' \ 48 | --save-plot="save/results" \ 49 | --logfilename="log/lstnet_electricity_hop_36_2" \ 50 | --debuglevel=20 \ 51 | --trainpercent=0.1104014598540146 \ 52 | --validpercent=0.028284671532846715 \ 53 | --patience=5 54 | 55 | # {'GRUUnits': 128.0, 'batchsize': 128.0, 'dropout': 0.1, 'lr': 0.0005} 56 | 57 | python3 main.py --data="data/electricity.txt" \ 58 | --horizon=36 \ 59 | --save="save/electricity_hop_36_2/electricity_hop_36"\ 60 | --epochs=500 \ 61 | --GRUUnits=128 \ 62 | --lr=0.0005\ 63 | --batchsize=128\ 64 | --dropout=0.1\ 65 | --test \ 66 | --predict="testingdata" \ 67 | --savehistory \ 68 | --plot \ 69 | --series-to-plot='5' \ 70 | --save-plot="save/electricity_hop_36_2/electricity_hop_36"\ 71 | --logfilename="log/lstnet_electricity_hop_36_2"\ 72 | --debuglevel=20 \ 73 | --mc-iterations=100 \ 74 | 75 | # Train and test the rest of the horizons 76 | 77 | python3 main.py --data="data/electricity.txt" \ 78 | --horizon=36 \ 79 | --test \ 80 | --no-saveresults \ 81 | --logfilename="log/lstnet_electricity_hop_36_2_test"\ 82 | --debuglevel=20 \ 83 | --no-train \ 84 | --no-validation \ 85 | --load="save/electricity_hop_36_2/electricity_hop_36" \ 86 | --mc-iterations=100 \ 87 | 88 | #python train_old.py 89 | echo "Adios el patron!" 90 | 91 | # This script will print some usage statistics to the 92 | # end of the standard out file 93 | # Use that to improve your resource request estimate 94 | # on later jobs. 
95 | seff $SLURM_JOBID 96 | -------------------------------------------------------------------------------- /LSTNet/batch_jobs/batch_job_lstnet_hpo_power.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --account=Project_2002244 7 | #SBATCH --job-name=lstnet_hop_36 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=12G 12 | #SBATCH --time=01:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power_hop_36_test.txt 15 | #SBATCH --error=job_err_power_hop_36_test.txt 16 | 17 | # 32 GB 18 | 19 | # module purge 20 | # module load python-env/3.7.4-ml 21 | # module list 22 | 23 | module purge 24 | module load python-data/3.7.3-1 25 | module load tensorflow/2.0.0 26 | #pip3 install pyyaml #-q --user 27 | #pip3 install h5py 28 | 29 | #cd LSTNet 30 | gpuseff $SLURM_JOBID 31 | 32 | cd .. 33 | 34 | echo "Hola el patron!" 35 | #pip3 install --upgrade pip3 --user 36 | #pip3 install hyperopt --user -q 37 | #pip3 install -r 'requirements.txt' -q --user 38 | #srun python3 preprocess_elect_custom.py 39 | 40 | python3 main.py --data="data/europe_power_system.txt" \ 41 | --horizon=36 \ 42 | --save="save/power_system_hop_36_2/power_system_hop_36" \ 43 | --epochs=100 \ 44 | --optimize \ 45 | --evals=100 \ 46 | --test \ 47 | --predict="testingdata" \ 48 | --savehistory \ 49 | --plot \ 50 | --series-to-plot='5' \ 51 | --save-plot="save/results" \ 52 | --logfilename="log/lstnet_power_system_hop_36_2" \ 53 | --debuglevel=20 \ 54 | --trainpercent=0.11267605633802817 \ 55 | --validpercent=0.02910798122065728 \ 56 | --patience=5 57 | 58 | 59 | #{'GRUUnits': 256.0, 'batchsize': 64.0, 'dropout': 0.2, 'lr': 0.0005} 60 | 61 | 62 | python3 main.py --data="data/europe_power_system.txt" \ 63 | --horizon=36 \ 64 | --save="save/power_system_hop_36_2/power_system_hop_36"\ 65 | --epochs=500 \ 66 | --GRUUnits=256 \ 67 | --lr=0.0005\ 68 | --batchsize=64\ 69 | --dropout=0.2\ 70 | --test \ 71 | --predict="testingdata" \ 72 | --savehistory \ 73 | --plot \ 74 | --series-to-plot='5' \ 75 | --save-plot="save/power_system_hop_36_2/results_power_system_hop_36"\ 76 | --logfilename="log/lstnet_power_system_hop_36"\ 77 | --debuglevel=20 \ 78 | --mc-iterations=100 \ 79 | --trainpercent=0.7004694835680751 \ 80 | --validpercent=0.14929577464788732 \ 81 | 82 | 83 | python3 main.py --data="data/europe_power_system.txt" \ 84 | --horizon=36 \ 85 | --test \ 86 | --no-saveresults \ 87 | --logfilename="log/lstnet_power_system_hop_36_2_eval"\ 88 | --debuglevel=20 \ 89 | --no-train \ 90 | --no-validation \ 91 | --load="save/power_system_hop_36_2/power_system_hop_36" \ 92 | --trainpercent=0.7004694835680751 \ 93 | --validpercent=0.14929577464788732 \ 94 | --mc-iterations=100 \ 95 | 96 | #python train_old.py 97 | echo "Adios el patron!" 98 | 99 | # This script will print some usage statistics to the 100 | # end of the standard out file 101 | # Use that to improve your resource request estimate 102 | # on later jobs. 
103 | seff $SLURM_JOBID 104 | -------------------------------------------------------------------------------- /LSTNet/batch_jobs/batch_job_power_system_exogeneous.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # created: Nov 24, 2019 5:07 PM 3 | # author: mashlakov 4 | 5 | #!/bin/bash 6 | #SBATCH --account=Project_2002244 7 | #SBATCH --job-name=lstnet_power_exogeneous 8 | #SBATCH --partition=gpu 9 | #SBATCH --ntasks=1 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH --mem=32G 12 | #SBATCH --time=06:00:00 13 | #SBATCH --gres=gpu:v100:1 14 | #SBATCH --output=job_out_power_exogeneous.txt 15 | #SBATCH --error=job_err_power_exogeneous.txt 16 | 17 | 18 | module purge 19 | module load python-data/3.7.3-1 20 | module load tensorflow/2.0.0 21 | 22 | cd .. 23 | 24 | echo "Hola el patron!" 25 | #pip3 install --upgrade pip3 --user 26 | #pip3 install hyperopt --user -q 27 | #pip3 install -r 'requirements.txt' -q --user 28 | #srun python3 preprocess_elect_custom.py 29 | 30 | 31 | python3 main.py --data="data/europe_power_system_exogeneous.txt" \ 32 | --horizon=36 \ 33 | --save="save/power_system_exogeneous_36/power_system_36"\ 34 | --epochs=500 \ 35 | --GRUUnits=100 \ 36 | --lr=0.001\ 37 | --batchsize=128\ 38 | --dropout=0.2\ 39 | --test \ 40 | --predict="testingdata" \ 41 | --savehistory \ 42 | --plot \ 43 | --series-to-plot='5' \ 44 | --save-plot="save/power_system_exogeneous_36/results_power_system_36"\ 45 | --logfilename="log/lstnet_power_exogeneous_36"\ 46 | --debuglevel=20 \ 47 | --mc-iterations=100 \ 48 | --trainpercent=0.7004694835680751 \ 49 | --validpercent=0.14929577464788732 \ 50 | 51 | 52 | python3 main.py --data="data/europe_power_system_exogeneous.txt" \ 53 | --horizon=36 \ 54 | --test \ 55 | --no-saveresults \ 56 | --logfilename="log/lstnet_power_exogeneous_36_eval"\ 57 | --debuglevel=20 \ 58 | --no-train \ 59 | --no-validation \ 60 | --load="save/power_system_exogeneous_36/power_system_36" \ 61 | --trainpercent=0.7004694835680751 \ 62 | --validpercent=0.14929577464788732 \ 63 | --mc-iterations=100 \ 64 | 65 | echo "Adios el patron!" 66 | 67 | # This script will print some usage statistics to the 68 | # end of the standard out file 69 | # Use that to improve your resource request estimate 70 | # on later jobs. 71 | seff $SLURM_JOBID 72 | -------------------------------------------------------------------------------- /LSTNet/electricity.sh: -------------------------------------------------------------------------------- 1 | python3.6 main.py --data="data/electricity.txt" --horizon=24 --save="save/electricity" --test --savehistory --logfilename="log/lstnet" --debuglevel=20 2 | -------------------------------------------------------------------------------- /LSTNet/lstnet_datautil.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # Logging 4 | from __main__ import logger_name 5 | import logging 6 | log = logging.getLogger(logger_name) 7 | 8 | class DataUtil(object): 9 | # 10 | # This class contains data specific information. 11 | # It does the following: 12 | # - Read data from file 13 | # - Normalise it 14 | # - Split it into train, dev (validation) and test 15 | # - Create X and Y for each of the 3 sets (train, dev, test) according to the following: 16 | # Every sample (x, y) shall be created as follows: 17 | # - x --> window number of values 18 | # - y --> one value that is at horizon in the future i.e. 
that is horizon away past the last value of x 19 | # This way X and Y will have the following dimensions: 20 | # - X [number of samples, window, number of multivariate time series] 21 | # - Y [number of samples, number of multivariate time series] 22 | 23 | def __init__(self, filename, train, valid, horizon, window, normalise = 2): 24 | try: 25 | fin = open(filename) 26 | 27 | log.debug("Start reading data") 28 | self.rawdata = np.loadtxt(fin, delimiter=',') 29 | log.debug("End reading data") 30 | 31 | self.w = window 32 | self.h = horizon 33 | self.data = np.zeros(self.rawdata.shape) 34 | self.n, self.m = self.data.shape 35 | self.normalise = normalise 36 | self.scale = np.ones(self.m) 37 | 38 | self.normalise_data(normalise) 39 | self.split_data(train, valid) 40 | except IOError as err: 41 | # In case file is not found, all of the above attributes will not have been created 42 | # Hence, in order to check if this call was successful, you can call hasattr on this object 43 | # to check if it has attribute 'data' for example 44 | log.error("Error opening data file ... %s", err) 45 | 46 | 47 | def normalise_data(self, normalise): 48 | log.debug("Normalise: %d", normalise) 49 | 50 | if normalise == 0: # do not normalise 51 | self.data = self.rawdata 52 | 53 | if normalise == 1: # same normalisation for all timeseries 54 | self.data = self.rawdata / np.max(self.rawdata) 55 | 56 | if normalise == 2: # normalise each timeseries alone. This is the default mode 57 | for i in range(self.m): 58 | self.scale[i] = np.max(np.abs(self.rawdata[:, i])) 59 | self.data[:, i] = self.rawdata[:, i] / self.scale[i] 60 | 61 | def split_data(self, train, valid): 62 | log.info("Splitting data into training set (%.2f), validation set (%.2f) and testing set (%.2f)", train, valid, 1 - (train + valid)) 63 | 64 | train_set = range(self.w + self.h - 1, int(train * self.n)) 65 | valid_set = range(int(train * self.n), int((train + valid) * self.n)) 66 | test_set = range(int((train + valid) * self.n), self.n) 67 | 68 | self.train = self.get_data(train_set) 69 | self.valid = self.get_data(valid_set) 70 | self.test = self.get_data(test_set) 71 | 72 | def get_data(self, rng): 73 | n = len(rng) 74 | 75 | X = np.zeros((n, self.w, self.m)) 76 | Y = np.zeros((n, self.m)) 77 | 78 | for i in range(n): 79 | end = rng[i] - self.h + 1 80 | start = end - self.w 81 | 82 | X[i,:,:] = self.data[start:end, :] 83 | Y[i,:] = self.data[rng[i],:] 84 | 85 | log.info("Shape of data X: %s, Y: %s ", str(X.shape), str(Y.shape)) 86 | return [X, Y] 87 | -------------------------------------------------------------------------------- /datasets/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /datasets/csv/electricity.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/csv/electricity.csv.gz -------------------------------------------------------------------------------- /datasets/csv/europe_power_system.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/csv/europe_power_system.csv.gz -------------------------------------------------------------------------------- 
/datasets/csv/europe_power_system_exogeneous.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/csv/europe_power_system_exogeneous.csv.gz -------------------------------------------------------------------------------- /datasets/csv/europe_power_system_load.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/csv/europe_power_system_load.csv.gz -------------------------------------------------------------------------------- /datasets/csv/europe_power_system_price.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/csv/europe_power_system_price.csv.gz -------------------------------------------------------------------------------- /datasets/csv/europe_power_system_solar.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/csv/europe_power_system_solar.csv.gz -------------------------------------------------------------------------------- /datasets/csv/europe_power_system_wind.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/csv/europe_power_system_wind.csv.gz -------------------------------------------------------------------------------- /datasets/txt/electricity.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/txt/electricity.txt.gz -------------------------------------------------------------------------------- /datasets/txt/europe_power_system.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/txt/europe_power_system.txt.gz -------------------------------------------------------------------------------- /datasets/txt/europe_power_system_exogeneous.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/txt/europe_power_system_exogeneous.txt.gz -------------------------------------------------------------------------------- /datasets/txt/europe_power_system_load.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/txt/europe_power_system_load.txt.gz -------------------------------------------------------------------------------- /datasets/txt/europe_power_system_price.txt.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/txt/europe_power_system_price.txt.gz -------------------------------------------------------------------------------- /datasets/txt/europe_power_system_solar.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/txt/europe_power_system_solar.txt.gz -------------------------------------------------------------------------------- /datasets/txt/europe_power_system_wind.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/datasets/txt/europe_power_system_wind.txt.gz -------------------------------------------------------------------------------- /results/coverage_test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/results/coverage_test.jpg -------------------------------------------------------------------------------- /results/stat_test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aleksei-mashlakov/multivariate-deep-learning/e495ce3626fe2843364f4644d5d3e9ab69791b82/results/stat_test.jpg -------------------------------------------------------------------------------- /util/Msglog.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import logging.handlers 3 | 4 | # 5 | # This function is called in the main python file in order 6 | # to initialise logging. 7 | # After that, in each other file, we just need to do the following: 8 | # import logging 9 | # log = logging.getLogger(name) where name is the same one used in LogInit 10 | # 11 | # The arguments that this function take are: 12 | # - name: name of the logger. Passed from the main python file. The same must be used in all other files 13 | # - filename: full path of the file where logs are to be written 14 | # - debuglevel: Level for the FileHandler. Messages with level bigger or equal to this one will be logged 15 | # 16 | def LogInit(name, filename, debuglevel = logging.INFO, log = True): 17 | # If the passed debuglevel is not the allowed one, print an error and default to logging.INFO 18 | try: 19 | assert(debuglevel in [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]) 20 | except AssertionError as err: 21 | logging.error("Invalid debuglevel (%d), changing it to %d", debuglevel, logging.INFO) 22 | debuglevel = logging.INFO 23 | 24 | # Create a logger with name 'name' and set its level to debuglevel` 25 | logger = logging.getLogger(name) 26 | logger.setLevel(debuglevel) 27 | 28 | # Set the log format 29 | formatter = logging.Formatter('%(asctime)s.%(msecs)d (%(process)d) (%(levelname)s) %(module)s.%(funcName)s -> %(message)s', datefmt='%Y-%m-%d %H:%M:%S') 30 | 31 | # Configure a StreamHandler that will log messages with level ERROR and CRITICAL onto the console. 
32 | # Those messages will be logged even if log == False 33 | ch = logging.StreamHandler() 34 | ch.setLevel(logging.ERROR) 35 | ch.setFormatter(formatter) 36 | logger.addHandler(ch) 37 | 38 | if log == True: 39 | # Configure a TimedRotatingFileHandler that creates a new file at midnight in order to log 40 | # all messages with level greater than or equal to debuglevel if and only if log == True 41 | fh = logging.handlers.TimedRotatingFileHandler(filename, when='midnight') 42 | fh.setLevel(debuglevel) 43 | fh.setFormatter(formatter) 44 | logger.addHandler(fh) 45 | else: 46 | # NullHandler in case log == False 47 | nh = logging.NullHandler() 48 | logger.addHandler(nh) 49 | 50 | return logger 51 | --------------------------------------------------------------------------------
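A minimal usage sketch for util/Msglog.py (illustrative only: the logger name and log-file path below are placeholders, and it assumes util/ is importable from the working directory and that the log/ directory already exists):

```python
import logging
from util.Msglog import LogInit

# Name under which every other module will look up the same logger
logger_name = "example_run"

# Initialise once in the main script: ERROR/CRITICAL always reach the console,
# and with log=True everything at or above debuglevel goes to a midnight-rotating file
log = LogInit(logger_name, "log/example_run.log", debuglevel=logging.DEBUG, log=True)
log.info("logger initialised")

# In any other module of the project (as lstnet_datautil.py does):
#     from __main__ import logger_name
#     import logging
#     log = logging.getLogger(logger_name)
```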