├── .gitignore
├── Code
│   ├── BenchmarkModel
│   │   ├── EventClassification
│   │   │   ├── README.md
│   │   │   ├── configs
│   │   │   │   ├── FNN.yaml
│   │   │   │   ├── Inceptiontime.yaml
│   │   │   │   ├── MCDCNN.yaml
│   │   │   │   ├── MLSTM_FCN.yaml
│   │   │   │   ├── MiniRocket.yaml
│   │   │   │   ├── NNDTW.yaml
│   │   │   │   ├── NNEuclidean.yaml
│   │   │   │   ├── RNN.yaml
│   │   │   │   ├── ResNet.yaml
│   │   │   │   ├── TapNet.yaml
│   │   │   │   ├── cnn.yaml
│   │   │   │   └── transformer.yaml
│   │   │   ├── evaluating.py
│   │   │   ├── models
│   │   │   │   ├── FNN.py
│   │   │   │   ├── Inceptiontime.py
│   │   │   │   ├── MCDCNN.py
│   │   │   │   ├── MLSTM_FCN.py
│   │   │   │   ├── MiniRocket.py
│   │   │   │   ├── NNDTW.py
│   │   │   │   ├── NNEuclidean.py
│   │   │   │   ├── ResNet.py
│   │   │   │   ├── TapNet.py
│   │   │   │   ├── cnn.py
│   │   │   │   ├── rnn.py
│   │   │   │   ├── transformer.py
│   │   │   │   └── utils.py
│   │   │   ├── processing.py
│   │   │   └── requirements.txt
│   │   ├── LoadForecasting
│   │   │   ├── README.md
│   │   │   ├── configs
│   │   │   │   ├── DeepAR.yaml
│   │   │   │   ├── ELM.yaml
│   │   │   │   ├── FNN.yaml
│   │   │   │   ├── LSTNet.yaml
│   │   │   │   ├── NBeats.yaml
│   │   │   │   ├── NeuralODE.yaml
│   │   │   │   ├── RNN.yaml
│   │   │   │   ├── WaveNet.yaml
│   │   │   │   ├── arima.yaml
│   │   │   │   ├── cnn.yaml
│   │   │   │   ├── exponential_smoothing.yaml
│   │   │   │   ├── gradient_boosting.yaml
│   │   │   │   ├── informer.yaml
│   │   │   │   ├── linear_regression.yaml
│   │   │   │   ├── naive.yaml
│   │   │   │   ├── random_forest.yaml
│   │   │   │   ├── svr.yaml
│   │   │   │   ├── tcn.yaml
│   │   │   │   └── transformer.yaml
│   │   │   ├── evaluating.py
│   │   │   ├── models
│   │   │   │   ├── DeepAR.py
│   │   │   │   ├── EML.py
│   │   │   │   ├── FNN.py
│   │   │   │   ├── LSTNet.py
│   │   │   │   ├── NBeats.py
│   │   │   │   ├── NeuralODE.py
│   │   │   │   ├── WaveNet.py
│   │   │   │   ├── arima.py
│   │   │   │   ├── cnn.py
│   │   │   │   ├── exponential_smoothing.py
│   │   │   │   ├── gradient_boosting.py
│   │   │   │   ├── informer.py
│   │   │   │   ├── linear_regression.py
│   │   │   │   ├── naive.py
│   │   │   │   ├── random_forest.py
│   │   │   │   ├── rnn.py
│   │   │   │   ├── svr.py
│   │   │   │   ├── tcn.py
│   │   │   │   ├── transformer.py
│   │   │   │   └── utils.py
│   │   │   ├── processing.py
│   │   │   └── requirements.txt
│   │   ├── README.md
│   │   └── SyntheticDataGeneration
│   │       ├── DoppelGANger
│   │       │   ├── LICENSE
│   │       │   ├── README.md
│   │       │   ├── example_generating_data
│   │       │   │   ├── config_generate_data.py
│   │       │   │   ├── gan_generate_data_task.py
│   │       │   │   └── main_generate_data.py
│   │       │   ├── example_main(without_GPUTaskScheduler)
│   │       │   │   ├── generate.py
│   │       │   │   └── main.py
│   │       │   ├── example_training
│   │       │   │   ├── config.py
│   │       │   │   ├── gan_task.py
│   │       │   │   └── main.py
│   │       │   └── gan
│   │       │       ├── __init__.py
│   │       │       ├── doppelganger.py
│   │       │       ├── load_data.py
│   │       │       ├── network.py
│   │       │       ├── op.py
│   │       │       ├── op.py.LICENSE
│   │       │       ├── output.py
│   │       │       └── util.py
│   │       ├── NaiveGAN
│   │       │   └── main.py
│   │       ├── README.md
│   │       ├── RGAN
│   │       │   ├── LICENSE
│   │       │   ├── README.md
│   │       │   ├── RVAE
│   │       │   │   ├── launch_sine_generation_RVAE.py
│   │       │   │   ├── launch_sine_generation_RVAE_concatenated_input.py
│   │       │   │   ├── sine_generation_RVAE.py
│   │       │   │   ├── sine_generation_RVAE_concatenated_input.py
│   │       │   │   ├── sine_generation_RVAE_concatenated_input_find_best_model.py
│   │       │   │   └── sine_generation_RVAE_find_best_model.py
│   │       │   ├── data_utils.py
│   │       │   ├── differential_privacy
│   │       │   │   ├── dp_sgd
│   │       │   │   │   └── dp_optimizer
│   │       │   │   │       ├── dp_optimizer.py
│   │       │   │   │       ├── sanitizer.py
│   │       │   │   │       └── utils.py
│   │       │   │   └── privacy_accountant
│   │       │   │       └── tf
│   │       │   │           └── accountant.py
│   │       │   ├── eugenium_mmd.py
│   │       │   ├── eval.py
│   │       │   ├── experiment.py
│   │       │   ├── experiments
│   │       │   │   └── settings
│   │       │   │       ├── power.txt
│   │       │   │       └── test.txt
│   │       │   ├── kernel.py
│   │       │   ├── mmd.py
│   │       │   ├── mod_core_rnn_cell_impl.py
│   │       │   ├── model.py
│   │       │   ├── paths.py
│   │       │   ├── plotting.py
│   │       │   ├── requirements.txt
│   │       │   ├── tf_ops.py
│   │       │   ├── tstr.py
│   │       │   └── utils.py
│   │       ├── cot-gan
│   │       │   ├── README.md
│   │       │   ├── __init__.py
│   │       │   ├── data_utils.py
│   │       │   ├── figs
│   │       │   │   ├── animation.gif
│   │       │   │   └── humanaction.gif
│   │       │   ├── gan.py
│   │       │   ├── gan_utils.py
│   │       │   ├── main.py
│   │       │   └── requirements.txt
│   │       ├── evaluating.py
│   │       ├── processing.py
│   │       └── timeGAN
│   │           ├── README.md
│   │           ├── data_loading.py
│   │           ├── main.py
│   │           ├── main_timegan.py
│   │           ├── metrics
│   │           │   ├── discriminative_metrics.py
│   │           │   ├── predictive_metrics.py
│   │           │   └── visualization_metrics.py
│   │           ├── requirements.txt
│   │           ├── timegan.py
│   │           └── utils.py
│   ├── Data Processing
│   │   ├── README.md
│   │   ├── renewable_v2_step1_weather_v2_extended.py
│   │   ├── renewable_v2_step2_wind_v2_extended.py
│   │   ├── renewable_v2_step3_solar_v2_extended.py
│   │   ├── renewable_v2_step4_load_v2_extended.py
│   │   ├── renewable_v2_step5_aggregate_v2_extended.py
│   │   └── requirements.txt
│   ├── Joint Simulation
│   │   ├── README.md
│   │   ├── case_D
│   │   │   └── 13Bus
│   │   │       ├── IEEE13Node_BusXY.csv
│   │   │       ├── IEEE13Nodeckt.dss
│   │   │       ├── IEEE13Nodeckt_scaled.dss
│   │   │       └── IEEELineCodes.DSS
│   │   ├── case_T
│   │   │   ├── PSSE
│   │   │   │   ├── IEEE_39_bus.dyr
│   │   │   │   ├── IEEE_39_bus.out
│   │   │   │   └── IEEE_39_bus.raw
│   │   │   ├── PSSE23
│   │   │   │   ├── output_tuned1.outx
│   │   │   │   ├── savnw_tuned.dyr
│   │   │   │   ├── savnw_tuned.out
│   │   │   │   ├── savnw_tuned.raw
│   │   │   │   ├── savnw_tuned.sav
│   │   │   │   ├── savnw_tuned.sld
│   │   │   │   ├── savnw_tuned.snp
│   │   │   │   ├── savnw_tuned_converted.dyr
│   │   │   │   ├── savnw_tuned_converted.raw
│   │   │   │   ├── savnw_tuned_converted.sav
│   │   │   │   ├── savnw_tuned_old.dyr
│   │   │   │   ├── savnw_tuned_th.dyr
│   │   │   │   ├── savnw_tuned_th.out
│   │   │   │   └── savnw_tuned_th.raw
│   │   │   ├── PSSE23_wind
│   │   │   │   ├── savnw_wind.dyr
│   │   │   │   ├── savnw_wind.out
│   │   │   │   ├── savnw_wind.raw
│   │   │   │   ├── savnw_wind_tuned.dyr
│   │   │   │   ├── savnw_wind_tuned.out
│   │   │   │   └── savnw_wind_tuned.raw
│   │   │   └── Re_wind_v3
│   │   │       ├── Test1_sav_wind.py
│   │   │       ├── Test2_ScaleDownSav.py
│   │   │       ├── Test3_Initialization.py
│   │   │       ├── output1.out
│   │   │       ├── output1.outx
│   │   │       ├── savnw.sld
│   │   │       ├── savnw_REwind.dyr
│   │   │       ├── savnw_REwind.snp
│   │   │       ├── savnw_REwind_flat.log
│   │   │       ├── savnw_wind.sav
│   │   │       ├── savnw_wind_scale_down.dyr
│   │   │       ├── savnw_wind_scale_down.out
│   │   │       ├── savnw_wind_scale_down.raw
│   │   │       ├── savnw_wind_scale_down.sav
│   │   │       └── savnw_wind_scale_down_cnv.sav
│   │   ├── code
│   │   │   ├── __pycache__
│   │   │   │   ├── cosim.cpython-34.pyc
│   │   │   │   ├── cosim.cpython-39.pyc
│   │   │   │   ├── solar_inverter.cpython-34.pyc
│   │   │   │   ├── utils.cpython-34.pyc
│   │   │   │   └── utils.cpython-39.pyc
│   │   │   ├── cosim.py
│   │   │   ├── create_metadata.py
│   │   │   ├── idle32.bat -.lnk
│   │   │   ├── inverter
│   │   │   │   ├── CurCtr_alg.m
│   │   │   │   ├── CurCtr_diff.m
│   │   │   │   ├── Dyn_Angle_Droop.m
│   │   │   │   ├── Dyn_Angle_Droop_Simple.m
│   │   │   │   ├── Dyn_Freq_Droop.m
│   │   │   │   ├── Dyn_Freq_Droop_Simple.m
│   │   │   │   ├── Dynamics_CurCtr.m
│   │   │   │   ├── Dynamics_LC_Filter.m
│   │   │   │   ├── Dynamics_PowSensor.m
│   │   │   │   ├── LC_Filter.m
│   │   │   │   ├── LC_Filter_RK4.m
│   │   │   │   ├── LinePlusInfBus.m
│   │   │   │   ├── Network_Simple.m
│   │   │   │   ├── PowCtr_Ang_Simple.m
│   │   │   │   ├── PowCtr_Angle_RK4.m
│   │   │   │   ├── PowCtr_Freq_RK4.m
│   │   │   │   ├── PowCtr_Freq_Simple.m
│   │   │   │   ├── Test1_InitCdt_v2.m
│   │   │   │   ├── Test1_IntCdt.mat
│   │   │   │   ├── Test1_Para.mat
│   │   │   │   ├── Test5_CreateFault.m
│   │   │   │   ├── VolCtr_alg.m
│   │   │   │   └── VolCtr_diff.m
│   │   │   ├── main.py
│   │   │   ├── one.py
│   │   │   ├── pvmodel
│   │   │   │   ├── solar_inverter.py
│   │   │   │   └── test_inf_bus.py
│   │   │   ├── run_scenarios.bat
│   │   │   ├── run_sspf.bat
│   │   │   ├── solar_inverter.py
│   │   │   ├── ss_data.py
│   │   │   └── utils.py
│   │   └── requirements.txt
│   ├── README.md
│   ├── dataloader.py
│   └── evaluator.py
├── README.md
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints/
2 | __pycache__/
3 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/README.md:
--------------------------------------------------------------------------------
1 | # Event Detection, Classification and Localization
2 | Here we describe how to reproduce the benchmark results for classification tasks given streaming measurements from sensors.
3 | ## Relevant Packages Install
4 | - Create and activate anaconda virtual environment
5 | ```angular2html
6 | conda create -n EventDetection python=3.7.10
7 | conda activate EventDetection
8 | ```
9 | - Install required packages
10 | ```angular2html
11 | pip install -r requirements.txt
12 | ```
13 |
14 | ## Benchmark Results Reproduction
15 | You can find all codes of different models in folder `models` and their respective configuration files in folder
16 | `configs`. Change the configurations in `***.yaml` from `configs` and run `***.py` from `models` for model training
17 | and evaluation. Or you can directly run `***.py` from `models` with configuration hyperparameters in the command line.
18 |
19 | **NOTE:** The `data_path` in configuration files is the path to **processed** `millisecond-level PMU Measurements` dataset. For experiment log,
20 | you can check the folder named `logs`, or you can name your own log folder by setting `logging_params:save_dir` in
21 | configuration files.
22 |
23 | #### Implementation Details
24 | Configurations in `config` folder are consistent of what we've tried in dataset benchmark submission, except the
25 | `max_epochs` and `manual_seed`. By default, we train all trainable models with 10 random seeds for 50 epochs, and
26 | the average performance is reported in the end. GPU acceleration is supported for both Pytorch and Tensorflow.
27 |
28 | ## References
29 | 1. **InceptionTime**:
30 |
31 | Fawaz, Hassan Ismail, et al. "Inceptiontime: Finding alexnet for time series classification." Data Mining and Knowledge Discovery 34.6 (2020): 1936-1962.
32 |
33 | https://github.com/sktime/sktime-dl.git
34 | 1. **MC-DCNN**:
35 |
36 | Zheng, Yi, et al. "Time series classification using multi-channels deep convolutional neural networks." International conference on web-age information management. Springer, Cham, 2014.
37 |
38 | https://github.com/sktime/sktime-dl.git
39 | 1. **ResNet**:
40 |
41 | Wang, Zhiguang, Weizhong Yan, and Tim Oates. "Time series classification from scratch with deep neural networks: A strong baseline." 2017 International joint conference on neural networks (IJCNN). IEEE, 2017.
42 |
43 | https://github.com/sktime/sktime-dl.git
44 | 1. **MLSTM-FCN**:
45 |
46 | Karim, Fazle, et al. "Multivariate LSTM-FCNs for time series classification." Neural Networks 116 (2019): 237-245.
47 |
48 | https://github.com/houshd/MLSTM-FCN.git
49 | 1. **TapNet**:
50 |
51 | Zhang, Xuchao, et al. "Tapnet: Multivariate time series classification with attentional prototypical network." Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 34. No. 04. 2020.
52 |
53 | https://github.com/xuczhang/tapnet.git
54 | 1. **MiniRocket**:
55 |
56 | Dempster, Angus, Daniel F. Schmidt, and Geoffrey I. Webb. "MiniRocket: A Very Fast (Almost) Deterministic Transform for Time Series Classification." Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. 2021.
57 |
58 | https://github.com/angus924/minirocket.git
59 |
60 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/FNN.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'FNN'
3 |
4 | hidden_size: [64] #[32, 64, 128]
5 | num_layers: [2] #[1, 2, 3]
6 | dropout: [0.1]
7 |
8 | exp_params:
9 | train_valid_ratio: 0.8
10 | label_constraints: [True] #[True, False]
11 |
12 | test_flag: False
13 | last_version: 50
14 |
15 | # DNNs
16 | batch_size: [50] #[50, 100, 150]
17 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
18 | num_workers: 0
19 | normalization: ['standard'] #['none', 'minmax', 'standard']
20 |
21 | target_name: ['fault', 'location', 'starttime']
22 |
23 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
24 |
25 | trainer_params:
26 | max_epochs: 2 # NOTE: by default is 50
27 | gpus: [1]
28 |
29 | logging_params:
30 | save_dir: './../logs/'
31 | name: 'FNN'
32 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/Inceptiontime.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'Inceptiontime'
3 |
4 | nb_filters: [32]
5 | bottleneck_size: [32]
6 | depth: [6]
7 | kernel_size: [40]
8 |
9 | exp_params:
10 | train_valid_ratio: 0.8
11 | label_constraints: [True] #[True, False]
12 |
13 | test_flag: False
14 | last_version: 27
15 |
16 | # DNNs
17 | batch_size: [50] #[50, 100, 150]
18 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
19 | num_workers: 0
20 |
21 | normalization: ['standard'] #['none', 'minmax', 'standard']
22 |
23 | target_name: ['fault', 'location', 'starttime']
24 |
25 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
26 |
27 | trainer_params:
28 | max_epochs: 2 # NOTE: by default is 50
29 | gpus: [-1]
30 |
31 | logging_params:
32 | save_dir: './../logs'
33 | name: 'Inceptiontime'
34 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/MCDCNN.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'MCDCNN'
3 | kernel_size: [5]
4 | pool_size: [2]
5 | filter_sizes: [[8, 8]]
6 | dense_units: [732]
7 |
8 |
9 | exp_params:
10 | train_valid_ratio: 0.8
11 | label_constraints: [True] #[True, False]
12 |
13 | test_flag: False
14 | last_version: 3
15 |
16 | # DNNs
17 | batch_size: [256] #[50, 100, 150]
18 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
19 | momentum: [0.9]
20 | decay: [0.0005]
21 | num_workers: 0
22 |
23 | normalization: ['standard'] #['none', 'minmax', 'standard']
24 |
25 | target_name: ['fault', 'location', 'starttime']
26 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
27 |
28 | trainer_params:
29 | max_epochs: 2 #NOTE: by default it's 50
30 | gpus: [-1]
31 |
32 | logging_params:
33 | save_dir: './../logs'
34 | name: 'MCDCNN'
35 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/MLSTM_FCN.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'MLSTMFCN'
3 |
4 | exp_params:
5 | train_valid_ratio: 0.8
6 | label_constraints: [True] #[True, False]
7 |
8 | test_flag: False
9 | last_version: 3
10 |
11 | # DNNs
12 | batch_size: [50] #[50, 100, 150]
13 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
14 | num_workers: 0
15 | normalization: ['standard'] #['none', 'minmax', 'standard']
16 |
17 | target_name: ['fault', 'location', 'starttime']
18 |
19 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
20 | trainer_params:
21 | max_epochs: 2 # NOTE: by default it's 50
22 | gpus: [-1]
23 |
24 | logging_params:
25 | save_dir: './../logs'
26 | name: 'MLSTMFCN'
27 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/MiniRocket.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'MiniRocket'
3 |
4 | exp_params:
5 | train_valid_ratio: 0.8
6 | label_constraints: [True, False]
7 |
8 | test_flag: False
9 | last_version: 21
10 |
11 | target_name: ['fault', 'location', 'starttime']
12 |
13 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
14 |
15 | logging_params:
16 | save_dir: './../logs'
17 | name: 'MiniRocket'
18 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/NNDTW.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: '1NN DTW'
3 | type: 'dependent' # ['independent', 'dependent']
4 |
5 | exp_params:
6 | test_flag: False
7 | last_version: 2
8 | normalization: 'minmax' #['none', 'minmax', 'standard']
9 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
10 |
11 |
12 | trainer_params:
13 |
14 | logging_params:
15 | save_dir: './../logs'
16 | name: 'NNDTW'
17 |
18 |
19 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/NNEuclidean.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: '1-NN Euclidean distance'
3 |
4 | exp_params:
5 | test_flag: False
6 | last_version: 1
7 | normalization: 'standard' #['none', 'minmax', 'standard']
8 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
9 |
10 | trainer_params:
11 |
12 | logging_params:
13 | save_dir: './../logs'
14 | name: 'NNEuclidean'
15 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/RNN.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'LSTM' # ['RNN', 'GRU', 'LSTM']
3 | hidden_size: [32] #[32, 64, 128]
4 | num_layers: [2] #[1, 2, 3]
5 | direction: ['uni', 'bi'] #['bi', 'uni']
6 | dropout: [0.1] #[0., 0.1]
7 |
8 | exp_params:
9 | train_valid_ratio: 0.8
10 | label_constraints: [True] #[ True, False ]
11 |
12 | test_flag: False
13 | last_version: 23
14 |
15 | # DNNs
16 | batch_size: [50] #[50, 100, 150]
17 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
18 | num_workers: 0
19 |
20 | normalization: ['standard'] #['none', 'minmax', 'standard']
21 |
22 | target_name: ['fault', 'location', 'starttime']
23 |
24 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
25 |
26 | trainer_params:
27 | max_epochs: 2 # NOTE: by default it is 50
28 | gpus: [2]
29 |
30 | logging_params:
31 | save_dir: './../logs'
32 | name: 'RNN'
33 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/ResNet.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'resnet'
3 |
4 | exp_params:
5 | train_valid_ratio: 0.8
6 | label_constraints: [True] #[True, False]
7 |
8 | test_flag: False
9 | last_version: 2
10 |
11 | # DNNs
12 | batch_size: [50] #[50, 100, 150]
13 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
14 | num_workers: 0
15 |
16 | normalization: ['standard'] #['none', 'minmax', 'standard']
17 |
18 | target_name: ['fault', 'location', 'starttime']
19 |
20 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
21 |
22 | trainer_params:
23 | max_epochs: 2 #NOTE: default is 50
24 | gpus: [-1] #NOTE: melady4 fails for tf gpu
25 |
26 | logging_params:
27 | save_dir: './../logs'
28 | name: 'resnet'
29 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/TapNet.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'TapNet'
3 |
4 | dropout: [0.]
5 |
6 | use_cnn: [True]
7 | filters: [[256, 256, 128]]
8 | kernels: [[8, 5, 3]]
9 | dilation: [1]
10 | layers: [[500, 300]]
11 |
12 | use_lstm: [True]
13 | lstm_dim: [128]
14 |
15 | use_rp: [True]
16 | rp_params: [[-1, 3]]
17 | use_metric: [False]
18 | metric_param: [0.01]
19 |
20 | exp_params:
21 | train_valid_ratio: 0.8
22 | label_constraints: [True] #[ True, False ]
23 |
24 | test_flag: False
25 | last_version: 15
26 |
27 | # DNNs
28 | batch_size: [ 50 ] #[50, 100, 150]
29 | learning_rate: [0.00001] #1e-5
30 | num_workers: 0
31 | normalization: ['standard'] #['none', 'minmax', 'standard']
32 |
33 | target_name: ['fault', 'location', 'starttime']
34 |
35 | weight_decay: [0.001]
36 |
37 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
38 |
39 | trainer_params:
40 | max_epochs: 2 # NOTE: by default it is 50
41 | gpus: [3]
42 |
43 | logging_params:
44 | save_dir: './../logs'
45 | name: 'TapNet'
46 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/cnn.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'cnn'
3 | dropout: [0.1] #[0., 0.1]
4 | hidden_layers: [[32, 64]] #[[32, 64, 128]]
5 | kernel_size: [[4, 4]] #[[8, 8, 8]]
6 | stride: [[2, 2]] # [[2, 2, 4]]
7 | pooling: ['max'] #['max', 'avg', 'AdaptiveAvg', 'AdaptiveMax', 'lp']
8 |
9 | exp_params:
10 | train_valid_ratio: 0.8
11 | label_constraints: [True] #[True, False]
12 |
13 | test_flag: False
14 | last_version: 23
15 |
16 | # DNNs
17 | batch_size: [50] #[50, 100, 150]
18 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
19 | num_workers: 0
20 |
21 | normalization: ['standard'] #['none', 'minmax', 'standard']
22 |
23 | target_name: ['fault', 'location', 'starttime']
24 |
25 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
26 |
27 | trainer_params:
28 | max_epochs: 2 # NOTE: default is 50
29 | gpus: [1]
30 |
31 | logging_params:
32 | save_dir: './../logs'
33 | name: 'cnn'
34 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/configs/transformer.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'transformer'
3 | hidden_size: [32] #[32, 64, 128]
4 | num_layers: [2] #[1, 2, 3]
5 | dropout: [0.1] #[0., 0.1]
6 | num_heads: [4]
7 | classification_token: ['first', 'last', 'all']
8 |
9 | exp_params:
10 | train_valid_ratio: 0.8
11 | label_constraints: [True] #[ True, False ]
12 |
13 |
14 | test_flag: False
15 | last_version: 23
16 |
17 | # DNNs
18 | batch_size: [25] #[50, 100, 150]
19 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
20 | num_workers: 0
21 | normalization: ['none', 'minmax', 'standard'] #['none', 'minmax', 'standard']
22 |
23 | target_name: ['fault', 'location', 'starttime']
24 | data_path: '../../../../PSML/processed_datasets/classification.pkl'
25 |
26 | trainer_params:
27 | max_epochs: 2 #NOTE: by default, it is 50
28 | gpus: [1]
29 |
30 | logging_params:
31 | save_dir: './../logs'
32 | name: 'transformer'
33 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/evaluating.py:
--------------------------------------------------------------------------------
1 | # Created by xunannancy at 2021/9/25
2 | import os
3 | import pickle
4 | from sklearn.metrics import balanced_accuracy_score
5 | from .models.utils import compute_MMAE
6 |
7 | def run_evaluate_classification(root, input_dict):
8 | data_path = os.path.join(root, 'processed_dataset', 'classification.pkl')
9 |
10 | with open(data_path, 'rb') as f:
11 | dataset = pickle.load(f)
12 |
13 | label_list, data_split = dataset['label_list'], dataset['data_split']
14 | gt = label_list[data_split['test']]
15 |
16 | # fault type
17 | classification_acc = balanced_accuracy_score(gt[:, 0], input_dict['classification'])
18 | # location
19 | localization_acc = balanced_accuracy_score(gt[:, 1], input_dict['localization'])
20 | # starttime
21 | detection_MMAE = compute_MMAE(gt[:, 2], input_dict['detection'])
22 |
23 | summary = {
24 | '#samples': len(gt),
25 | 'classification': classification_acc,
26 | 'localization': localization_acc,
27 | 'detection': detection_MMAE
28 | }
29 |
30 | print(f'summary: {summary}')
31 |
32 | return
33 |
34 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/models/NNEuclidean.py:
--------------------------------------------------------------------------------
1 | # Created by xunannancy at 2021/9/21
2 | """
3 | 1-NN Euclidean distance
4 | """
5 | import warnings
6 | warnings.filterwarnings('ignore')
7 | import argparse
8 | import yaml
9 | from utils import merge_parameters, run_evaluate
10 | import os
11 | import pickle
12 | from sklearn.neighbors import KNeighborsClassifier
13 | import joblib
14 | from sklearn.preprocessing import StandardScaler, MinMaxScaler
15 |
16 | def run_NNEuclidean(config):
17 | saved_folder = os.path.join(config['logging_params']['save_dir'], config['logging_params']['name'])
18 | if config['exp_params']['test_flag']:
19 | last_version = config['exp_params']['last_version'] - 1
20 | else:
21 | if not os.path.exists(saved_folder):
22 | os.makedirs(saved_folder)
23 | last_version = -1
24 | else:
25 | last_version = sorted([int(i.split('_')[1]) for i in os.listdir(saved_folder)])[-1]
26 | log_dir = os.path.join(saved_folder, f'version_{last_version+1}')
27 | if not os.path.exists(log_dir):
28 | os.makedirs(log_dir)
29 |
30 | data_path = config['exp_params']['data_path']
31 |
32 | with open(data_path, 'rb') as f:
33 | dataset = pickle.load(f)
34 |
35 | feature_list, label_list, data_split = dataset['feature_list'], dataset['label_list'], dataset['data_split']
36 | train_x, train_y = feature_list[data_split['train']], label_list[data_split['train']]
37 | test_x = feature_list[data_split['test']]
38 | seqlen, feature_dim = train_x.shape[1], train_x.shape[2]
39 |
40 | if config['exp_params']['normalization'] != 'none':
41 | if config['exp_params']['normalization'] == 'minmax':
42 | scalar_x = MinMaxScaler()
43 | elif config['exp_params']['normalization'] == 'standard':
44 | scalar_x = StandardScaler()
45 |         scalar_x = scalar_x.fit(train_x.reshape([len(train_y)*seqlen, feature_dim]))
46 |         train_x = scalar_x.transform(train_x.reshape([len(train_y)*seqlen, feature_dim])).reshape([len(train_y), seqlen, feature_dim])
47 |         test_size = len(test_x)
48 |         test_x = scalar_x.transform(test_x.reshape([test_size * seqlen, feature_dim])).reshape([test_size, seqlen, feature_dim])
49 |
50 | model_path = os.path.join(log_dir, 'model.joblib')
51 | if not config['exp_params']['test_flag']:
52 | neigh = KNeighborsClassifier(n_neighbors=1, p=2, metric='minkowski')
53 | neigh.fit(train_x.reshape([-1, seqlen * feature_dim]), train_y)
54 | print(f'save model at {model_path}')
55 | joblib.dump(neigh, model_path)
56 |
57 |
58 | print(f'load model from {model_path}')
59 | model = joblib.load(model_path)
60 | predictions = model.predict(test_x.reshape([-1, seqlen * feature_dim]))
61 |
62 | with open(os.path.join(log_dir, 'predictions.pkl'), 'wb') as f:
63 | pickle.dump(predictions, f)
64 |
65 | if not os.path.exists(os.path.join(log_dir, 'config.yaml')):
66 | with open(os.path.join(log_dir, 'config.yaml'), 'w') as f:
67 | yaml.dump(config, f)
68 |
69 | evaluate_config = {
70 | 'exp_params': {
71 | 'prediction_path': log_dir,
72 | 'data_path': config['exp_params']['data_path'],
73 | },
74 | }
75 | run_evaluate(config=evaluate_config, verbose=False)
76 | return
77 |
78 | if __name__ == '__main__':
79 | parser = argparse.ArgumentParser(description=None)
80 |
81 | args = vars(parser.parse_args())
82 | with open('./../configs/NNEuclidean.yaml', 'r') as file:
83 | try:
84 | config = yaml.safe_load(file)
85 | except yaml.YAMLError as exc:
86 | print(exc)
87 | config = merge_parameters(args, config)
88 | print(f'after merge: config, {config}')
89 |
90 | run_NNEuclidean(config)
91 |
92 | """
93 | minmax:
94 | summary: {'#samples': 110, 'fault': 0.6748065233998738, 'location': 0.5150019357336431, 'starttime': 29.152439024390244}
95 | std:
96 | summary: {'#samples': 110, 'fault': 0.70933669910652, 'location': 0.5113240418118468, 'starttime': 33.32723577235772}
97 |
98 | """
99 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/EventClassification/requirements.txt:
--------------------------------------------------------------------------------
1 | --find-links https://download.pytorch.org/whl/torch_stable.html
2 | torch==1.8.1+cu111
3 | pytorch-lightning==1.3.1
4 | scikit-learn==0.24.2
5 | pandas==1.2.4
6 | matplotlib==3.4.2
7 | tensorflow==2.4.1
8 | test-tube==0.7.5
9 | git+git://github.com/sktime/sktime-dl.git@7a5e6#egg=sktime-dl
10 | tslearn==0.5.2
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/README.md:
--------------------------------------------------------------------------------
1 | # Load and Renewable Energy Forecasting
2 | Here we describe how to reproduce the benchmark results for point forecast (PF) and
3 | prediction interval (PI) on load, solar and wind power, given weather and date information.
4 | ## Relevant Packages Install
5 | - Create and activate anaconda virtual environment
6 | ```angular2html
7 | conda create -n EnergyForecasting python=3.7.10
8 | conda activate EnergyForecasting
9 | ```
10 | - Install required packages
11 | ```angular2html
12 | pip install -r requirements.txt
13 | ```
14 | ## Benchmark Results Reproduction
15 | You can find all codes of different models in folder `models` and their respective configuration files in folder
16 | `configs`. Change the configurations in `***.yaml` from `configs` and run `***.py` from `models` for model training
17 | and evaluation. Or you can directly run `***.py` from `models` with configuration hyperparameters in the command line.
18 |
19 | **NOTE:** The `data_folder` in configuration files is the path to the **raw** `minute-level load and renewable` dataset folder. For experiment log,
20 | you can check the folder named `logs`, or you can name your own log folder by setting `logging_params:save_dir` in
21 | configuration files.
22 |
23 | #### Implementation Details
24 | `num_files` in command line indicates the number of location-year for load, solar and wind power forecasting. There are 66 locations
25 | in total and measurements in 2018, 2019 and 2020 are provided. Therefore, setting `num_files=198` will training and testing individual models
26 | for each location per year.
27 |
28 | Configurations in `config` folder are consistent of what we've tried in dataset benchmark submission, except the
29 | `max_epochs`, which should be 50 by default. GPU acceleration is supported for both Pytorch and Tensorflow.
30 | When considering temporal dependencies, we use `sliding_window=120` by default.
31 | ## References
32 |
33 | 1. **N-BEATS**
34 |
35 | Oreshkin, Boris N., et al. "N-BEATS: Neural basis expansion analysis for interpretable time series forecasting." arXiv preprint arXiv:1905.10437 (2019).
36 |
37 | https://github.com/ElementAI/N-BEATS.git
38 |
39 | 1. **WaveNet**
40 |
41 | Oord, Aaron van den, et al. "Wavenet: A generative model for raw audio." arXiv preprint arXiv:1609.03499 (2016).
42 |
43 | https://github.com/vincentherrmann/pytorch-wavenet.git
44 |
45 | 1. **TCN**
46 |
47 | Lea, Colin, et al. "Temporal convolutional networks for action segmentation and detection." proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2017.
48 |
49 | https://github.com/locuslab/TCN.git
50 |
51 | 1. **LSTNet**
52 |
53 | Lai, Guokun, et al. "Modeling long-and short-term temporal patterns with deep neural networks." The 41st International ACM SIGIR Conference on Research & Development in Information Retrieval. 2018.
54 |
55 | https://github.com/laiguokun/LSTNet.git
56 |
57 | 1. **DeepAR**
58 |
59 | Salinas, David, et al. "DeepAR: Probabilistic forecasting with autoregressive recurrent networks." International Journal of Forecasting 36.3 (2020): 1181-1191.
60 |
61 | https://github.com/zhykoties/TimeSeries.git
62 |
63 | 1. **Informer**
64 |
65 | Zhou, Haoyi, et al. "Informer: Beyond efficient transformer for long sequence time-series forecasting." Proceedings of AAAI. 2021.
66 |
67 | https://github.com/zhouhaoyi/Informer2020.git
68 |
69 | 1. **Neural ODE**
70 |
71 | Chen, Ricky TQ, et al. "Neural ordinary differential equations." arXiv preprint arXiv:1806.07366 (2018).
72 |
73 | https://github.com/rtqichen/torchdiffeq.git
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/DeepAR.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'DeepAR'
3 | dropout: [0.1]
4 | hidden_dim: [40]
5 | hidden_layers: [3]
6 |
7 | exp_params:
8 | variate: ['multi'] #['uni', 'multi']
9 |
10 | sliding_window: [ 120 ]
11 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
12 |
13 | train_valid_ratio: 0.9
14 |
15 | external_flag: [True, False]
16 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
17 |
18 | test_flag: False
19 | last_version: 11
20 |
21 | # DNNs
22 | batch_size: [128] #[50, 100, 150]
23 | learning_rate: [0.001] #[0.1, 0.01, 0.001]
24 | num_workers: 0
25 | normalization: ['minmax'] #['none', 'minmax', 'standard']
26 |
27 | prediction_interval: 0.95
28 |
29 | data_folder: '../../../../PSML/processed_datasets/forecasting'
30 |
31 | trainer_params:
32 | max_epochs: 2 #NOTE: default is 50
33 | gpus: [3]
34 |
35 | logging_params:
36 | save_dir: './../logs'
37 | name: 'DeepAR'
38 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/ELM.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'ELM'
3 | hidden_size: [64] #[32, 64, 128]
4 | num_layers: [3] #[1, 2, 3]
5 | dropout: [0.1]
6 |
7 | exp_params:
8 | sliding_window: [ 120 ]
9 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
10 |
11 | train_valid_ratio: 0.9
12 |
13 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
14 |
15 | test_flag: False
16 | last_version: 6
17 |
18 | # DNNs
19 | batch_size: [50] #[50, 100, 150]
20 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
21 | num_workers: 0
22 | normalization: ['minmax'] #['none', 'minmax', 'standard']
23 |
24 | prediction_interval: 0.95
25 |
26 | data_folder: '../../../../PSML/processed_datasets/forecasting'
27 |
28 | trainer_params:
29 | max_epochs: 2 # by default, it should be 50
30 | gpus: [1]
31 |
32 | logging_params:
33 | save_dir: './../logs'
34 | name: 'ELM'
35 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/FNN.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'FNN'
3 | hidden_size: [64] #[32, 64, 128]
4 | num_layers: [3] #[1, 2, 3]
5 | dropout: [0.1]
6 |
7 | exp_params:
8 | sliding_window: [ 120 ]
9 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
10 |
11 | train_valid_ratio: 0.9
12 |
13 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
14 |
15 | test_flag: False
16 | last_version: 12
17 |
18 | # DNNs
19 | batch_size: [50] #[50, 100, 150]
20 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
21 | num_workers: 0
22 | normalization: ['minmax'] #, 'standard'] #['none', 'minmax', 'standard']
23 |
24 | prediction_interval: 0.95
25 |
26 | data_folder: '../../../../PSML/processed_datasets/forecasting'
27 |
28 | trainer_params:
29 | max_epochs: 2 #NOTE: Default is 50
30 | gpus: [1]
31 |
32 | logging_params:
33 | save_dir: './../logs'
34 | name: 'FNN'
35 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/LSTNet.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'LSTNet'
3 | dropout: [0.2] #[0., 0.1]
4 | hidRNN: [100]
5 | hidCNN: [100]
6 | hidSkip: [10]
7 | cnn_kernel: [6]
8 | skip: [24]
9 | highway_window: [24]
10 |
11 | exp_params:
12 |
13 | sliding_window: [ 120 ]
14 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
15 |
16 | train_valid_ratio: 0.9
17 |
18 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
19 |
20 | test_flag: False
21 | last_version: 23
22 |
23 | # DNNs
24 | batch_size: [128] #[50, 100, 150]
25 | learning_rate: [0.001] #[0.1, 0.01, 0.001]
26 | num_workers: 0
27 | normalization: ['minmax'] #['none', 'minmax', 'standard']
28 |
29 | prediction_interval: 0.95
30 |
31 | data_folder: '../../../../PSML/processed_datasets/forecasting'
32 |
33 | trainer_params:
34 | max_epochs: 2 #NOTE: by default it's 50
35 | gpus: [2]
36 |
37 | logging_params:
38 | save_dir: './../logs'
39 | name: 'LSTNet'
40 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/NBeats.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'NBeats'
3 | stacks: [30]
4 | layers: [4]
5 | layer_size: [512]
6 |
7 | exp_params:
8 | # variate: ['uni', 'multi'] #['single', 'multiple']
9 |
10 | sliding_window: [ 120 ]
11 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
12 |
13 | train_valid_ratio: 0.9
14 |
15 | external_flag: [True, False] #[True, False]
16 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
17 |
18 | test_flag: False
19 | last_version: 11
20 |
21 | # DNNs
22 | batch_size: [128] #[50, 100, 150]
23 | learning_rate: [0.001] #[0.1, 0.01, 0.001]
24 | num_workers: 0
25 | normalization: ['minmax'] #['none', 'minmax', 'standard']
26 |
27 | prediction_interval: 0.95
28 |
29 | data_folder: '../../../../PSML/processed_datasets/forecasting'
30 |
31 | trainer_params:
32 | max_epochs: 2 # by default it's 50
33 | gpus: [3]
34 |
35 | logging_params:
36 | save_dir: './../logs'
37 | name: 'NBeats'
38 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/NeuralODE.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'NeuralODE'
3 | latent_dim: [128] #4
4 | nhidden: [128] #20
5 | rnn_nhidden: [128] #25
6 | noise_std: [0.3]
7 |
8 | exp_params:
9 | sliding_window: [ 120 ]
10 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
11 |
12 | train_valid_ratio: 0.9
13 |
14 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
15 |
16 | test_flag: False
17 | last_version: 17
18 |
19 | # DNNs
20 | batch_size: [128] # 128 for training
21 | learning_rate: [0.001] #[0.1, 0.01, 0.001]
22 | num_workers: 0
23 | normalization: ['minmax'] #['none', 'minmax', 'standard']
24 |
25 | prediction_interval: 0.95
26 |
27 | data_folder: '../../../../PSML/processed_datasets/forecasting'
28 |
29 | trainer_params:
30 | max_epochs: 2 # by default, it's 50
31 | gpus: [1]
32 |
33 | logging_params:
34 | save_dir: './../logs'
35 | name: 'NeuralODE'
36 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/RNN.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'LSTM' # ['RNN', 'GRU', 'LSTM']
3 | hidden_size: [64] #[32, 64, 128]
4 | num_layers: [2] #[1, 2, 3]
5 | direction: ['bi', 'uni']
6 | dropout: [0.1] #[0., 0.1]
7 |
8 | exp_params:
9 | sliding_window: [ 120 ]
10 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
11 |
12 | train_valid_ratio: 0.9
13 |
14 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
15 |
16 | test_flag: False
17 | last_version: 23
18 |
19 | # DNNs
20 | batch_size: [50] #[50, 100, 150]
21 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
22 | num_workers: 0
23 |
24 | normalization: ['minmax'] #['none', 'minmax', 'standard']
25 | prediction_interval: 0.95
26 |
27 | data_folder: '../../../../PSML/processed_datasets/forecasting'
28 |
29 | trainer_params:
30 | max_epochs: 2 #NOTE: by default it's 50
31 | gpus: [1]
32 |
33 | logging_params:
34 | save_dir: './../logs'
35 | name: 'RNN'
36 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/WaveNet.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'WaveNetpy' # ['RNN', 'GRU', 'LSTM']
3 | layers: [3]
4 | blocks: [2]
5 | dilation_channels: [32]
6 | residual_channels: [32]
7 | skip_channels: [1024]
8 | end_channels: [512]
9 | kernel_size: [2]
10 |
11 | exp_params:
12 |
13 | sliding_window: [ 120 ]
14 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
15 |
16 | train_valid_ratio: 0.8
17 |
18 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
19 |
20 | test_flag: False
21 | last_version: 65
22 |
23 | # DNNs
24 | batch_size: [250] #[50, 100, 150]
25 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
26 | num_workers: 0
27 | normalization: ['minmax'] # ['none', 'minmax', 'standard']
28 |
29 | weight_decay: 0.2
30 |
31 | prediction_interval: 0.95
32 |
33 | data_folder: '../../../../PSML/processed_datasets/forecasting'
34 |
35 | trainer_params:
36 | max_epochs: 2 # NOTE: by default it's 50
37 | gpus: [3]
38 |
39 | logging_params:
40 | save_dir: './../logs'
41 | name: 'WaveNetpy'
42 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/arima.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'arima'
3 |
4 | exp_params:
5 | variate: ['uni'] #['uni', 'multi']
6 | sliding_window: [120]
7 | p_values: [2] #[1, 2, 4, 6, 8, 10]
8 | d_values: [2] #[1, 2]
9 | q_values: [2] #[1, 2]
10 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
11 |
12 | train_valid_ratio: 0.99
13 |
14 | external_feature_flag: [False] #[False, True]
15 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
16 |
17 | test_flag: False
18 | last_version: 25
19 |
20 | prediction_interval: 0.95
21 |
22 | data_folder: '../../../../PSML/processed_datasets/forecasting'
23 |
24 | trainer_params:
25 |
26 |
27 | logging_params:
28 | save_dir: './../logs'
29 | name: 'arima'
30 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/cnn.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'cnn'
3 | dropout: [0.1] #[0., 0.1]
4 | hidden_layers: [[32, 64]] #[[32, 64, 128]]
5 | kernel_size: [[4, 4]] #[[8, 8, 8]]
6 | stride: [[2, 2]] # [[2, 2, 4]]
7 | pooling: ['max'] #['max', 'avg', 'AdaptiveAvg', 'AdaptiveMax', 'lp']
8 |
9 | exp_params:
10 | sliding_window: [ 120 ]
11 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
12 |
13 | train_valid_ratio: 0.9
14 |
15 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
16 |
17 | test_flag: False
18 | last_version: 4
19 |
20 | # DNNs
21 | batch_size: [50] #[50, 100, 150]
22 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
23 | num_workers: 0
24 |
25 | normalization: ['minmax'] #['none', 'minmax', 'standard']
26 |
27 | prediction_interval: 0.95
28 |
29 | data_folder: '../../../../PSML/processed_datasets/forecasting'
30 |
31 | trainer_params:
32 | max_epochs: 2 # NOTE: default is 50
33 | gpus: [1]
34 |
35 | logging_params:
36 | save_dir: './../logs'
37 | name: 'cnn'
38 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/exponential_smoothing.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'exponential_smoothing'
3 |
4 | exp_params:
5 | sliding_window: [ 1440 ]
6 | # ep_method: 'simple' # in ['simple', 'Holt', 'Holt Winters']
7 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
8 |
9 | train_valid_ratio: 0.99
10 |
11 | test_flag: False
12 | last_version: 2
13 |
14 | prediction_interval: 0.95
15 |
16 | data_folder: '../../../../PSML/processed_datasets/forecasting'
17 |
18 | trainer_params:
19 |
20 |
21 | logging_params:
22 | save_dir: './../logs'
23 | name: 'exponential_smoothing'
24 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/gradient_boosting.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'gradient_boosting'
3 | n_estimators: [100]
4 | learning_rate: [1., 0.1, 0.01]
5 |
6 | exp_params:
7 | variate: [ 'uni', 'multi' ] #['uni', 'multi']
8 | sliding_window: [120]
9 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
10 |
11 | train_valid_ratio: 1000
12 |
13 | external_feature_flag: [True, False]
14 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
15 |
16 | test_flag: False
17 | last_version: 16
18 |
19 | prediction_interval: 0.95
20 |
21 | data_folder: '../../../../PSML/processed_datasets/forecasting'
22 |
23 | trainer_params:
24 |
25 | logging_params:
26 | save_dir: './../logs'
27 | name: 'gradient_boosting'
28 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/informer.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'informer'
3 | dropout: [0.05]
4 | autoregressive: [False] #[True, False]
5 | label_len: [60]
6 | attn: ['prob']
7 | d_model: [128] #[512]
8 | factor: [5]
9 | n_heads: [8]
10 | e_layers: [2]
11 | d_layers: [1]
12 | d_ff: [512] #[2048]
13 | activation: ['gelu']
14 | distil: [True]
15 | mix: [True]
16 |
17 | exp_params:
18 | sliding_window: [ 120 ]
19 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
20 |
21 | train_valid_ratio: 0.9
22 |
23 | external_features: ['DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
24 | time_features: [ 'month_day', 'weekday', 'holiday' ]
25 |
26 | test_flag: False
27 | last_version: 19
28 |
29 | # DNNs
30 | batch_size: [128] # 128 for training
31 | learning_rate: [0.001] #[0.1, 0.01, 0.001]
32 | num_workers: 0
33 | normalization: ['none', 'minmax', 'standard'] #['none', 'minmax', 'standard']
34 |
35 | prediction_interval: 0.95
36 |
37 | data_folder: '../../../../PSML/processed_datasets/forecasting'
38 |
39 | trainer_params:
40 | max_epochs: 2 #NOTE: by default it's 50
41 | gpus: [1]
42 |
43 | logging_params:
44 | save_dir: './../logs'
45 | name: 'informer'
46 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/linear_regression.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'linear_regression'
3 | normalize: [False, True]
4 |
5 | exp_params:
6 | variate: [ 'uni', 'multi' ] #['uni', 'multi']
7 | sliding_window: [120]
8 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
9 |
10 | train_valid_ratio: 2000
11 |
12 | external_feature_flag: [True, False]
13 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
14 |
15 | test_flag: False
16 | last_version: 16
17 |
18 | prediction_interval: 0.95
19 |
20 | data_folder: '../../../../PSML/processed_datasets/forecasting'
21 |
22 | trainer_params:
23 |
24 | logging_params:
25 | save_dir: './../logs'
26 | name: 'linear_regression'
27 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/naive.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'naive'
3 |
4 | exp_params:
5 | # feature_name: 'load' # either in ['solar', 'wind', 'load']
6 | # prediction_horizon: [60, 1440]
7 | # location_type: 'single' # 'single' and 'multiple' should be same for naive methods
8 |
9 | test_flag: False
10 | last_version: 1
11 |
12 | prediction_interval: 0.95
13 |
14 | data_folder: '../../../../PSML/processed_datasets/forecasting'
15 |
16 | trainer_params:
17 |
18 | logging_params:
19 | save_dir: './../logs'
20 | name: 'naive'
21 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/random_forest.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'random_forest'
3 | n_estimators: [100]
4 | criterion: ['mse']
5 |
6 | exp_params:
7 | variate: [ 'uni', 'multi' ] #['uni', 'multi']
8 |
9 | sliding_window: [120]
10 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
11 |
12 | train_valid_ratio: 1000
13 |
14 | external_feature_flag: [True, False]
15 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
16 |
17 | test_flag: False
18 | last_version: 16
19 |
20 | prediction_interval: 0.95
21 |
22 | data_folder: '../../../../PSML/processed_datasets/forecasting'
23 |
24 | trainer_params:
25 |
26 | logging_params:
27 | save_dir: './../logs'
28 | name: 'random_forest'
29 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/svr.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'svr'
3 | kernel: ['rbf'] #, 'poly', 'rbf', 'sigmoid']
4 |
5 |
6 | exp_params:
7 | variate: [ 'uni', 'multi' ] #['uni', 'multi']
8 | sliding_window: [ 120 ]
9 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
10 |
11 | external_feature_flag: [True, False]
12 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
13 |
14 | train_valid_ratio: 2000
15 |
16 | test_flag: False
17 | last_version: 17
18 |
19 | prediction_interval: 0.95
20 |
21 | data_folder: '../../../../PSML/processed_datasets/forecasting'
22 |
23 | trainer_params:
24 |
25 | logging_params:
26 | save_dir: './../logs'
27 | name: 'svr'
28 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/tcn.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'tcn' # ['RNN', 'GRU', 'LSTM']
3 | dropout: [0.] #[0., 0.1]
4 | nhid: [600]
5 | levels: [4]
6 | kernel_size: [3]
7 | classification_token: ['first', 'last', 'all'] #['all']
8 |
9 | exp_params:
10 | sliding_window: [ 120 ]
11 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
12 |
13 | train_valid_ratio: 0.9
14 |
15 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
16 |
17 | test_flag: False
18 | last_version: 23
19 |
20 | # DNNs
21 | batch_size: [50] #[50, 100, 150]
22 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
23 | num_workers: 0
24 | normalization: ['minmax'] #['none', 'minmax', 'standard']
25 |
26 | prediction_interval: 0.95
27 |
28 | data_folder: '../../../../PSML/processed_datasets/forecasting'
29 |
30 | trainer_params:
31 | max_epochs: 2 #NOTE: by default it's 50
32 | gpus: [1]
33 |
34 | logging_params:
35 | save_dir: './../logs'
36 | name: 'tcn'
37 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/configs/transformer.yaml:
--------------------------------------------------------------------------------
1 | model_params:
2 | model_name: 'transformer' # ['RNN', 'GRU', 'LSTM']
3 | hidden_size: [64] #[32, 64, 128]
4 | num_layers: [2] #[1, 2, 3]
5 | dropout: [0.1] #[0., 0.1]
6 | num_heads: [8]
7 | classification_token: ['first', 'last', 'all']
8 |
9 | exp_params:
10 | sliding_window: [ 120 ]
11 | selection_metric: 'RMSE' # one of ['RMSE', 'MAE', 'MAPE']
12 |
13 | train_valid_ratio: 0.9
14 |
15 | external_features: ['month_day', 'weekday', 'holiday', 'DHI', 'DNI', 'GHI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']
16 |
17 | test_flag: False
18 | last_version: 23
19 |
20 | # DNNs
21 | batch_size: [50] #[50, 100, 150]
22 | learning_rate: [0.01] #[0.1, 0.01, 0.001]
23 | num_workers: 0
24 | normalization: ['minmax'] #['none', 'minmax', 'standard']
25 |
26 | prediction_interval: 0.95
27 |
28 | data_folder: '../../../../PSML/processed_datasets/forecasting'
29 |
30 | trainer_params:
31 | max_epochs: 2 # NOTE: by default it's 50
32 | gpus: [2]
33 |
34 | logging_params:
35 | save_dir: './../logs'
36 | name: 'transformer'
37 | manual_seed: 0
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/evaluating.py:
--------------------------------------------------------------------------------
1 | # Created by xunannancy at 2021/9/25
2 | import os
3 | from tqdm import tqdm
4 | import pandas as pd
5 | import numpy as np
6 | from collections import OrderedDict
7 | from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
8 |
9 | task_prediction_horizon = OrderedDict({
10 | 'load': [60, 1440],
11 | 'wind': [5, 30],
12 | 'solar': [5, 30],
13 | })
14 |
15 | def perform_evaluate(gt_file, pred):
16 | gt_data = pd.read_csv(gt_file)
17 | train_flag = gt_data['train_flag'].to_numpy()
18 | testing_index = sorted(np.argwhere(train_flag == 0).reshape([-1]))
19 | gt_testing_data = gt_data.iloc[testing_index]
20 |
21 | pred_data = pd.DataFrame(
22 | data=np.transpose(np.array(list(pred.values())), (1, 0)),
23 | columns=list(pred.keys()),
24 | )
25 |
26 | # combine
27 | merged_results = pd.merge(left=gt_testing_data, right=pred_data, how='left', on='ID')
28 |
29 | results = dict()
30 |
31 | for task_name, task_prediction_horizon_list in task_prediction_horizon.items():
32 | for horizon_index, horizon_val in enumerate(task_prediction_horizon_list):
33 | cur_gt_val, cur_gt_flag, cur_pred_mean = merged_results[f'y{task_name[0]}_t+{horizon_val}(val)'].to_numpy(), merged_results[f'y{task_name[0]}_t+{horizon_val}(flag)'].to_numpy(), merged_results[f'y{task_name[0]}_t+{horizon_val}(mean)'].to_numpy()
34 | cur_pred_U, cur_pred_L = merged_results[f'y{task_name[0]}_t+{horizon_val}(U)'].to_numpy(), merged_results[f'y{task_name[0]}_t+{horizon_val}(L)'].to_numpy()
35 | selected_index = sorted(np.argwhere(cur_gt_flag == 1).reshape([-1]))
36 | valid_gt = cur_gt_val[selected_index]
37 | val_pred_mean = cur_pred_mean[selected_index]
38 | val_pred_U, val_pred_L = cur_pred_U[selected_index], cur_pred_L[selected_index]
39 | results[f'y{task_name[0]}_t+{horizon_val}'] = [valid_gt, val_pred_mean, val_pred_U, val_pred_L]
40 | return results
41 |
42 | def run_evaluate_forecasting(root, input_dict, prediction_interval=0.95):
43 | data_folder = os.path.join(root, 'processed_dataset', 'forecasting')
44 |
45 | gt_file_dict = dict()
46 | for i in os.listdir(data_folder):
47 | cur_year = int(i.split('.')[0].split('_')[-1])
48 | if cur_year not in gt_file_dict:
49 | gt_file_dict[cur_year] = [i]
50 | else:
51 | gt_file_dict[cur_year].append(i)
52 |
53 | summary = dict()
54 | for year, file_list in gt_file_dict.items():
55 | summary[year] = dict()
56 | file_counter = 0
57 | total_results = dict()
58 | for gt_file in tqdm(file_list):
59 | if gt_file.split('.')[0] not in input_dict:
60 | continue
61 | cur_results = perform_evaluate(os.path.join(data_folder, gt_file), input_dict[gt_file.split('.')[0]])
62 | for key, val in cur_results.items():
63 | gt, pred_mean, pred_U, pred_L = val[0], val[1], val[2], val[3]
64 | if key not in total_results:
65 | total_results[key] = [gt, pred_mean, pred_U, pred_L]
66 | else:
67 | total_results[key][0] = np.concatenate([total_results[key][0], gt])
68 | total_results[key][1] = np.concatenate([total_results[key][1], pred_mean])
69 | total_results[key][2] = np.concatenate([total_results[key][2], pred_U])
70 | total_results[key][3] = np.concatenate([total_results[key][3], pred_L])
71 |
72 | file_counter += 1
73 | for key, val in total_results.items():
74 | gt, pred_mean, pred_U, pred_L = val[0], val[1], val[2], val[3]
75 | RMSE = np.sqrt(mean_squared_error(gt, pred_mean))
76 | MAE = mean_absolute_error(gt, pred_mean)
77 | MAPE = mean_absolute_percentage_error(gt, pred_mean)
78 |             a = prediction_interval  # note: the interval level itself (e.g. 0.95) is used as the denominator
79 |             term1 = pred_U - pred_L  # interval width
80 |             term2 = 2./a * (pred_L - gt) * (gt < pred_L)  # penalty when the target falls below the lower bound
81 |             term3 = 2./a * (gt - pred_U) * (gt > pred_U)  # penalty when the target rises above the upper bound
82 |             MSIS = np.mean(term1 + term2 + term3)  # / config['exp_params']['naive_scale'][int(horizon_val.split('+')[1])]
83 | summary[year][key] = {
84 | '#locs': file_counter,
85 | 'RMSE': RMSE,
86 | 'MAE': MAE,
87 | 'MAPE': MAPE,
88 | 'MSIS': MSIS
89 | }
90 | summary = OrderedDict(sorted(summary.items()))
91 | print(f'summary: {summary}')
92 | return summary
93 |
94 |
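For reference, a minimal usage sketch of `run_evaluate_forecasting` (the file stem, sizes, and values below are hypothetical; the column layout is inferred from `perform_evaluate`, which merges predictions onto the ground truth by `ID` and expects a `(mean)`/`(U)`/`(L)` triplet per task and horizon):

```python
# Hypothetical sketch, with the functions above in scope: build an input_dict
# whose keys are ground-truth CSV stems (ending in "_<year>") and whose values
# map column names to equal-length 1-D arrays, including the 'ID' merge column.
import numpy as np

n = 100  # number of rows predicted for this location (illustrative)
pred = {'ID': np.arange(n)}
for task, horizons in {'load': [60, 1440], 'wind': [5, 30], 'solar': [5, 30]}.items():
    for h in horizons:
        mean = np.random.rand(n)
        pred[f'y{task[0]}_t+{h}(mean)'] = mean     # point forecast
        pred[f'y{task[0]}_t+{h}(U)'] = mean + 0.1  # upper bound of the interval
        pred[f'y{task[0]}_t+{h}(L)'] = mean - 0.1  # lower bound of the interval

# 'some_location_2019' is a placeholder stem; root must contain
# processed_dataset/forecasting/some_location_2019.csv
summary = run_evaluate_forecasting(root='.', input_dict={'some_location_2019': pred})
```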
--------------------------------------------------------------------------------
/Code/BenchmarkModel/LoadForecasting/requirements.txt:
--------------------------------------------------------------------------------
1 | --find-links https://download.pytorch.org/whl/torch_stable.html
2 | torch==1.8.1+cu111
3 | pytorch-lightning==1.3.1
4 | scikit-learn==0.24.2
5 | pandas==1.2.4
6 | matplotlib==3.4.2
7 | tensorflow==2.4.1
8 | test-tube==0.7.5
9 | torchdiffeq==0.2.2
--------------------------------------------------------------------------------
/Code/BenchmarkModel/README.md:
--------------------------------------------------------------------------------
1 | We provide baseline methods used in our benchmarks for all three tasks. Each set of methods is hosted in its own folder. Unless otherwise specified, the default hyper-parameters are what we used to generate the results in our paper and leaderboards. Please read their respective README.md files for further information on how to run them.
2 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/LICENSE:
--------------------------------------------------------------------------------
1 | The Clear BSD License
2 |
3 | Copyright (c) 2019 Carnegie Mellon University
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
7 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
8 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
9 | * Neither the name of Carnegie Mellon University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
10 |
11 | NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/example_generating_data/config_generate_data.py:
--------------------------------------------------------------------------------
1 | config = {
2 | "scheduler_config": {
3 | "gpu": ["0"],
4 | "config_string_value_maxlen": 1000,
5 | "result_root_folder": "../results/",
6 | "scheduler_log_file_path": "scheduler_generate_data.log",
7 | "log_file": "worker_generate_data.log",
8 | "force_rerun": True
9 | },
10 |
11 | "global_config": {
12 | "batch_size": 100,
13 | "vis_freq": 200,
14 | "vis_num_sample": 5,
15 | "d_rounds": 1,
16 | "g_rounds": 1,
17 | "num_packing": 1,
18 | "noise": True,
19 | "feed_back": False,
20 | "g_lr": 0.001,
21 | "d_lr": 0.001,
22 | "d_gp_coe": 10.0,
23 | "gen_feature_num_layers": 1,
24 | "gen_feature_num_units": 100,
25 | "gen_attribute_num_layers": 3,
26 | "gen_attribute_num_units": 100,
27 | "disc_num_layers": 5,
28 | "disc_num_units": 200,
29 | "initial_state": "random",
30 |
31 | "attr_d_lr": 0.001,
32 | "attr_d_gp_coe": 10.0,
33 | "g_attr_d_coe": 1.0,
34 | "attr_disc_num_layers": 5,
35 | "attr_disc_num_units": 200,
36 |
37 | "generate_num_train_sample": 50000,
38 | "generate_num_test_sample": 50000
39 | },
40 |
41 | "test_config": [
42 | {
43 | "dataset": ["google"],
44 | "epoch": [400],
45 | "run": [0, 1, 2],
46 | "sample_len": [1, 5, 10],
47 | "extra_checkpoint_freq": [5],
48 | "epoch_checkpoint_freq": [1],
49 | "aux_disc": [False],
50 | "self_norm": [False]
51 | },
52 | {
53 | "dataset": ["web"],
54 | "epoch": [400],
55 | "run": [0, 1, 2],
56 | "sample_len": [1, 5, 10, 25, 50],
57 | "extra_checkpoint_freq": [5],
58 | "epoch_checkpoint_freq": [1],
59 | "aux_disc": [True],
60 | "self_norm": [True]
61 | },
62 | {
63 | "dataset": ["FCC_MBA"],
64 | "epoch": [17000],
65 | "run": [0, 1, 2],
66 | "sample_len": [1, 4, 8],
67 | "extra_checkpoint_freq": [850],
68 | "epoch_checkpoint_freq": [70],
69 | "aux_disc": [False],
70 | "self_norm": [False]
71 | }
72 | ]
73 | }
74 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/example_generating_data/main_generate_data.py:
--------------------------------------------------------------------------------
1 | if __name__ == "__main__":
2 | from gan_generate_data_task import GANGenerateDataTask
3 | from config_generate_data import config
4 | from gpu_task_scheduler.gpu_task_scheduler import GPUTaskScheduler
5 |
6 | scheduler = GPUTaskScheduler(
7 | config=config, gpu_task_class=GANGenerateDataTask)
8 | scheduler.start()
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/example_main(without_GPUTaskScheduler)/main.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | warnings.simplefilter(action='ignore', category=FutureWarning)
3 |
4 | import sys
5 | sys.path.append("..")
6 |
7 | from gan import output
8 | sys.modules["output"] = output
9 |
10 | from gan.doppelganger import DoppelGANger
11 | from gan.util import add_gen_flag, normalize_per_sample
12 | from gan.load_data import load_data
13 | from gan.network import DoppelGANgerGenerator, Discriminator, AttrDiscriminator
14 | import os
15 | import tensorflow as tf
16 | import numpy as np
17 | import pandas as pd
18 | from sklearn import preprocessing
19 | import pickle
20 | from scipy.interpolate import InterpolatedUnivariateSpline
21 |
22 |
23 | def min_max_scale(data):
24 | min_val = np.min(np.min(data, axis = 0), axis = 0)
25 | data = data - min_val
26 | max_val = np.max(np.max(data, axis = 0), axis = 0)
27 | norm_data = data / (max_val + 1e-7)
28 | return norm_data, min_val, max_val
29 |
30 | if __name__ == "__main__":
31 | sample_len = 10
32 | num_channels = 91
33 |
34 | data_npz = np.load("../../data/real_train.npz")
35 | data_feature, min_val, max_val = min_max_scale(data_npz["trans"])
36 | data_attribute = data_npz["y"]
37 | data_gen_flag = np.ones((data_feature.shape[0], data_feature.shape[1]))
38 |
39 | data_feature_outputs = [ # 91 channels
40 | output.Output(output.OutputType.CONTINUOUS, 1, output.Normalization.ZERO_ONE, is_gen_flag=False)
41 | for _ in range(num_channels)]
42 |
43 | data_attribute_outputs = [ # 1 categorical feature with 5 possibilities
44 | output.Output(output.OutputType.DISCRETE, 5, None, is_gen_flag=False),]
45 |
46 | (data_feature, data_attribute, data_attribute_outputs,
47 | real_attribute_mask) = \
48 | normalize_per_sample(
49 | data_feature, data_attribute, data_feature_outputs,
50 | data_attribute_outputs)
51 |
52 | data_feature, data_feature_outputs = add_gen_flag(
53 | data_feature, data_gen_flag, data_feature_outputs, sample_len)
54 |
55 | generator = DoppelGANgerGenerator(
56 | feed_back=False,
57 | noise=True,
58 | feature_outputs=data_feature_outputs,
59 | attribute_outputs=data_attribute_outputs,
60 | real_attribute_mask=real_attribute_mask,
61 | sample_len=sample_len,
62 | feature_num_units=256,
63 | feature_num_layers=2)
64 | discriminator = Discriminator()
65 | attr_discriminator = AttrDiscriminator()
66 |
67 | checkpoint_dir = "./log/checkpoint"
68 | if not os.path.exists(checkpoint_dir):
69 | os.makedirs(checkpoint_dir)
70 | sample_dir = "./log/sample"
71 | if not os.path.exists(sample_dir):
72 | os.makedirs(sample_dir)
73 | time_path = "./log/time.txt"
74 | np.savez("./log/minmax.npz", min_val=min_val, max_val=max_val)
75 |
76 | # Parameters
77 | epoch = 400
78 | batch_size = 100
79 | vis_freq = 200
80 | vis_num_sample = 5
81 | d_rounds = 1
82 | g_rounds = 1
83 | d_gp_coe = 10.0
84 | attr_d_gp_coe = 10.0
85 | g_attr_d_coe = 1.0
86 | extra_checkpoint_freq = 5
87 | num_packing = 1
88 |
89 | run_config = tf.ConfigProto()
90 | with tf.Session(config=run_config) as sess:
91 | gan = DoppelGANger(
92 | sess=sess,
93 | checkpoint_dir=checkpoint_dir,
94 | sample_dir=sample_dir,
95 | time_path=time_path,
96 | epoch=epoch,
97 | batch_size=batch_size,
98 | data_feature=data_feature,
99 | data_attribute=data_attribute,
100 | real_attribute_mask=real_attribute_mask,
101 | data_gen_flag=data_gen_flag,
102 | sample_len=sample_len,
103 | data_feature_outputs=data_feature_outputs,
104 | data_attribute_outputs=data_attribute_outputs,
105 | vis_freq=vis_freq,
106 | vis_num_sample=vis_num_sample,
107 | generator=generator,
108 | discriminator=discriminator,
109 | attr_discriminator=attr_discriminator,
110 | d_gp_coe=d_gp_coe,
111 | attr_d_gp_coe=attr_d_gp_coe,
112 | g_attr_d_coe=g_attr_d_coe,
113 | d_rounds=d_rounds,
114 | g_rounds=g_rounds,
115 | num_packing=num_packing,
116 | extra_checkpoint_freq=extra_checkpoint_freq)
117 | gan.build()
118 | gan.train()
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/example_training/config.py:
--------------------------------------------------------------------------------
1 | config = {
2 | "scheduler_config": {
3 | "gpu": ["0"],
4 | "config_string_value_maxlen": 1000,
5 | "result_root_folder": "../results/"
6 | },
7 |
8 | "global_config": {
9 | "batch_size": 100,
10 | "vis_freq": 200,
11 | "vis_num_sample": 5,
12 | "d_rounds": 1,
13 | "g_rounds": 1,
14 | "num_packing": 1,
15 | "noise": True,
16 | "feed_back": False,
17 | "g_lr": 0.001,
18 | "d_lr": 0.001,
19 | "d_gp_coe": 10.0,
20 | "gen_feature_num_layers": 1,
21 | "gen_feature_num_units": 100,
22 | "gen_attribute_num_layers": 3,
23 | "gen_attribute_num_units": 100,
24 | "disc_num_layers": 5,
25 | "disc_num_units": 200,
26 | "initial_state": "random",
27 |
28 | "attr_d_lr": 0.001,
29 | "attr_d_gp_coe": 10.0,
30 | "g_attr_d_coe": 1.0,
31 | "attr_disc_num_layers": 5,
32 | "attr_disc_num_units": 200,
33 | },
34 |
35 | "test_config": [
36 | {
37 | "dataset": ["google"],
38 | "epoch": [400],
39 | "run": [0, 1, 2],
40 | "sample_len": [1, 5, 10],
41 | "extra_checkpoint_freq": [5],
42 | "epoch_checkpoint_freq": [1],
43 | "aux_disc": [False],
44 | "self_norm": [False]
45 | },
46 | {
47 | "dataset": ["web"],
48 | "epoch": [400],
49 | "run": [0, 1, 2],
50 | "sample_len": [1, 5, 10, 25, 50],
51 | "extra_checkpoint_freq": [5],
52 | "epoch_checkpoint_freq": [1],
53 | "aux_disc": [True],
54 | "self_norm": [True]
55 | },
56 | {
57 | "dataset": ["FCC_MBA"],
58 | "epoch": [17000],
59 | "run": [0, 1, 2],
60 | "sample_len": [1, 4, 8],
61 | "extra_checkpoint_freq": [850],
62 | "epoch_checkpoint_freq": [70],
63 | "aux_disc": [False],
64 | "self_norm": [False]
65 | }
66 | ]
67 | }
68 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/example_training/main.py:
--------------------------------------------------------------------------------
1 | if __name__ == "__main__":
2 | from gan_task import GANTask
3 | from config import config
4 | from gpu_task_scheduler.gpu_task_scheduler import GPUTaskScheduler
5 |
6 | scheduler = GPUTaskScheduler(config=config, gpu_task_class=GANTask)
7 | scheduler.start()
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/gan/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/gan/__init__.py
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/gan/load_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import pickle
4 |
5 |
6 | def load_data(path, flag="train"):
7 | data_npz = np.load(
8 | os.path.join(path, "data_{}.npz".format(flag)))
9 | with open(os.path.join(path, "data_feature_output.pkl"), "rb") as f:
10 | data_feature_outputs = pickle.load(f)
11 | with open(os.path.join(path,
12 | "data_attribute_output.pkl"), "rb") as f:
13 | data_attribute_outputs = pickle.load(f)
14 |
15 | data_feature = data_npz["data_feature"]
16 | data_attribute = data_npz["data_attribute"]
17 | data_gen_flag = data_npz["data_gen_flag"]
18 | return (data_feature, data_attribute,
19 | data_gen_flag,
20 | data_feature_outputs, data_attribute_outputs)
21 |
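As a usage note, the three files this loader expects can be produced as in the sketch below (shapes are assumed for illustration; the array and file names match the code above, and `gan.output` is the `Output` definition shipped in this folder):

```python
# Minimal sketch (assumed shapes) of writing the files load_data() expects.
import pickle
import numpy as np
from gan.output import Output, OutputType, Normalization

num_samples, seq_len, num_features = 50, 960, 91     # illustrative sizes
data_feature = np.random.rand(num_samples, seq_len, num_features)
data_attribute = np.eye(5)[np.random.randint(5, size=num_samples)]  # one-hot attribute
data_gen_flag = np.ones((num_samples, seq_len))      # 1 while a sequence is active

np.savez("data/data_train.npz",
         data_feature=data_feature,
         data_attribute=data_attribute,
         data_gen_flag=data_gen_flag)
with open("data/data_feature_output.pkl", "wb") as f:
    pickle.dump([Output(OutputType.CONTINUOUS, 1, Normalization.ZERO_ONE)
                 for _ in range(num_features)], f)
with open("data/data_attribute_output.pkl", "wb") as f:
    pickle.dump([Output(OutputType.DISCRETE, 5)], f)
```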
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/gan/op.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 |
5 | def linear(input_, output_size, scope_name="linear"):
6 | with tf.variable_scope(scope_name):
7 | input_ = tf.reshape(
8 | input_,
9 | [-1, np.prod(input_.get_shape().as_list()[1:])])
10 | output = tf.layers.dense(
11 | input_,
12 | output_size)
13 | return output
14 |
15 |
16 | def flatten(input_, scope_name="flatten"):
17 | with tf.variable_scope(scope_name):
18 | output = tf.reshape(
19 | input_,
20 | [-1, np.prod(input_.get_shape().as_list()[1:])])
21 | return output
22 |
23 |
24 | class batch_norm(object):
25 | # Code from:
26 | # https://github.com/carpedm20/DCGAN-tensorflow
27 | def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
28 | with tf.variable_scope(name):
29 | self.epsilon = epsilon
30 | self.momentum = momentum
31 | self.name = name
32 |
33 | def __call__(self, x, train=True):
34 | return tf.contrib.layers.batch_norm(x,
35 | decay=self.momentum,
36 | updates_collections=None,
37 | epsilon=self.epsilon,
38 | scale=True,
39 | is_training=train,
40 | scope=self.name)
41 |
42 |
43 | class layer_norm(object):
44 | def __init__(self, name="layer_norm"):
45 | self.name = name
46 |
47 | def __call__(self, x):
48 | return tf.contrib.layers.layer_norm(x, scope=self.name)
49 |
50 |
51 | def deconv2d(input_, output_shape,
52 | k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
53 | name="deconv2d"):
54 | # Code from:
55 | # https://github.com/carpedm20/DCGAN-tensorflow
56 | with tf.variable_scope(name):
57 | # filter : [height, width, output_channels, in_channels]
58 | w = tf.get_variable(
59 | 'w',
60 | [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
61 | initializer=tf.random_normal_initializer(stddev=stddev))
62 |
63 | try:
64 | deconv = tf.nn.conv2d_transpose(
65 | input_,
66 | w,
67 | output_shape=output_shape,
68 | strides=[1, d_h, d_w, 1])
69 |
70 |         # Support for versions of TensorFlow before 0.7.0
71 | except AttributeError:
72 | deconv = tf.nn.deconv2d(
73 | input_,
74 | w,
75 | output_shape=output_shape,
76 | strides=[1, d_h, d_w, 1])
77 |
78 | biases = tf.get_variable(
79 | 'biases',
80 | [output_shape[-1]],
81 | initializer=tf.constant_initializer(0.0))
82 | deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)
83 |
84 | return deconv
85 |
86 |
87 | def conv2d(input_, output_dim,
88 | k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
89 | name="conv2d"):
90 | # Code from:
91 | # https://github.com/carpedm20/DCGAN-tensorflow
92 | with tf.variable_scope(name):
93 | w = tf.get_variable(
94 | 'w',
95 | [k_h, k_w, input_.get_shape()[-1], output_dim],
96 | initializer=tf.truncated_normal_initializer(stddev=stddev))
97 | conv = tf.nn.conv2d(
98 | input_,
99 | w,
100 | strides=[1, d_h, d_w, 1],
101 | padding='SAME')
102 |
103 | biases = tf.get_variable(
104 | 'biases', [output_dim], initializer=tf.constant_initializer(0.0))
105 | conv = tf.reshape(
106 | tf.nn.bias_add(conv, biases),
107 | [-1] + conv.get_shape().as_list()[1:])
108 |
109 | return conv
110 |
111 |
112 | def lrelu(x, leak=0.2, name="lrelu"):
113 | # Code from:
114 | # https://github.com/carpedm20/DCGAN-tensorflow
115 | return tf.maximum(x, leak * x)
116 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/gan/op.py.LICENSE:
--------------------------------------------------------------------------------
1 | Part of this op.py is modified from https://github.com/carpedm20/DCGAN-tensorflow, which is licensed under
2 |
3 | The MIT License (MIT)
4 |
5 | Copyright (c) 2016 Taehoon Kim
6 |
7 | Permission is hereby granted, free of charge, to any person obtaining a copy
8 | of this software and associated documentation files (the "Software"), to deal
9 | in the Software without restriction, including without limitation the rights
10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 | copies of the Software, and to permit persons to whom the Software is
12 | furnished to do so, subject to the following conditions:
13 |
14 | The above copyright notice and this permission notice shall be included in all
15 | copies or substantial portions of the Software.
16 |
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | SOFTWARE.
24 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/DoppelGANger/gan/output.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class OutputType(Enum):
5 | CONTINUOUS = "CONTINUOUS"
6 | DISCRETE = "DISCRETE"
7 |
8 |
9 | class Normalization(Enum):
10 | ZERO_ONE = "ZERO_ONE"
11 | MINUSONE_ONE = "MINUSONE_ONE"
12 |
13 |
14 | class Output(object):
15 | def __init__(self, type_, dim, normalization=None, is_gen_flag=False):
16 | self.type_ = type_
17 | self.dim = dim
18 | self.normalization = normalization
19 | self.is_gen_flag = is_gen_flag
20 |
21 | if type_ == OutputType.CONTINUOUS and normalization is None:
22 | raise Exception("normalization must be set for continuous output")
23 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/README.md:
--------------------------------------------------------------------------------
1 | # Synthetic Data Generation
2 | Here we describe how to reproduce the benchmark results for synthetic time-series generation given PMU data.
3 |
4 | ## Relevant Packages Install
5 | - Create and activate an Anaconda virtual environment for each method
6 | ```bash
7 | conda create -n TSGeneration python=3.x.x
8 | conda activate TSGeneration
9 | ```
10 | - Install the required packages
11 | ```bash
12 | pip install -r requirements.txt
13 | ```
14 |
15 | ## Benchmark Results Reproduction
16 | The code for each model lives in its own folder above, together with its configuration files. Run a model's `main.py` directly to produce `generated_samples.npz`. A quick way to inspect the output is shown below.
17 |
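The sketch below loads and inspects a generated sample file; the array key (`data_feature`) is an assumption based on how the training scripts save their output, so list `samples.files` to see what a given model actually stored:

```python
import numpy as np

samples = np.load("generated_samples.npz")
print(samples.files)               # names of the stored arrays
gen = samples["data_feature"]      # assumed key; adjust to what .files reports
print(gen.shape)                   # typically (num_samples, seq_len, num_channels)
```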
18 | ## References
19 |
20 | 1. **DoppelGANger**
21 |
22 | Lin, Zinan, et al. "Using GANs for sharing networked time series data: Challenges, initial promise, and open questions." Proceedings of the ACM Internet Measurement Conference (2020).
23 |
24 | https://github.com/fjxmlzn/DoppelGANger
25 |
26 | 1. **COT-GAN**
27 |
28 | Xu, Tianlin, et al. "Cot-gan: Generating sequential data via causal optimal transport." arXiv preprint arXiv:2006.08571 (2020).
29 |
30 | https://github.com/tianlinxu312/cot-gan
31 |
32 | 1. **TimeGAN**
33 |
34 | Yoon, Jinsung, Daniel Jarrett, and Mihaela Van der Schaar. "Time-series generative adversarial networks." (2019).
35 |
36 | https://github.com/jsyoon0823/TimeGAN
37 |
38 | 1. **RCGAN**
39 |
40 | Esteban, Cristóbal, Stephanie L. Hyland, and Gunnar Rätsch. "Real-valued (medical) time series generation with recurrent conditional gans." arXiv preprint arXiv:1706.02633 (2017).
41 |
42 | https://github.com/ratschlab/RGAN
43 |
44 | 1. **WGAN-GP (NaiveGAN)**
45 |
46 | Gulrajani, Ishaan, et al. "Improved training of wasserstein gans." arXiv preprint arXiv:1704.00028 (2017).
47 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 ratschlab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/RVAE/launch_sine_generation_RVAE.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import numpy as np
3 | from os import system
4 | from time import sleep
5 |
6 |
7 | def submit_job(params, use_gpu=False):
8 |
9 | if use_gpu:
10 | cmd_line = 'bsub -W 2:00 -n 1 -R "rusage[mem=1000,ngpus_excl_p=1]"'
11 | else:
12 | cmd_line = 'bsub -W 2:00 -n 1 -R "rusage[mem=1000]"'
13 |
14 | job_name = "_".join(map(lambda x: str(x), params.values()))
15 |
16 | cmd_line += ' -J %s -o %s.txt'%(job_name, job_name)
17 |
18 |
19 |     cmd_line += ' python sine_generation_RVAE.py '
20 |
21 | for key, val in params.items():
22 | cmd_line += ' -%s %s'%(key, val)
23 |
24 | print(cmd_line)
25 | system(cmd_line)
26 |
27 | if __name__ == '__main__':
28 |
29 | learning_rate = [0.001, 0.01]
30 | optimizer_str = ["adam"]
31 | hidden_units_dec = [10, 50, 100, 300]
32 | hidden_units_enc = [10, 50, 100, 300]
33 | emb_dim = [10, 50, 100, 300]
34 | mult = [0.1, 0.001, 0.01]
35 |
36 | configs = itertools.product(learning_rate, optimizer_str, hidden_units_dec, hidden_units_enc, emb_dim, mult)
37 | config_keys = ['learning_rate', 'optimizer_str', 'hidden_units_dec', 'hidden_units_enc', 'emb_dim', 'mult']
38 |
39 | for config in configs:
40 | params = {}
41 | params['learning_rate'] = config[0]
42 | params['optimizer_str'] = config[1]
43 | params['hidden_units_dec'] = config[2]
44 | params['hidden_units_enc'] = config[3]
45 | params['emb_dim'] = config[4]
46 | params['mult'] = config[5]
47 | params['experiment_id'] = 'experiments_test_RVAE_sine_SRNN_new_RVAE_HS_short'
48 |
49 | #if((params['hidden_units_dec'] == 300) or (params['hidden_units_enc'] == 300) or (params['emb_dim'] == 300) or (params['mult'] == 0.01)):
50 | submit_job(params, use_gpu=True)
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/RVAE/launch_sine_generation_RVAE_concatenated_input.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import numpy as np
3 | from os import system
4 | from time import sleep
5 |
6 |
7 | def submit_job(params, use_gpu=False):
8 |
9 | if use_gpu:
10 | cmd_line = 'bsub -W 2:00 -n 1 -R "rusage[mem=1000,ngpus_excl_p=1]"'
11 | else:
12 | cmd_line = 'bsub -W 2:00 -n 1 -R "rusage[mem=1000]"'
13 |
14 | job_name = "_".join(map(lambda x: str(x), params.values()))
15 |
16 | cmd_line += ' -J %s -o %s.txt'%(job_name, job_name)
17 |
18 |
19 |     cmd_line += ' python sine_generation_RVAE_concatenated_input.py '
20 |
21 | for key, val in params.items():
22 | cmd_line += ' -%s %s'%(key, val)
23 |
24 | print(cmd_line)
25 | system(cmd_line)
26 |
27 | if __name__ == '__main__':
28 |
29 | learning_rate = [0.001, 0.01]
30 | optimizer_str = ["adam"]
31 | hidden_units_dec = [10, 50, 100, 300]
32 | hidden_units_enc = [10, 50, 100, 300]
33 | emb_dim = [10, 50, 100, 300]
34 | mult = [0.1, 0.001, 0.01]
35 |
36 | configs = itertools.product(learning_rate, optimizer_str, hidden_units_dec, hidden_units_enc, emb_dim, mult)
37 | config_keys = ['learning_rate', 'optimizer_str', 'hidden_units_dec', 'hidden_units_enc', 'emb_dim', 'mult']
38 |
39 | for config in configs:
40 | params = {}
41 | params['learning_rate'] = config[0]
42 | params['optimizer_str'] = config[1]
43 | params['hidden_units_dec'] = config[2]
44 | params['hidden_units_enc'] = config[3]
45 | params['emb_dim'] = config[4]
46 | params['mult'] = config[5]
47 | params['experiment_id'] = 'experiments_test_RVAE_sine_SRNN_new_RVAE_concatenated_HS_short'
48 |
49 | #if((params['hidden_units_dec'] == 300) or (params['hidden_units_enc'] == 300) or (params['emb_dim'] == 300) or (params['mult'] == 0.01)):
50 | submit_job(params, use_gpu=True)
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/RVAE/sine_generation_RVAE_concatenated_input_find_best_model.py:
--------------------------------------------------------------------------------
1 | # Targets not used, not possible to condition generated sequences
2 |
3 | import data_utils_2
4 | import pandas as pd
5 | import numpy as np
6 | import tensorflow as tf
7 | import math, random, itertools
8 | import pickle
9 | import time
10 | import json
11 | import os
12 | import math
13 | from data_utils_2 import get_data
14 |
15 | import tensorflow as tf
16 | import numpy as np
17 | from sklearn.metrics import roc_curve, auc, precision_recall_curve
18 | import matplotlib
19 | # change backend so that it can plot figures without an X-server running.
20 | matplotlib.use('Agg')
21 | import matplotlib.pyplot as plt
22 | import math, time, json, random
23 |
24 | import glob
25 | import copy
26 |
27 | experiment_id = './experiments_test_RVAE_sine_SRNN_new_RVAE_concatenated_HS_short_callable'
28 |
29 | ########################
30 | # SELECT RVAE WITH LOWER COST
31 | ########################
32 |
33 | # Find the model that performed best in the validation set
34 | experiments_directory = "./" + experiment_id + "/"
35 |
36 | files = glob.glob(experiments_directory + '/*.json')
37 | rows = []
38 | for fi in files:
39 | with open(fi, 'r') as f:
40 | r = json.load(f)
41 | r['final_train_cost'] = r['costs_train'][-1]
42 | r['final_validation_cost'] = r['costs_val'][-1]
43 | r['final_test_cost'] = r['costs_test'][-1]
44 | r['filename'] = fi
45 | rows.append(r)
46 |
47 | df = pd.DataFrame(rows)
48 | to_delete = [col for col in df.columns if ('costs' in col) or ('other' in col)]
49 | df.drop(to_delete, axis=1, inplace=True)
50 | best_model_filename = df.loc[df.sort_values(by='final_validation_cost').index[0]]["filename"]
51 | print(best_model_filename.replace(".json", ""))
52 | print("learning_rate, delta_error, optimizer_str, hidden_units_dec, hidden_units_enc, emb_dim, mult")
53 | print(df.loc[df.sort_values(by='final_validation_cost').index[0]]["config"])
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/RVAE/sine_generation_RVAE_find_best_model.py:
--------------------------------------------------------------------------------
1 | # Targets not used, not possible to condition generated sequences
2 |
3 | import data_utils_2
4 | import pandas as pd
5 | import numpy as np
6 | import tensorflow as tf
7 | import math, random, itertools
8 | import pickle
9 | import time
10 | import json
11 | import os
12 | import math
13 | from data_utils_2 import get_data
14 |
15 | import tensorflow as tf
16 | import numpy as np
17 | from sklearn.metrics import roc_curve, auc, precision_recall_curve
18 | import matplotlib
19 | # change backend so that it can plot figures without an X-server running.
20 | matplotlib.use('Agg')
21 | import matplotlib.pyplot as plt
22 | import math, time, json, random
23 |
24 | import glob
25 | import copy
26 |
27 | experiment_id = './experiments_test_RVAE_sine_SRNN_new_RVAE_HS_short_callable'
28 |
29 | ########################
30 | # SELECT RVAE WITH LOWER COST
31 | ########################
32 |
33 | # Find the model that performed best in the validation set
34 | experiments_directory = "./" + experiment_id + "/"
35 |
36 | files = glob.glob(experiments_directory + '/*.json')
37 | rows = []
38 | for fi in files:
39 | with open(fi, 'r') as f:
40 | r = json.load(f)
41 | r['final_train_cost'] = r['costs_train'][-1]
42 | r['final_validation_cost'] = r['costs_val'][-1]
43 | r['final_test_cost'] = r['costs_test'][-1]
44 | r['filename'] = fi
45 | rows.append(r)
46 |
47 | df = pd.DataFrame(rows)
48 | to_delete = [col for col in df.columns if ('costs' in col) or ('other' in col)]
49 | df.drop(to_delete, axis=1, inplace=True)
50 | best_model_filename = df.loc[df.sort_values(by='final_validation_cost').index[0]]["filename"]
51 | print(best_model_filename.replace(".json", ""))
52 | print("learning_rate, delta_error, optimizer_str, hidden_units_dec, hidden_units_enc, emb_dim, mult")
53 | print(df.loc[df.sort_values(by='final_validation_cost').index[0]]["config"])
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/experiments/settings/power.txt:
--------------------------------------------------------------------------------
1 | {
2 | "settings_file": "",
3 | "data": "sine",
4 | "num_samples": 400,
5 | "seq_length": 960,
6 | "num_signals": 91,
7 | "normalise": false,
8 | "cond_dim": 0,
9 | "max_val": 1,
10 | "one_hot": true,
11 | "predict_labels": false,
12 | "scale": 0.1,
13 | "freq_low": 1.0,
14 | "freq_high": 5.0,
15 | "amplitude_low": 0.1,
16 | "amplitude_high": 0.9,
17 | "multivariate_mnist": false,
18 | "full_mnist": false,
19 | "data_load_from": "",
20 | "resample_rate_in_min": 15,
21 | "hidden_units_g": 256,
22 | "hidden_units_d": 256,
23 | "kappa": 1,
24 | "latent_dim": 5,
25 | "batch_mean": false,
26 | "learn_scale": false,
27 | "learning_rate": 0.1,
28 | "batch_size": 28,
29 | "num_epochs": 400,
30 | "D_rounds": 1,
31 | "G_rounds": 3,
32 | "use_time": false,
33 | "WGAN": false,
34 | "WGAN_clip": false,
35 | "shuffle": true,
36 | "wrong_labels": false,
37 | "identifier": "power",
38 | "dp": false,
39 | "l2norm_bound": 4,
40 | "batches_per_lot": 1,
41 | "dp_sigma": 0.6,
42 | "num_generated_features": 91
43 | }
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/experiments/settings/test.txt:
--------------------------------------------------------------------------------
1 | {
2 | "settings_file": "",
3 | "data": "sine",
4 | "num_samples": 14000,
5 | "seq_length": 30,
6 | "num_signals": 1,
7 | "normalise": false,
8 | "cond_dim": 0,
9 | "max_val": 1,
10 | "one_hot": false,
11 | "predict_labels": false,
12 | "scale": 0.1,
13 | "freq_low": 1.0,
14 | "freq_high": 5.0,
15 | "amplitude_low": 0.1,
16 | "amplitude_high": 0.9,
17 | "multivariate_mnist": false,
18 | "full_mnist": false,
19 | "data_load_from": "",
20 | "resample_rate_in_min": 15,
21 | "hidden_units_g": 100,
22 | "hidden_units_d": 100,
23 | "kappa": 1,
24 | "latent_dim": 5,
25 | "batch_mean": false,
26 | "learn_scale": false,
27 | "learning_rate": 0.1,
28 | "batch_size": 28,
29 | "num_epochs": 100,
30 | "D_rounds": 5,
31 | "G_rounds": 1,
32 | "use_time": false,
33 | "WGAN": false,
34 | "WGAN_clip": false,
35 | "shuffle": true,
36 | "wrong_labels": false,
37 | "identifier": "test",
38 | "dp": false,
39 | "l2norm_bound": 1e-05,
40 | "batches_per_lot": 1,
41 | "dp_sigma": 1e-05,
42 | "num_generated_features": 1
43 | }
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/kernel.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ipython
2 | # Experimenting with kernel between two equal-length "time series".
3 | # Note: kernel must be consistent for MMD formulation to be valid.
4 | # Note: the time series *may* be multi-dimensional.
5 |
6 | import numpy as np
7 | import scipy as sp
8 | #from sklearn.metrics.pairwise import my_rbf
9 | import matplotlib.pyplot as plt
10 | import re
11 | from fastdtw import fastdtw
12 | from scipy.spatial.distance import euclidean
13 | import pdb
14 |
15 | import data_utils
16 |
17 | seq_length = 30
18 | num_signals = 1
19 | num_samples = 10
20 | X, pdf = data_utils.GP(seq_length, num_samples, num_signals)
21 | # for testing: make Y quite similar to X
22 | #X = data_utils.sine_wave(seq_length, num_samples, num_signals)
23 | #, freq_low=2, freq_high=2.1)
24 | #X = np.random.normal(size=(num_samples, seq_length, num_signals), scale=0.5)
25 | #for i in range(1, num_samples):
26 | # X[i] = X[i-1] + np.random.normal(size=(seq_length, num_signals), scale=0.3)
27 |
28 | def cos_dist(x, y):
29 | dist = 0.5*(1 - np.dot(x.T,y)[0, 0]/(np.linalg.norm(x)*np.linalg.norm(y)))
30 | return dist
31 |
32 | def my_rbf(x, y=None, gamma=1.0/(2.1)**2, withnorm=False):
33 | """
34 | """
35 | if y is None:
36 | y = x
37 | if withnorm:
38 | xn = x/np.linalg.norm(x)
39 | yn = y/np.linalg.norm(y)
40 | else:
41 | xn = x
42 | yn = y
43 | dist = np.linalg.norm(xn - yn)
44 | return np.exp(-gamma*(dist**2))
45 |
46 | def compare_metrics(X, num=10):
47 | """
48 | """
49 | fig, axarr = plt.subplots(num, 4, figsize=(15, 15))
50 | xx = np.arange(30)
51 | fig.suptitle(' '.join(['dtw', 'cos', 'euc', 'rbf']))
52 | for (col, distance_measure) in enumerate([fastdtw, cos_dist, euclidean, my_rbf]):
53 | dists = []
54 | for i in range(num):
55 | try:
56 | d, _ = distance_measure(X[0], X[i])
57 | except TypeError:
58 | d = distance_measure(X[0], X[i])
59 | if col == 3:
60 | d = -d
61 |             dists.append(d)
62 | # now, plot in order
63 | for (i, j) in enumerate(np.argsort(dists)):
64 | axarr[i, col].plot(xx, X[j])
65 | axarr[i, col].plot(xx, X[0], alpha=0.5)
66 | dtw, _ = fastdtw(X[0], X[j])
67 | title = '%.1f %.1f %.1f %.1f' % (dtw, cos_dist(X[0], X[j]), euclidean(X[0], X[j]), my_rbf(X[0], X[j]))
68 | #title = '%.1f' % (dtw)
69 | axarr[i, col].set_title(title)
70 | axarr[i, col].set_ylim(-1.1, 1.1)
71 | plt.tight_layout()
72 | plt.savefig("dtw.png")
73 | plt.clf()
74 | plt.close()
75 | return True
76 |
77 |
78 |
79 | def compare_y(X, scale, gamma=1):
80 | seq_length = X.shape[1]
81 | num_signals = X.shape[2]
82 | Y = X + np.random.normal(size=(seq_length, num_signals), scale=scale)
83 |
84 | x = X[0, :, :]
85 | y = Y[0, :, :]
86 |
87 | kxy = my_rbf(x, y, gamma=gamma)
88 | print(kxy)
89 |
90 | plt.plot(x[:, 0], color='blue')
91 | plt.plot(x[:, 1], color='green')
92 | plt.plot(x[:, 2], color='red')
93 | plt.plot(y[:, 0], color='#4286f4')
94 | plt.plot(y[:, 1], color='#20cc4b')
95 | plt.plot(y[:, 2], color='#ea4b4b')
96 | plt.axhline(y=kxy, color='black', linestyle='-', label='kxy')
97 | plt.fill_between(plt.xlim(), 0, 1, facecolor='black', alpha=0.15)
98 | plt.title('gamma' + str(gamma) + ' scale' + str(scale).zfill(3))
99 | plt.xlim(0, seq_length-1)
100 | plt.ylim(-1.01, 1.01)
101 | #plt.ylim(4, 4)
102 | plt.savefig('sine_gamma' + str(gamma) + '_scale' + str(scale*100).zfill(5) + '.png')
103 | plt.clf()
104 | plt.close()
105 |
106 | #for scale in np.concatenate(([5, 1, 0.5, 0.4, 0.3, 0.2, 0.15, 0.1], np.arange(0.09, 0.00, -0.01))):
107 | # compare_y(X, scale, 0.1)
108 | # compare_y(X, scale, 0.5)
109 | # compare_y(X, scale, 1)
110 | # compare_y(X, scale, 2)
111 |
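A small sanity check of the two handcrafted distances above (values worked out by hand; run with the definitions above in scope):

```python
import numpy as np

x, y = np.zeros(3), np.ones(3)
print(my_rbf(x, y, gamma=0.5))     # exp(-0.5 * 3) ~ 0.223, since ||x - y||^2 = 3

a, b = np.array([[1.0], [0.0]]), np.array([[0.0], [1.0]])
print(cos_dist(a, b))              # 0.5 for orthogonal column vectors
```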
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/paths.py:
--------------------------------------------------------------------------------
1 | eICU_hdf_dir = 'where you keep eICU, CSV converted to hdf5'
2 | eICU_proc_dir = 'processed eICU files directory (as you wish)'
3 | eICU_synthetic_dir = 'where pre-generated synthetic data from eICU lives'
4 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/requirements.txt:
--------------------------------------------------------------------------------
1 | bleach==1.5.0
2 | cycler==0.10.0
3 | enum34==1.1.6
4 | html5lib==0.9999999
5 | Keras==2.1.2
6 | Markdown==2.6.11
7 | matplotlib==2.1.1
8 | numpy==1.14.0
9 | pandas==0.22.0
10 | Pillow==5.0.0
11 | protobuf==3.5.1
12 | pyparsing==2.2.0
13 | python-dateutil==2.6.1
14 | pytz==2017.3
15 | PyYAML==3.12
16 | scikit-learn==0.19.1
17 | scipy==1.0.0
18 | six==1.11.0
19 | tensorflow==1.4.1
20 | tensorflow-tensorboard==0.4.0rc3
21 | Werkzeug==0.14.1
22 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/tf_ops.py:
--------------------------------------------------------------------------------
1 | ### from https://github.com/eugenium/MMD/blob/master/tf_ops.py
2 | import tensorflow as tf
3 |
4 |
5 | def sq_sum(t, name=None):
6 | "The squared Frobenius-type norm of a tensor, sum(t ** 2)."
7 | with tf.name_scope(name, "SqSum", [t]):
8 | t = tf.convert_to_tensor(t, name='t')
9 | return 2 * tf.nn.l2_loss(t)
10 |
11 |
12 | def dot(x, y, name=None):
13 | "The dot product of two vectors x and y."
14 | with tf.name_scope(name, "Dot", [x, y]):
15 | x = tf.convert_to_tensor(x, name='x')
16 | y = tf.convert_to_tensor(y, name='y')
17 |
18 | x.get_shape().assert_has_rank(1)
19 | y.get_shape().assert_has_rank(1)
20 |
21 | return tf.squeeze(tf.matmul(tf.expand_dims(x, 0), tf.expand_dims(y, 1)))
22 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/RGAN/tstr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ipython
2 | # Run TSTR on a trained model. (helper script)
3 |
4 | import sys
5 | import glob
6 | import numpy as np
7 | import pdb
8 |
9 | from eval import TSTR_mnist, TSTR_eICU
10 |
11 | assert len(sys.argv) >= 2
12 | identifier = sys.argv[1]
13 | print(identifier)
14 |
15 | model = sys.argv[2]
16 | if model == 'CNN':
17 | CNN = True
18 | print('Using CNN')
19 | else:
20 | CNN = False
21 | print('Using RF')
22 |
23 | task = sys.argv[3]
24 | if task == 'mnist':
25 | mnist = True
26 | print('testing on mnist')
27 | else:
28 | mnist = False
29 | print('testing on eicu')
30 |
31 | params_dir = 'REDACTED'
32 | params = glob.glob(params_dir + identifier + '_*.npy')
33 | print(params)
34 | epochs = [int(p.split('_')[-1].strip('.npy')) for p in params]
35 |
36 | # (I write F1 here but we're not actually reporting the F1, sorry :/)
37 | epoch_f1 = np.zeros(len(epochs))
38 | print('Running TSTR on validation set across all epochs for which parameters are available')
39 | for (i, e) in enumerate(epochs):
40 | if mnist:
41 | synth_f1, real_f1 = TSTR_mnist(identifier, e, generate=True, vali=True, CNN=CNN)
42 | else:
43 | print('testing eicu')
44 | synth_f1 = TSTR_eICU(identifier, e, generate=True, vali=True, CNN=CNN)
45 | epoch_f1[i] = synth_f1
46 |
47 | best_epoch_index = np.argmax(epoch_f1)
48 | best_epoch = epochs[best_epoch_index]
49 |
50 | print('Running TSTR on', identifier, 'at epoch', best_epoch, '(validation f1 was', epoch_f1[best_epoch_index], ')')
51 | if mnist:
52 | TSTR_mnist(identifier, best_epoch, generate=True, vali=False, CNN=CNN)
53 | # also run TRTS at that epoch
54 | TSTR_mnist(identifier, best_epoch, generate=True, vali=False, CNN=CNN, reverse=True)
55 | else:
56 | TSTR_eICU(identifier, best_epoch, generate=True, vali=False, CNN=CNN)
57 | # also run TRTS at that epoch
58 | TSTR_eICU(identifier, best_epoch, generate=True, vali=False, CNN=CNN, reverse=True)
59 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/README.md:
--------------------------------------------------------------------------------
1 | # COT-GAN: Generating Sequential Data via Causal Optimal Transport
2 | Authors: Tianlin Xu, Li K. Wenliang, Michael Munn, Beatrice Acciaio
3 |
4 | COT-GAN is an adversarial algorithm to train implicit generative models optimized for producing sequential data. The loss function of this algorithm is formulated using ideas from Causal Optimal Transport (COT), which combines classic optimal transport methods with an additional temporal causality constraint.
5 |
6 | This repository contains an implementation and further details of COT-GAN.
7 |
8 | Reference: Tianlin Xu, Li K. Wenliang, Michael Munn, Beatrice Acciaio, "COT-GAN: Generating Sequential Data via Causal Optimal Transport," Neural Information Processing Systems (NeurIPS), 2020.
9 |
10 | Paper Link: https://arxiv.org/abs/2006.08571
11 |
12 | Contact: tianlin.xu1@gmail.com
13 |
14 | ## Setup
15 |
16 | Begin by installing pip and setting up virtualenv.
17 |
18 | ```
19 | $ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
20 | $ python get-pip.py
21 | $ python3 -m pip install virtualenv
22 | ```
23 |
24 | ### Clone the Github repository and install requirements
25 |
26 | ```
27 | $ git clone https://github.com/tianlinxu312/cot-gan.git
28 | $ cd cot-gan/
29 |
30 | # Create a virtual environment called 'venv'
31 | $ virtualenv venv
32 | $ source venv/bin/activate # Activate virtual environment
33 | $ python3 -m pip install -r requirements.txt
34 | ```
35 |
36 | ### Data
37 | The .tfrecord files used in the COT-GAN experiments can be downloaded here: https://drive.google.com/drive/folders/1ja9OlAyObPTDIp8bNl8rDT1RgpEt0qO-?usp=sharing
38 | To run the code in this repository as is, download the .tfrecord files to the corresponding subfolder inside the `data` folder.
39 |
40 | **Note:** If you have the Google Cloud SDK installed, you can copy the files using `gsutil` from this publicly accessible bucket:
41 |
42 | ```
43 | # Download the data from Google Cloud Storage
44 | $ gsutil -m cp -r gs://munn-sandbox/cwgan/data .
45 | ```
46 |
47 |
48 | ## Training COT-GAN
49 | We trained COT-GAN on synthetic low-dimensional datasets as well as two high-dimensional video datasets: a [human action dataset](http://www.wisdom.weizmann.ac.il/~vision/SpaceTimeActions.html) and an [animated Sprites dataset](https://github.com/jrconway3/Universal-LPC-spritesheet).
50 |
51 | For training on low-dimensional datasets, use the `--dname` flag to specify either synthetic sine time series (`SineImage`), auto-regressive data of order one (`AROne`), or EEG data (`eeg`). For example, to train on AR-1 data:
52 | ```
53 | # Train COTGAN on AR-1 data
54 | $ python3 -m toy_train \
55 | --dname="AROne"
56 | ```
57 | See the code for how to modify the default values of other training parameters or hyperparameters.
58 |
59 | Similarly, for training on the video datasets, set `--dname` to either `human_action` (human action dataset) or `animation` (animated Sprites dataset). For example,
60 |
61 | ```
62 | # Train COTGAN on human action dataset
63 | $ python3 -m video_train \
64 | --dname="human_action" \
65 | --path="./data/human_action/*.tfrecord"
66 | ```
67 |
68 | or
69 | ```
70 | # Train COTGAN on animated sprites dataset
71 | $ python3 -m video_train \
72 | --dname="animation" \
73 | --path="./data/animation/*.tfrecord"
74 | ```
75 |
76 | See the code for how to modify the default values of other training parameters or hyperparameters.
77 |
78 | ## Results
79 | Baseline models chosen for the video datasets are MoCoGAN (S. Tulyakov et al.) and direct minimization
80 | of the mixed Sinkhorn divergence. The evaluation metrics we use to assess model performance are the Fréchet Inception
81 | Distance (FID) which compares individual frames, the Fréchet Video Distance (FVD)
82 | which compares the video sequences as a whole by mapping samples into features via pretrained 3D
83 | convolutional networks, and their kernel counterparts (KID, KVD). Previous studies suggest that FVD correlates better
84 | with human judgement than KVD for videos, whereas KID correlates better than FID on images. Generated samples are provided below.
85 |
86 | ### Animated Sprites
87 | |                     | FVD       | FID       | KVD      | KID       |
88 | |---------------------|-----------|-----------|----------|-----------|
89 | | MoCoGAN             | 1,108.2   | 280.25    | 146.8    | 0.34      |
90 | | direct minimization | 498.8     | **81.56** | 83.2     | **0.078** |
91 | | COT-GAN             | **458.0** | 84.6      | **66.1** | 0.081     |
92 |
93 |
94 |
95 | ### Human Actions
96 | |                     | FVD       | FID      | KVD      | KID      |
97 | |---------------------|-----------|----------|----------|----------|
98 | | MoCoGAN             | 1,034.3   | 151.3    | 89.0     | 0.26     |
99 | | direct minimization | 507.6     | 120.7    | **34.3** | 0.23     |
100 | | COT-GAN             | **462.8** | **58.9** | 43.7     | **0.13** |
101 |
102 |
103 |
104 | ### More
105 | For a minimal PyTorch implementation, see [here](https://github.com/tianlinxu312/cot-gan-pytorch).
106 |
107 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/__init__.py
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/figs/animation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/figs/animation.gif
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/figs/humanaction.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/figs/humanaction.gif
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/cot-gan/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib==3.3.2
2 | tensorflow==2.1
3 | tensorflow-probability==0.11.1
4 | seaborn==0.11.0
5 | tqdm==4.51.0
6 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/timeGAN/README.md:
--------------------------------------------------------------------------------
1 | # Codebase for "Time-series Generative Adversarial Networks (TimeGAN)"
2 |
3 | Authors: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar
4 |
5 | Reference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar,
6 | "Time-series Generative Adversarial Networks,"
7 | Neural Information Processing Systems (NeurIPS), 2019.
8 |
9 | Paper Link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks
10 |
11 | Contact: jsyoon0823@gmail.com
12 |
13 | This directory contains an implementation of the TimeGAN framework for synthetic time-series data generation
14 | using one synthetic dataset and two real-world datasets.
15 |
16 | - Sine data: Synthetic
17 | - Stock data: https://finance.yahoo.com/quote/GOOG/history?p=GOOG
18 | - Energy data: http://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction
19 |
20 | To run the pipeline for training and evaluation on the TimeGAN framework, simply run
21 | python3 main_timegan.py, or see the Jupyter notebook tutorial of TimeGAN in tutorial_timegan.ipynb.
22 |
23 | Note that any model architecture can be used as the generator and
24 | discriminator model such as RNNs or Transformers.
25 |
26 | ### Code explanation
27 |
28 | (1) data_loading.py
29 | - Transform raw time-series data to preprocessed time-series data (Google data)
30 | - Generate Sine data
31 |
32 | (2) Metrics directory
33 | (a) visualization_metrics.py
34 | - PCA and t-SNE analysis between Original data and Synthetic data
35 | (b) discriminative_metrics.py
36 | - Use Post-hoc RNN to classify Original data and Synthetic data
37 | (c) predictive_metrics.py
38 | - Use Post-hoc RNN to predict one-step ahead (last feature)
39 |
40 | (3) timegan.py
41 | - Use original time-series data as a training set to generate synthetic time-series data
42 |
43 | (4) main_timegan.py
44 | - Report discriminative and predictive scores for the dataset and t-SNE and PCA analysis
45 |
46 | (5) utils.py
47 | - Some utility functions for metrics and timeGAN.
48 |
49 | ### Command inputs:
50 |
51 | - data_name: sine, stock, or energy
52 | - seq_len: sequence length
53 | - module: gru, lstm, or lstmLN
54 | - hidden_dim: hidden dimensions
55 | - num_layers: number of layers
56 | - iterations: number of training iterations
57 | - batch_size: the number of samples in each batch
58 | - metric_iterations: number of iterations for metric computation
59 |
60 | Note that network parameters should be optimized for different datasets.
61 |
62 | ### Example command
63 |
64 | ```shell
65 | $ python3 main_timegan.py --data_name stock --seq_len 24 --module gru \
66 |   --hidden_dim 24 --num_layer 3 --iteration 50000 --batch_size 128 \
67 |   --metric_iteration 10
68 | ```
69 |
70 | ### Outputs
71 |
72 | - ori_data: original data
73 | - generated_data: generated synthetic data
74 | - metric_results: discriminative and predictive scores
75 | - visualization: PCA and tSNE analysis
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/timeGAN/data_loading.py:
--------------------------------------------------------------------------------
1 | """Time-series Generative Adversarial Networks (TimeGAN) Codebase.
2 |
3 | Reference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar,
4 | "Time-series Generative Adversarial Networks,"
5 | Neural Information Processing Systems (NeurIPS), 2019.
6 |
7 | Paper link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks
8 |
9 | Last updated Date: April 24th 2020
10 | Code author: Jinsung Yoon (jsyoon0823@gmail.com)
11 |
12 | -----------------------------
13 |
14 | data_loading.py
15 |
16 | (0) MinMaxScaler: Min Max normalizer
17 | (1) sine_data_generation: Generate sine dataset
18 | (2) real_data_loading: Load and preprocess real data
19 | - stock_data: https://finance.yahoo.com/quote/GOOG/history?p=GOOG
20 | - energy_data: http://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction
21 | """
22 |
23 | ## Necessary Packages
24 | import numpy as np
25 |
26 |
27 | def MinMaxScaler(data):
28 | """Min Max normalizer.
29 |
30 | Args:
31 | - data: original data
32 |
33 | Returns:
34 | - norm_data: normalized data
35 | """
36 | numerator = data - np.min(data, 0)
37 | denominator = np.max(data, 0) - np.min(data, 0)
38 | norm_data = numerator / (denominator + 1e-7)
39 | return norm_data
40 |
41 |
42 | def sine_data_generation (no, seq_len, dim):
43 | """Sine data generation.
44 |
45 | Args:
46 | - no: the number of samples
47 | - seq_len: sequence length of the time-series
48 | - dim: feature dimensions
49 |
50 | Returns:
51 | - data: generated data
52 | """
53 | # Initialize the output
54 | data = list()
55 |
56 | # Generate sine data
57 | for i in range(no):
58 | # Initialize each time-series
59 | temp = list()
60 | # For each feature
61 | for k in range(dim):
62 | # Randomly drawn frequency and phase
63 | freq = np.random.uniform(0, 0.1)
64 | phase = np.random.uniform(0, 0.1)
65 |
66 | # Generate sine signal based on the drawn frequency and phase
67 | temp_data = [np.sin(freq * j + phase) for j in range(seq_len)]
68 | temp.append(temp_data)
69 |
70 | # Align row/column
71 | temp = np.transpose(np.asarray(temp))
72 | # Normalize to [0,1]
73 | temp = (temp + 1)*0.5
74 | # Stack the generated data
75 | data.append(temp)
76 |
77 | return data
78 |
79 |
80 | def real_data_loading (data_name, seq_len):
81 | """Load and preprocess real-world datasets.
82 |
83 | Args:
84 | - data_name: stock or energy
85 | - seq_len: sequence length
86 |
87 | Returns:
88 | - data: preprocessed data.
89 | """
90 | assert data_name in ['stock','energy']
91 |
92 | if data_name == 'stock':
93 | ori_data = np.loadtxt('data/stock_data.csv', delimiter = ",",skiprows = 1)
94 | elif data_name == 'energy':
95 | ori_data = np.loadtxt('data/energy_data.csv', delimiter = ",",skiprows = 1)
96 |
97 | # Flip the data to make chronological data
98 | ori_data = ori_data[::-1]
99 | # Normalize the data
100 | ori_data = MinMaxScaler(ori_data)
101 |
102 | # Preprocess the dataset
103 | temp_data = []
104 | # Cut data by sequence length
105 | for i in range(0, len(ori_data) - seq_len):
106 | _x = ori_data[i:i + seq_len]
107 | temp_data.append(_x)
108 |
109 | # Mix the datasets (to make it similar to i.i.d)
110 | idx = np.random.permutation(len(temp_data))
111 | data = []
112 | for i in range(len(temp_data)):
113 | data.append(temp_data[idx[i]])
114 |
115 | return data
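For example, with the functions above in scope (sizes are illustrative):

```python
# Generate 10 five-dimensional sine sequences of length 24, scaled to [0, 1].
data = sine_data_generation(no=10, seq_len=24, dim=5)
print(len(data), data[0].shape)    # 10 (24, 5)
```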
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/timeGAN/main.py:
--------------------------------------------------------------------------------
1 | ## Necessary packages
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import numpy as np
7 | import warnings
8 | warnings.filterwarnings("ignore")
9 |
10 | from timegan import timegan
11 | from data_loading import real_data_loading, sine_data_generation
12 | from metrics.discriminative_metrics import discriminative_score_metrics
13 | from metrics.predictive_metrics import predictive_score_metrics
14 | from metrics.visualization_metrics import visualization
15 | import time
16 |
17 | ## Network parameters
18 | parameters = dict()
19 | parameters['module'] = 'gru'
20 | parameters['hidden_dim'] = 256
21 | parameters['num_layer'] = 2
22 | parameters['iterations'] = 5000
23 | parameters['batch_size'] = 64
24 |
25 | # Run TimeGAN
26 | data_npz = np.load("../data/real_train.npz")
27 | ori_data = data_npz["trans"]
28 |
29 | start_time = time.time()
30 | generated_data = timegan(ori_data, parameters)
31 | end_time = time.time()
32 | print('Finish Synthetic Data Generation')
33 |
34 | print(end_time-start_time)
35 | print(generated_data.shape)
36 | np.savez('generated_data_3.npz', data_feature=generated_data, time=end_time-start_time)
37 |
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/timeGAN/metrics/predictive_metrics.py:
--------------------------------------------------------------------------------
1 | """Time-series Generative Adversarial Networks (TimeGAN) Codebase.
2 |
3 | Reference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar,
4 | "Time-series Generative Adversarial Networks,"
5 | Neural Information Processing Systems (NeurIPS), 2019.
6 |
7 | Paper link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks
8 |
9 | Last updated Date: April 24th 2020
10 | Code author: Jinsung Yoon (jsyoon0823@gmail.com)
11 |
12 | -----------------------------
13 |
14 | predictive_metrics.py
15 |
16 | Note: Use Post-hoc RNN to predict one-step ahead (last feature)
17 | """
18 |
19 | # Necessary Packages
20 | import tensorflow as tf
21 | import numpy as np
22 | from sklearn.metrics import mean_absolute_error
23 | from utils import extract_time
24 |
25 |
26 | def predictive_score_metrics (ori_data, generated_data):
27 | """Report the performance of Post-hoc RNN one-step ahead prediction.
28 |
29 | Args:
30 | - ori_data: original data
31 | - generated_data: generated synthetic data
32 |
33 | Returns:
34 | - predictive_score: MAE of the predictions on the original data
35 | """
36 | # Initialization on the Graph
37 | tf.reset_default_graph()
38 |
39 | # Basic Parameters
40 | no, seq_len, dim = np.asarray(ori_data).shape
41 |
42 | # Set maximum sequence length and each sequence length
43 | ori_time, ori_max_seq_len = extract_time(ori_data)
44 |   generated_time, generated_max_seq_len = extract_time(generated_data)
45 | max_seq_len = max([ori_max_seq_len, generated_max_seq_len])
46 |
47 |   ## Build a post-hoc RNN predictive network
48 | # Network parameters
49 | hidden_dim = int(dim/2)
50 | iterations = 5000
51 | batch_size = 128
52 |
53 | # Input place holders
54 | X = tf.placeholder(tf.float32, [None, max_seq_len-1, dim-1], name = "myinput_x")
55 | T = tf.placeholder(tf.int32, [None], name = "myinput_t")
56 | Y = tf.placeholder(tf.float32, [None, max_seq_len-1, 1], name = "myinput_y")
57 |
58 | # Predictor function
59 | def predictor (x, t):
60 | """Simple predictor function.
61 |
62 | Args:
63 | - x: time-series data
64 | - t: time information
65 |
66 | Returns:
67 | - y_hat: prediction
68 | - p_vars: predictor variables
69 | """
70 | with tf.variable_scope("predictor", reuse = tf.AUTO_REUSE) as vs:
71 | p_cell = tf.nn.rnn_cell.GRUCell(num_units=hidden_dim, activation=tf.nn.tanh, name = 'p_cell')
72 | p_outputs, p_last_states = tf.nn.dynamic_rnn(p_cell, x, dtype=tf.float32, sequence_length = t)
73 | y_hat_logit = tf.contrib.layers.fully_connected(p_outputs, 1, activation_fn=None)
74 | y_hat = tf.nn.sigmoid(y_hat_logit)
75 | p_vars = [v for v in tf.all_variables() if v.name.startswith(vs.name)]
76 |
77 | return y_hat, p_vars
78 |
79 | y_pred, p_vars = predictor(X, T)
80 | # Loss for the predictor
81 | p_loss = tf.losses.absolute_difference(Y, y_pred)
82 | # optimizer
83 | p_solver = tf.train.AdamOptimizer().minimize(p_loss, var_list = p_vars)
84 |
85 | ## Training
86 | # Session start
87 | sess = tf.Session()
88 | sess.run(tf.global_variables_initializer())
89 |
90 | # Training using Synthetic dataset
91 | for itt in range(iterations):
92 |
93 | # Set mini-batch
94 | idx = np.random.permutation(len(generated_data))
95 | train_idx = idx[:batch_size]
96 |
97 | X_mb = list(generated_data[i][:-1,:(dim-1)] for i in train_idx)
98 | T_mb = list(generated_time[i]-1 for i in train_idx)
99 | Y_mb = list(np.reshape(generated_data[i][1:,(dim-1)],[len(generated_data[i][1:,(dim-1)]),1]) for i in train_idx)
100 |
101 | # Train predictor
102 | _, step_p_loss = sess.run([p_solver, p_loss], feed_dict={X: X_mb, T: T_mb, Y: Y_mb})
103 |
104 | ## Test the trained model on the original data
105 | idx = np.random.permutation(len(ori_data))
106 |   test_idx = idx[:no]
107 |
108 |   X_mb = list(ori_data[i][:-1,:(dim-1)] for i in test_idx)
109 |   T_mb = list(ori_time[i]-1 for i in test_idx)
110 |   Y_mb = list(np.reshape(ori_data[i][1:,(dim-1)], [len(ori_data[i][1:,(dim-1)]),1]) for i in test_idx)
111 |
112 | # Prediction
113 | pred_Y_curr = sess.run(y_pred, feed_dict={X: X_mb, T: T_mb})
114 |
115 | # Compute the performance in terms of MAE
116 | MAE_temp = 0
117 | for i in range(no):
118 | MAE_temp = MAE_temp + mean_absolute_error(Y_mb[i], pred_Y_curr[i,:,:])
119 |
120 | predictive_score = MAE_temp / no
121 |
122 | return predictive_score
123 |
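# --- Note (illustrative addition) ---
# This metric follows a "train on synthetic, test on real" (TSTR) protocol:
# the GRU predictor is fit only on generated_data, and its one-step-ahead MAE
# is then measured on ori_data, so a lower score indicates more useful
# synthetic data. Typical call:
#
# pred_score = predictive_score_metrics(ori_data, generated_data)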
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/timeGAN/metrics/visualization_metrics.py:
--------------------------------------------------------------------------------
1 | """Time-series Generative Adversarial Networks (TimeGAN) Codebase.
2 |
3 | Reference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar,
4 | "Time-series Generative Adversarial Networks,"
5 | Neural Information Processing Systems (NeurIPS), 2019.
6 |
7 | Paper link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks
8 |
9 | Last updated Date: April 24th 2020
10 | Code author: Jinsung Yoon (jsyoon0823@gmail.com)
11 |
12 | -----------------------------
13 |
14 | visualization_metrics.py
15 |
16 | Note: Use PCA or tSNE for generated and original data visualization
17 | """
18 |
19 | # Necessary packages
20 | from sklearn.manifold import TSNE
21 | from sklearn.decomposition import PCA
22 | import matplotlib.pyplot as plt
23 | import numpy as np
24 |
25 |
26 | def visualization (ori_data, generated_data, analysis):
27 | """Using PCA or tSNE for generated and original data visualization.
28 |
29 | Args:
30 | - ori_data: original data
31 | - generated_data: generated synthetic data
32 | - analysis: tsne or pca
33 | """
34 | # Analysis sample size (for faster computation)
35 | anal_sample_no = min([1000, len(ori_data)])
36 | idx = np.random.permutation(len(ori_data))[:anal_sample_no]
37 |
38 | # Data preprocessing
39 | ori_data = np.asarray(ori_data)
40 | generated_data = np.asarray(generated_data)
41 |
42 | ori_data = ori_data[idx]
43 | generated_data = generated_data[idx]
44 |
45 | no, seq_len, dim = ori_data.shape
46 |
47 | for i in range(anal_sample_no):
48 | if (i == 0):
49 | prep_data = np.reshape(np.mean(ori_data[0,:,:], 1), [1,seq_len])
50 | prep_data_hat = np.reshape(np.mean(generated_data[0,:,:],1), [1,seq_len])
51 | else:
52 | prep_data = np.concatenate((prep_data,
53 | np.reshape(np.mean(ori_data[i,:,:],1), [1,seq_len])))
54 | prep_data_hat = np.concatenate((prep_data_hat,
55 | np.reshape(np.mean(generated_data[i,:,:],1), [1,seq_len])))
56 |
57 | # Visualization parameter
58 | colors = ["red" for i in range(anal_sample_no)] + ["blue" for i in range(anal_sample_no)]
59 |
60 | if analysis == 'pca':
61 | # PCA Analysis
62 | pca = PCA(n_components = 2)
63 | pca.fit(prep_data)
64 | pca_results = pca.transform(prep_data)
65 | pca_hat_results = pca.transform(prep_data_hat)
66 |
67 | # Plotting
68 | f, ax = plt.subplots(1)
69 | plt.scatter(pca_results[:,0], pca_results[:,1],
70 | c = colors[:anal_sample_no], alpha = 0.2, label = "Original")
71 | plt.scatter(pca_hat_results[:,0], pca_hat_results[:,1],
72 | c = colors[anal_sample_no:], alpha = 0.2, label = "Synthetic")
73 |
74 | ax.legend()
75 | plt.title('PCA plot')
76 | plt.xlabel('x-pca')
77 |     plt.ylabel('y-pca')
78 | plt.show()
79 |
80 | elif analysis == 'tsne':
81 |
82 | # Do t-SNE Analysis together
83 | prep_data_final = np.concatenate((prep_data, prep_data_hat), axis = 0)
84 |
85 |     # t-SNE analysis
86 | tsne = TSNE(n_components = 2, verbose = 1, perplexity = 40, n_iter = 300)
87 | tsne_results = tsne.fit_transform(prep_data_final)
88 |
89 | # Plotting
90 | f, ax = plt.subplots(1)
91 |
92 | plt.scatter(tsne_results[:anal_sample_no,0], tsne_results[:anal_sample_no,1],
93 | c = colors[:anal_sample_no], alpha = 0.2, label = "Original")
94 | plt.scatter(tsne_results[anal_sample_no:,0], tsne_results[anal_sample_no:,1],
95 | c = colors[anal_sample_no:], alpha = 0.2, label = "Synthetic")
96 |
97 | ax.legend()
98 |
99 | plt.title('t-SNE plot')
100 | plt.xlabel('x-tsne')
101 |     plt.ylabel('y-tsne')
102 | plt.show()
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/timeGAN/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.17.2
2 | tensorflow>=1.15.0
3 | tqdm>=4.36.1
4 | argparse>=1.1
5 | pandas>=0.25.1
6 | scikit-learn>=0.21.3
7 | matplotlib>=3.1.1
--------------------------------------------------------------------------------
/Code/BenchmarkModel/SyntheticDataGeneration/timeGAN/utils.py:
--------------------------------------------------------------------------------
1 | """Time-series Generative Adversarial Networks (TimeGAN) Codebase.
2 |
3 | Reference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar,
4 | "Time-series Generative Adversarial Networks,"
5 | Neural Information Processing Systems (NeurIPS), 2019.
6 |
7 | Paper link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks
8 |
9 | Last updated Date: April 24th 2020
10 | Code author: Jinsung Yoon (jsyoon0823@gmail.com)
11 |
12 | -----------------------------
13 |
14 | utils.py
15 |
16 | (1) train_test_divide: Divide train and test data for both original and synthetic data.
17 | (2) extract_time: Returns Maximum sequence length and each sequence length.
18 | (3) rnn_cell: Basic RNN Cell.
19 | (4) random_generator: random vector generator
20 | (5) batch_generator: mini-batch generator
21 | """
22 |
23 | ## Necessary Packages
24 | import numpy as np
25 | import tensorflow as tf
26 |
27 |
28 | def train_test_divide (data_x, data_x_hat, data_t, data_t_hat, train_rate = 0.8):
29 | """Divide train and test data for both original and synthetic data.
30 |
31 | Args:
32 | - data_x: original data
33 | - data_x_hat: generated data
34 | - data_t: original time
35 | - data_t_hat: generated time
36 | - train_rate: ratio of training data from the original data
37 | """
38 | # Divide train/test index (original data)
39 | no = len(data_x)
40 | idx = np.random.permutation(no)
41 | train_idx = idx[:int(no*train_rate)]
42 | test_idx = idx[int(no*train_rate):]
43 |
44 | train_x = [data_x[i] for i in train_idx]
45 | test_x = [data_x[i] for i in test_idx]
46 | train_t = [data_t[i] for i in train_idx]
47 | test_t = [data_t[i] for i in test_idx]
48 |
49 | # Divide train/test index (synthetic data)
50 | no = len(data_x_hat)
51 | idx = np.random.permutation(no)
52 | train_idx = idx[:int(no*train_rate)]
53 | test_idx = idx[int(no*train_rate):]
54 |
55 | train_x_hat = [data_x_hat[i] for i in train_idx]
56 | test_x_hat = [data_x_hat[i] for i in test_idx]
57 | train_t_hat = [data_t_hat[i] for i in train_idx]
58 | test_t_hat = [data_t_hat[i] for i in test_idx]
59 |
60 | return train_x, train_x_hat, test_x, test_x_hat, train_t, train_t_hat, test_t, test_t_hat
61 |
62 |
63 | def extract_time (data):
64 | """Returns Maximum sequence length and each sequence length.
65 |
66 | Args:
67 | - data: original data
68 |
69 | Returns:
70 | - time: extracted time information
71 | - max_seq_len: maximum sequence length
72 | """
73 | time = list()
74 | max_seq_len = 0
75 | for i in range(len(data)):
76 | max_seq_len = max(max_seq_len, len(data[i][:,0]))
77 | time.append(len(data[i][:,0]))
78 |
79 | return time, max_seq_len
80 |
81 |
82 | def rnn_cell(module_name, hidden_dim):
83 | """Basic RNN Cell.
84 |
85 | Args:
86 | - module_name: gru, lstm, or lstmLN
87 |
88 | Returns:
89 | - rnn_cell: RNN Cell
90 | """
91 | assert module_name in ['gru','lstm','lstmLN']
92 |
93 | # GRU
94 | if (module_name == 'gru'):
95 | rnn_cell = tf.nn.rnn_cell.GRUCell(num_units=hidden_dim, activation=tf.nn.tanh)
96 | # LSTM
97 | elif (module_name == 'lstm'):
98 | rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, activation=tf.nn.tanh)
99 | # LSTM Layer Normalization
100 | elif (module_name == 'lstmLN'):
101 | rnn_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(num_units=hidden_dim, activation=tf.nn.tanh)
102 | return rnn_cell
103 |
104 |
105 | def random_generator (batch_size, z_dim, T_mb, max_seq_len):
106 | """Random vector generation.
107 |
108 | Args:
109 |     - batch_size: number of random vectors to generate
110 | - z_dim: dimension of random vector
111 | - T_mb: time information for the random vector
112 | - max_seq_len: maximum sequence length
113 |
114 | Returns:
115 | - Z_mb: generated random vector
116 | """
117 | Z_mb = list()
118 | for i in range(batch_size):
119 | temp = np.zeros([max_seq_len, z_dim])
120 | temp_Z = np.random.uniform(0., 1, [T_mb[i], z_dim])
121 | temp[:T_mb[i],:] = temp_Z
122 |     Z_mb.append(temp)  # append the zero-padded [max_seq_len, z_dim] vector
123 | return Z_mb
124 |
125 |
126 | def batch_generator(data, time, batch_size):
127 | """Mini-batch generator.
128 |
129 | Args:
130 | - data: time-series data
131 | - time: time information
132 | - batch_size: the number of samples in each batch
133 |
134 | Returns:
135 | - X_mb: time-series data in each batch
136 | - T_mb: time information in each batch
137 | """
138 | no = len(data)
139 | idx = np.random.permutation(no)
140 | train_idx = idx[:batch_size]
141 |
142 | X_mb = list(data[i] for i in train_idx)
143 | T_mb = list(time[i] for i in train_idx)
144 |
145 | return X_mb, T_mb
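# --- Usage sketch (illustrative addition, assuming 3-D numpy data) ---
#
# data = np.random.rand(100, 24, 6)  # 100 sequences, length 24, 6 features
# time, max_seq_len = extract_time(data)
# X_mb, T_mb = batch_generator(data, time, batch_size=32)
# Z_mb = random_generator(batch_size=32, z_dim=6, T_mb=T_mb, max_seq_len=max_seq_len)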
--------------------------------------------------------------------------------
/Code/Data Processing/README.md:
--------------------------------------------------------------------------------
1 | # Data Processing for Simulation
2 | Here we describe how to process the original load consumption, weather, wind and solar time series data into the ready-for-simulation format.
3 |
4 | ## Prerequisite
5 | - Install required packages
6 | ```angular2html
7 | pip install -r requirements.txt
8 | ```
9 |
10 | ## Processing Details
11 | - Before running the scripts, make sure the data and save paths in each script are consistent with where you downloaded the data.
12 | - Run the scripts sequentially, in the order indicated by their file names (see the driver sketch after this list).
13 | - `renewable_v2_step1_weather_v2_extended.py`: collect 5-minute-level weather data from 2018 to 2020 across the U.S. from [NSRDB](https://maps.nrel.gov/nsrdb-viewer/?aL=mcQtmw%255Bv%255D%3Dt&bL=clight&cE=0&lR=0&mC=31.970803930433096%2C-82.705078125&zL=5).
14 | - `renewable_v2_step2_wind_v2_extended.py`: calculate the local wind turbine output time series across the U.S. from 2018 to 2020 based on the collected weather data using the model of [PreREISE](https://github.com/Breakthrough-Energy/PreREISE.git).
15 | - `renewable_v2_step3_solar_v2_extended.py`: calculate the local solar PV output time series data across the U.S. from 2018 to 2020 based on the collected weather data using the model of [SAM](https://sam.nrel.gov/).
16 | - `renewable_v2_step4_load_v2_extended.py`: collect the real-world zone-wide load time series data across the U.S. from 2018 to 2020 from [COVID-EMDA](https://github.com/tamu-engineering-research/COVID-EMDA.git).
17 | - `renewable_v2_step5_aggregate_v2_extended.py`: collect all processed files in the former steps into the ready-for-simulation format.
18 | - The obtained results should match those in the `Minute-level Load and Renewable` folder shared on [Zenodo](https://zenodo.org/record/5130612#.YTIiZI5KiUk).
19 |
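A minimal driver sketch for running the whole pipeline in order (an illustrative addition; it assumes all five scripts sit in the current directory and that the paths inside each script have already been adjusted):
```python
import subprocess

steps = [
    'renewable_v2_step1_weather_v2_extended.py',
    'renewable_v2_step2_wind_v2_extended.py',
    'renewable_v2_step3_solar_v2_extended.py',
    'renewable_v2_step4_load_v2_extended.py',
    'renewable_v2_step5_aggregate_v2_extended.py',
]
for step in steps:
    subprocess.run(['python', step], check=True)  # stop at the first failure
```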
20 | ## References
21 | 1. **COVID-EMDA**:
22 | G. Ruan, D. Wu, X. Zheng, H. Zhong, C. Kang, M. A. Dahleh, S. Sivaranjani, and L. Xie, "A Cross-Domain Approach to Analyzing the Short-Run Impact of COVID-19 on the U.S. Electricity Sector," Joule, vol. 4, pp. 1-16, 2020.
23 | https://github.com/tamu-engineering-research/COVID-EMDA.git
24 |
25 | 2. **PreREISE**:
26 | Breakthrough Energy, PreREISE.
27 | https://github.com/Breakthrough-Energy/PreREISE.git
28 |
29 | 3. **NSRDB**:
30 | NREL, NSRDB Data Viewer.
31 | https://maps.nrel.gov/nsrdb-viewer/?aL=mcQtmw%255Bv%255D%3Dt&bL=clight&cE=0&lR=0&mC=31.970803930433096%2C-82.705078125&zL=5
32 |
33 | 4. **SAM**:
34 | NREL, System Advisor Model.
35 | https://sam.nrel.gov/
36 |
37 |
--------------------------------------------------------------------------------
/Code/Data Processing/renewable_v2_step1_weather_v2_extended.py:
--------------------------------------------------------------------------------
1 | from powersimdata.input.grid import Grid
2 | # from PreREISE import prereise as prr#rap, impute, helpers
3 | from prereise.gather.winddata.rap import rap, impute, helpers
4 | from prereise.gather.winddata.rap import power_curves
5 | from datetime import datetime
6 | import os
7 | import pandas as pd
8 | import numpy as np
9 | from matplotlib import pyplot as plt
10 | import requests
11 |
12 | def read_solar_weather_csv(location, year, file_name, save_path):
13 | data = pd.read_csv(file_name, delimiter = ',', skiprows=2)
14 | data['time_str'] = data.apply(lambda row: str(int(row['Year']))+"-"+str(int(row['Month']))+"-"+str(int(row['Day']))+' '+str(int(row['Hour']))+':'+str(int(row['Minute'])), axis=1)
15 | data['time'] = data.apply(lambda row: datetime.strptime(row['time_str'], '%Y-%m-%d %H:%M'), axis=1)
16 | column_list_raw = ['time','DHI','DNI','GHI','Dew Point','Solar Zenith Angle', 'Wind Speed','Relative Humidity', 'Temperature']
17 | weather_raw = data[column_list_raw].copy()
18 | weather_interp = pd.DataFrame()
19 | time_interp = pd.date_range(start='1-1-'+year, end='12-31-'+year, freq='1T')
20 | weather_interp['time'] = time_interp
21 | for i in range(1, len(column_list_raw)):
22 | column_tmp = column_list_raw[i]
23 | column_interp = np.interp(time_interp, data['time'].copy(), data[column_tmp].copy())
24 | weather_interp[column_tmp] = column_interp
25 | weather_raw.to_csv(save_path+'weather_'+location+year+'.csv')
26 | weather_interp.to_csv(save_path+'weather_interp_'+location+year+'.csv')
27 |
28 | if __name__=='__main__':
29 | iso_list = [ 'CAISO','NYISO','PJM','ERCOT','MISO','SPP']
30 | iso_zone_list = {
31 | 'CAISO':['CAISO_zone_'+str(num)+"_" for num in range(1,4+1)],
32 | 'NYISO':['NYISO_zone_'+str(num)+"_" for num in range(1,11+1)],
33 | 'PJM':['PJM_zone_'+str(num)+"_" for num in range(1,20+1)],
34 | 'ERCOT':['ERCOT_zone_'+str(num)+"_" for num in range(1,8+1)],
35 | 'MISO':['MISO_zone_'+str(num)+"_" for num in range(1,6+1)],
36 | 'SPP':['SPP_zone_'+str(num)+"_" for num in range(1,17+1)],
37 | }
38 | year_list = ['2018','2019','2020']
39 | weather_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\weather_v2_extended/'
40 | save_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\weather_v2_extended/'
41 | # folder_list = [x[0] for x in os.walk(weather_path)]
42 | # folder_list = folder_list[1:]
43 | folder_list = next(os.walk(weather_path))[1]
44 | for iso_tmp in iso_list:
45 | print(iso_tmp)
46 | iso_zone_list_tmp = iso_zone_list[iso_tmp]
47 | for iso_zone_tmp in iso_zone_list_tmp:
48 | for folder in folder_list:
49 | if folder.startswith(iso_zone_tmp):
50 | subfolder = weather_path+folder
51 | for year in year_list:
52 | print(iso_zone_tmp+year)
53 | for root, direct, files in os.walk(subfolder+'/'):
54 | for file in files:
55 | if file.endswith(year+'.csv'):
56 | solar_data = read_solar_weather_csv(
57 | location=iso_zone_tmp, year=year,
58 | file_name = subfolder+'/'+file,
59 | save_path=save_path)
60 |
61 |
62 |
63 |
64 |
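# --- Note on the interpolation idiom (illustrative addition) ---
# np.interp expects numeric, monotonically increasing x values. A defensive
# variant of the minute-level resampling above converts timestamps to int64
# nanoseconds explicitly before interpolating:
#
# time_interp = pd.date_range(start='1-1-2018', end='12-31-2018', freq='1T')
# x = time_interp.astype('int64')
# xp = pd.to_datetime(data['time']).astype('int64')
# weather_interp['DNI'] = np.interp(x, xp, data['DNI'])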
--------------------------------------------------------------------------------
/Code/Data Processing/renewable_v2_step2_wind_v2_extended.py:
--------------------------------------------------------------------------------
1 | from powersimdata.input.grid import Grid
2 | # from PreREISE import prereise as prr#rap, impute, helpers
3 | from prereise.gather.winddata.rap import rap, impute, helpers
4 | from prereise.gather.winddata.rap import power_curves
5 | from datetime import datetime
6 |
7 | import pandas as pd
8 | import numpy as np
9 | from matplotlib import pyplot as plt
10 | import requests
11 |
12 | def read_solar_weather_csv(
13 | location, year, power_curve,
14 | file_name, save_path,
15 | ASOS_height = 10,
16 | turbine_height = 80,
17 | alpha=0.15):
18 | data = pd.read_csv(file_name, delimiter = ',')
19 | data['time'] = data.apply(lambda row: datetime.strptime(row['time'], '%Y-%m-%d %H:%M:%S'), axis=1)
20 | column_list = ['time', 'Wind Speed']
21 | weather = data[['time', 'Wind Speed']].copy()
22 | weather['Wind Speed_noise'] = weather.apply(lambda row: np.maximum(0, row['Wind Speed']+np.random.normal(0, 0.05, 1)[0]), axis=1)
23 |     weather['Wind Speed_turbine'] = weather['Wind Speed_noise']*(turbine_height/ASOS_height)**alpha  # power-law wind shear extrapolation to hub height
24 | # estimate wind energy based on the speed
25 | wind_speed_base = power_curve.index.to_numpy()
26 | power_base = power_curve.to_numpy()
27 | wind_speed_esti = weather['Wind Speed_turbine'].to_numpy()
28 | estimated_power = np.interp(wind_speed_esti, wind_speed_base, power_base)
29 | weather['wind_power'] = estimated_power
30 | # interpolation
31 | wind_raw = weather[['time','wind_power']].copy()
32 | wind_interp = pd.DataFrame()
33 | time_interp = pd.date_range(start='1-1-'+year, end='12-31-'+year, freq='1T')
34 | wind_interp['time'] = time_interp
35 | wind_interp['wind_power'] = np.interp(time_interp, wind_raw['time'].copy(), wind_raw['wind_power'].copy())
36 | # save csv
37 | wind_raw.to_csv(save_path+'wind_'+location+year+'.csv')
38 | wind_interp.to_csv(save_path+'wind_interp_'+location+year+'.csv')
39 |
40 |
41 | if __name__=='__main__':
42 | # read power curves utilizing the functions in PreREISE
43 | state_power_curves = power_curves.get_state_power_curves()
44 | turbine_power_curves = power_curves.get_turbine_power_curves()
45 |
46 | # read solar generation data, rated 1 kW
47 | iso_list = [ 'CAISO','NYISO','PJM','ERCOT','MISO','SPP']
48 | state_list = ['CA','NY','IL','TX','OK','MI']
49 | iso_zone_list = {
50 | 'CAISO':['CAISO_zone_'+str(num)+"_" for num in range(1,4+1)],
51 | 'NYISO':['NYISO_zone_'+str(num)+"_" for num in range(1,11+1)],
52 | 'PJM':['PJM_zone_'+str(num)+"_" for num in range(1,20+1)],
53 | 'ERCOT':['ERCOT_zone_'+str(num)+"_" for num in range(1,8+1)],
54 | 'MISO':['MISO_zone_'+str(num)+"_" for num in range(1,6+1)],
55 | 'SPP':['SPP_zone_'+str(num)+"_" for num in range(1,17+1)],
56 | }
57 | year_list = ['2018','2019','2020']
58 | weather_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\weather_v2_extended/'
59 | save_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\wind_v2_extended/'
60 | i=0
61 | for iso_tmp in iso_list:
62 | print(iso_tmp)
63 | state = state_list[i]
64 | i+=1
65 | iso_zone_list_tmp = iso_zone_list[iso_tmp]
66 | for iso_zone_tmp in iso_zone_list_tmp:
67 | for year in year_list:
68 | file_name = 'weather_'+iso_zone_tmp+year+'.csv'
69 | print(iso_zone_tmp+'_'+year)
70 | solar_data = read_solar_weather_csv(
71 | location=iso_zone_tmp, year=year,
72 | power_curve=state_power_curves[state],
73 | file_name = weather_path+'weather_'+iso_zone_tmp+year+".csv",
74 | save_path=save_path)
75 |
76 |
77 |
78 |
79 |
--------------------------------------------------------------------------------
/Code/Data Processing/renewable_v2_step3_solar_v2_extended.py:
--------------------------------------------------------------------------------
1 | from powersimdata.input.grid import Grid
2 | # from PreREISE import prereise as prr#rap, impute, helpers
3 | from prereise.gather.winddata.rap import rap, impute, helpers
4 | from prereise.gather.winddata.rap import power_curves
5 | from datetime import datetime
6 |
7 | import pandas as pd
8 | import numpy as np
9 | from matplotlib import pyplot as plt
10 | import requests
11 |
12 | def read_solar_csv(
13 | location, year,
14 | file_name, save_path,):
15 | # read data
16 | data = pd.read_csv(file_name, delimiter = ',')
17 | data['time'] = data.apply(lambda row: datetime.strptime(row['Time stamp'], '%b %d, %I:%M %p'), axis=1)
18 | data['time'] = data.apply(lambda row: row['time'].replace(year=int(year)), axis=1)
19 |     data['solar_power'] = data['System power generated | (kW)'].copy()/4 # normalize by the 4 kW maximum DC power
20 | # interpolation
21 | solar_raw = data.copy()
22 | time_esti = pd.date_range(start='1-1-'+year, end='12-31-'+year, freq='1T')
23 | power_esti = np.interp(time_esti, solar_raw['time'], solar_raw['solar_power'])
24 | solar_interp = pd.DataFrame()
25 | solar_interp['time'] = time_esti
26 | solar_interp['solar_power'] = power_esti
27 | # save csv
28 | solar_raw.to_csv(save_path+'solar_'+location+year+'.csv')
29 | solar_interp.to_csv(save_path+'solar_interp_'+location+year+'.csv')
30 |
31 |
32 | if __name__=='__main__':
33 | # read power curves utilizing the functions in PreREISE
34 | state_power_curves = power_curves.get_state_power_curves()
35 | turbine_power_curves = power_curves.get_turbine_power_curves()
36 |
37 | # read solar generation data, rated 1 kW
38 | iso_list = [ 'CAISO','NYISO','PJM','ERCOT','MISO','SPP']
39 | state_list = ['CA','NY','IL','TX','OK','MI']
40 | iso_zone_list = {
41 | 'CAISO':['CAISO_zone_'+str(num)+"_" for num in range(1,4+1)],
42 | 'NYISO':['NYISO_zone_'+str(num)+"_" for num in range(1,11+1)],
43 | 'PJM':['PJM_zone_'+str(num)+"_" for num in range(1,20+1)],
44 | 'ERCOT':['ERCOT_zone_'+str(num)+"_" for num in range(1,8+1)],
45 | 'MISO':['MISO_zone_'+str(num)+"_" for num in range(1,6+1)],
46 | 'SPP':['SPP_zone_'+str(num)+"_" for num in range(1,17+1)],
47 | }
48 | year_list = ['2018','2019','2020']
49 | solar_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\solar_v2_extended/solar_SAM_output/'
50 | save_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\solar_v2_extended/'
51 | for iso_tmp in iso_list:
52 | print(iso_tmp)
53 | iso_zone_list_tmp = iso_zone_list[iso_tmp]
54 | for iso_zone_tmp in iso_zone_list_tmp:
55 | for year in year_list:
56 | file_name = iso_zone_tmp+year+'.csv'
57 | print(iso_zone_tmp+year)
58 | solar_data = read_solar_csv(
59 | location=iso_zone_tmp, year=year,
60 | file_name = solar_path+iso_zone_tmp+year+".csv",
61 | save_path=save_path)
62 |
63 |
64 |
65 |
66 |
--------------------------------------------------------------------------------
/Code/Data Processing/renewable_v2_step4_load_v2_extended.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 | import pandas as pd
4 | import numpy as np
5 | from matplotlib import pyplot as plt
6 | import requests
7 |
8 | def load_process(file, location, year_list, save_path):
9 | df = pd.read_csv(file, parse_dates = True,)
10 | flg_test = df.loc[(df['date']>='2018-01-01') & (df['date']<='2020-12-31')]
11 | if flg_test.empty:
12 | df['date_datetime'] = pd.to_datetime(df['date'])
13 | df['date'] = df.apply(lambda row: row['date_datetime'].strftime('%Y-%m-%d'), axis=1)
14 | df = df.loc[(df['date']>='2018-01-01') & (df['date']<='2020-12-31')]
15 | df = df.set_index('date')
16 | df_converted = pd.DataFrame(columns=['time', 'load_power'])
17 | for i in range(len(df.index)):
18 | print(df.index[i])
19 | date_tmp = df.index[i]
20 | hour_tmp = df['time'][i]
21 | time_tmp = datetime.strptime(date_tmp+" "+hour_tmp, '%Y-%m-%d %H:%M')#+timedelta(hours=j)
22 | power_tmp = df['load'][i]
23 | df_converted.loc[len(df_converted.index)] = [time_tmp, power_tmp]
24 | power_mean = df_converted['load_power'].mean()
25 |
26 | # interpolation
27 | for year in year_list:
28 | time_esti = pd.date_range(start='1-1-'+year, end='12-31-'+year, freq='1T')
29 | power_esti = np.interp(time_esti, df_converted['time'], df_converted['load_power'])
30 | df_interp = pd.DataFrame()
31 | df_interp['time'] = time_esti
32 | df_interp['load_power'] = power_esti
33 | df_interp['load_power'] = df_interp['load_power']/power_mean
34 | df_interp.to_csv(save_path+"load_interp_"+location+year+'.csv')
35 |
36 | if __name__=='__main__':
37 | # read solar generation data, rated 1 kW
38 | iso_list = ['PJM',]#[ 'CAISO','NYISO','PJM','ERCOT','MISO','SPP']
39 | iso_zone_list = {
40 | 'CAISO':['CAISO_zone_'+str(num)+"_" for num in range(1,4+1)],
41 | 'NYISO':['NYISO_zone_'+str(num)+"_" for num in range(1,11+1)],
42 | 'PJM':['PJM_zone_'+str(num)+"_" for num in range(1,20+1)],
43 | 'ERCOT':['ERCOT_zone_'+str(num)+"_" for num in range(1,8+1)],
44 | 'MISO':['MISO_zone_'+str(num)+"_" for num in range(1,6+1)],
45 | 'SPP':['SPP_zone_'+str(num)+"_" for num in range(1,17+1)],
46 | }
47 | year_list = ['2018','2019','2020']
48 | load_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\load_v2_extended/'
49 | save_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\load_v2_extended/'
50 | for iso_tmp in iso_list:
51 | print(iso_tmp)
52 | iso_zone_list_tmp = iso_zone_list[iso_tmp]
53 | for iso_zone_tmp in iso_zone_list_tmp:
54 | print(iso_zone_tmp)
55 | file_path = load_path+iso_zone_tmp[:-1]+'.csv'
56 | load_process(file_path, iso_zone_tmp, year_list, save_path)
57 |
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/Code/Data Processing/renewable_v2_step5_aggregate_v2_extended.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 | import pandas as pd
4 | import numpy as np
5 | from matplotlib import pyplot as plt
6 | import requests
7 |
8 |
9 | def read_data(file, location, year_list, columns, prefix):
10 | df_all = pd.DataFrame()
11 | for year in year_list:
12 | file_tmp = file+location+year+'.csv'
13 | df_tmp = pd.read_csv(file_tmp)
14 | df_tmp = df_tmp[columns]
15 | df_tmp = df_tmp.set_index('time')
16 | if df_all.empty:
17 | df_all = df_tmp.copy()
18 | else:
19 | df_all = pd.concat([df_all, df_tmp], axis=0)
20 | # df_all = df_all.add_prefix(prefix)
21 | return df_all
22 |
23 | if __name__=='__main__':
24 | # read solar generation data, rated 1 kW
25 | iso_list = [ 'CAISO','NYISO','PJM','ERCOT','MISO','SPP']
26 | iso_zone_list = {
27 | 'CAISO':['CAISO_zone_'+str(num)+"_" for num in range(1,4+1)],
28 | 'NYISO':['NYISO_zone_'+str(num)+"_" for num in range(1,11+1)],
29 | 'PJM':['PJM_zone_'+str(num)+"_" for num in range(1,20+1)],
30 | 'ERCOT':['ERCOT_zone_'+str(num)+"_" for num in range(1,8+1)],
31 | 'MISO':['MISO_zone_'+str(num)+"_" for num in range(1,6+1)],
32 | 'SPP':['SPP_zone_'+str(num)+"_" for num in range(1,17+1)],
33 | }
34 | year_list = ['2018','2019','2020']
35 | load_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\load_v2_extended/'
36 | solar_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\solar_v2_extended/'
37 | wind_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\wind_v2_extended/'
38 | weather_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\weather_v2_extended/'
39 | save_path = r'C:\Users\zheng\Google Drive\Colab Notebooks\94 PowerDataSet\Dataset\Renewable\psse_input_v3/'
40 |
41 | for iso_tmp in iso_list:
42 | print(iso_tmp)
43 | iso_zone_list_tmp = iso_zone_list[iso_tmp]
44 | for iso_zone_tmp in iso_zone_list_tmp:
45 | print(iso_zone_tmp)
46 | df_aggregate = pd.DataFrame()
47 | # load
48 | file_tmp = load_path+"load_interp_"
49 | prefix = ''
50 | df_data = read_data(file_tmp, iso_zone_tmp, year_list, ['time','load_power'], prefix)
51 | df_aggregate = df_data.copy()
52 | #wind
53 | file_tmp = wind_path+"wind_interp_"
54 | prefix = ''
55 | df_data = read_data(file_tmp, iso_zone_tmp, year_list, ['time','wind_power'], prefix)
56 | df_aggregate = pd.concat([df_aggregate, df_data],axis=1)
57 | #solar
58 | file_tmp = solar_path+"solar_interp_"
59 | prefix = ''
60 | df_data = read_data(file_tmp, iso_zone_tmp, year_list, ['time','solar_power'], prefix)
61 | df_aggregate = pd.concat([df_aggregate, df_data],axis=1)
62 | #weather
63 | file_tmp = weather_path+"weather_interp_"
64 | prefix = ''
65 | column_list_raw = ['time','DHI','DNI','GHI','Dew Point','Solar Zenith Angle', 'Wind Speed','Relative Humidity', 'Temperature']
66 | df_data = read_data(file_tmp, iso_zone_tmp, year_list, column_list_raw, prefix)
67 | df_aggregate = pd.concat([df_aggregate, df_data],axis=1)
68 | # save csv
69 | df_aggregate.to_csv(save_path+iso_zone_tmp+".csv")
71 |
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/Code/Data Processing/requirements.txt:
--------------------------------------------------------------------------------
1 | # This file may be used to create an environment using:
2 | # $ conda create --name <env> --file <this file>
3 | # platform: win-64
4 | appdirs=1.4.4=pypi_0
5 | atomicwrites=1.4.0=pypi_0
6 | attrs=21.2.0=pypi_0
7 | bcrypt=3.2.0=pypi_0
8 | black=21.6b0=pypi_0
9 | bokeh=2.0.2=pypi_0
10 | ca-certificates=2020.10.14=0
11 | certifi=2020.6.20=py37_0
12 | cffi=1.14.5=pypi_0
13 | cftime=1.5.0=pypi_0
14 | chardet=3.0.4=pypi_0
15 | click=8.0.1=pypi_0
16 | colorama=0.4.4=pypi_0
17 | coverage=5.5=pypi_0
18 | cryptography=3.4.7=pypi_0
19 | cycler=0.10.0=pypi_0
20 | decorator=4.4.2=pypi_0
21 | h5pyd=0.3.3=pypi_0
22 | idna=2.10=pypi_0
23 | importlib-metadata=4.6.0=pypi_0
24 | iniconfig=1.1.1=pypi_0
25 | jinja2=3.0.1=pypi_0
26 | kiwisolver=1.3.1=pypi_0
27 | markupsafe=2.0.1=pypi_0
28 | matplotlib=3.4.2=pypi_0
29 | mypy-extensions=0.4.3=pypi_0
30 | netcdf4=1.5.4=pypi_0
31 | networkx=2.5.1=pypi_0
32 | nrel-pysam=2.2.2=pypi_0
33 | nrel-pysam-stubs=2.2.2=pypi_0
34 | numpy=1.21.0=pypi_0
35 | openssl=1.1.1k=h2bbff1b_0
36 | packaging=21.0=pypi_0
37 | pandas=1.1.5=pypi_0
38 | paramiko=2.7.2=pypi_0
39 | pathspec=0.8.1=pypi_0
40 | pillow=8.3.0=pypi_0
41 | pip=21.1.3=py37haa95532_0
42 | pluggy=0.13.1=pypi_0
43 | powersimdata=0.4.3=dev_0
44 | prereise=0.4=pypi_0
45 | py=1.10.0=pypi_0
46 | pycparser=2.20=pypi_0
47 | pynacl=1.4.0=pypi_0
48 | pyparsing=2.4.7=pypi_0
49 | pyproj=2.6.1.post1=pypi_0
50 | pytest=6.2.4=pypi_0
51 | pytest-cov=2.12.1=pypi_0
52 | python=3.7.10=h6244533_0
53 | python-dateutil=2.7.5=pypi_0
54 | pytz=2021.1=pypi_0
55 | pywin32=227=py37he774522_1
56 | pyyaml=5.4.1=pypi_0
57 | regex=2021.7.1=pypi_0
58 | requests=2.24.0=pypi_0
59 | scipy=1.7.0=pypi_0
60 | setuptools=52.0.0=py37haa95532_0
61 | six=1.16.0=pypi_0
62 | sqlite=3.36.0=h2bbff1b_0
63 | toml=0.10.2=pypi_0
64 | tornado=6.1=pypi_0
65 | tqdm=4.29.1=pypi_0
66 | typed-ast=1.4.3=pypi_0
67 | typing-extensions=3.10.0.0=pypi_0
68 | urllib3=1.25.11=pypi_0
69 | vc=14.2=h21ff451_1
70 | vs2015_runtime=14.27.29016=h5e58377_2
71 | wheel=0.36.2=pyhd3eb1b0_0
72 | wincertstore=0.2=py37_0
73 | xlrd=1.2.0=pypi_0
74 | zipp=3.5.0=pypi_0
75 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/README.md:
--------------------------------------------------------------------------------
1 | # Transmission and Distribution (T+D) Joint Simulation
2 | Here we provide the source code and simulation case files for PSS/E and OpenDSS used to perform the joint simulation.
3 |
4 | ## Prerequisite
5 | - A Windows environment is required, since neither simulator supports Linux or macOS
6 | - Install PSS/E and the Python modules for Python 3.4 (psspy34): https://new.siemens.com/global/en/products/energy/energy-automation-and-smart-grid/pss-software/pss-e.html
7 | - Install OpenDSS: https://www.epri.com/pages/sa/opendss
8 | - Install required Python dependencies
9 | ```angular2html
10 | pip install -r requirements.txt
11 | ```
12 |
13 | ## Usage
14 | The core co-simulation engine is implemented in the script `cosim.py`. There are three ways to run the code, for different purposes (a minimal call sketch follows this list):
15 | - **Test, develop and debug:** `main.py` gives a simple example of initializing the co-simulation engine and running a steady-state or dynamic simulation. In the header, the user needs to set three paths: 1) `case_T`: the PSS/E transmission system case file (.sav / .raw); 2) `case_D`: the OpenDSS distribution system case file (.dss); 3) `data_path`: the input profile data from the PSML dataset. The data handle `data` and the co-simulation handle `env` are then created from these paths. The user can then call one of two functions to create and inspect simulated data: 1) `env.solve_ss(hour_num)` attempts to solve the T+D steady-state power flow for the hour of the input profile data given in the argument; 2) `env.dynsim(hour_num, step_num)` performs a dynamic simulation of a random disturbance event for the hour `hour_num`, continued for `step_num` timesteps.
16 | - **Create consecutive steady-state output data:** In `ss_data.py`, set the four path variables in the header: `case_T`, `case_D` and `data_path` are the same as described above, and `out_path` is the csv file where the simulation output is stored. Then, in `run_sspf.bat`, set the number of rows of the input profile data to be simulated. Last, execute `run_sspf.bat` and wait for the results. If the simulation is interrupted, the code finds the breakpoint and continues from the row where the last run ended, so the user can simply double-click the batch file again to resume.
17 | - **Create consecutive transient output data:** In `one.py`, set the four path variables in the header as described above. Then set the indices of the starting and ending rows of the input profile data in the loop in `run_scenarios.bat` and execute the batch file. The result folders are created automatically in the specified path.
18 |
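A minimal call sketch of the debug workflow in `main.py` (the constructor names below are hypothetical placeholders; check `cosim.py` for the actual API):
```python
# Hypothetical names for illustration only; the real classes live in cosim.py.
from cosim import ProfileData, CoSimEnv

case_T = r'case_T/PSSE23/savnw_tuned.sav'  # PSS/E transmission case (.sav/.raw)
case_D = r'path/to/feeder.dss'             # OpenDSS distribution case (placeholder)
data_path = r'path/to/psml_profiles'       # input profiles from the PSML dataset

data = ProfileData(data_path)
env = CoSimEnv(case_T, case_D, data)

env.solve_ss(hour_num=12)              # T+D steady-state power flow for hour 12
env.dynsim(hour_num=12, step_num=600)  # dynamic run of a random disturbance
```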
19 | ## References
20 |
21 | - User manual for OpenDSS and its Python interface: https://sourceforge.net/p/electricdss/code/HEAD/tree/trunk/Distrib/Doc/OpenDSSManual.pdf
22 | - Tutorials for the Python interface of PSS/E (psspy): http://www.whit.com.au/blog/
23 | - A review paper about T+D joint simulation: https://www.mdpi.com/1996-1073/14/1/12
24 |
25 |
26 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_D/13Bus/IEEE13Node_BusXY.csv:
--------------------------------------------------------------------------------
1 | SourceBus, 200, 400
2 | 650, 200, 350
3 | RG60, 200, 300
4 | 646, 0, 250
5 | 645, 100, 250
6 | 632, 200, 250
7 | 633, 350, 250
8 | 634, 400, 250
9 | 670, 200, 200
10 | 611, 0, 100
11 | 684, 100, 100
12 | 671, 200, 100
13 | 692, 250, 100
14 | 675, 400, 100
15 | 652, 100, 0
16 | 680, 200, 0
17 |
18 |
19 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE/IEEE_39_bus.dyr:
--------------------------------------------------------------------------------
1 | 30 'GENROU' 1 10.200000 0.050000 0.010000 0.035000 21.000000 0.000000 0.200000
2 | 0.138000 0.062000 0.060000 0.050000 0.025000 0.000000 0.000000 /
3 | 30 'IEEEST' 1 1 0 0 0 0 0 0 0 1 0.05 3 0.5 10 10 1 0.2 -0.2 0 0 /
4 | 31 'GENROU' 1 6.560000 0.050000 1.500000 0.035000 30.299999 0.000000 0.295000
5 | 0.282000 0.069700 0.170000 0.050000 0.035000 0.000000 0.000000 /
6 | 31 'IEEEST' 1 1 0 0 0 0 0 0 0 5 0.4 1 0.1 10 10 0.5 0.2 -0.2 0 0 /
7 | 32 'GENROU' 1 5.700000 0.050000 1.500000 0.035000 17.900000 0.000000 0.499000
8 | 0.474000 0.106200 0.175200 0.090000 0.060800 0.000000 0.000000 /
9 | 32 'IEEEST' 1 1 0 0 0 0 0 0 0 3 0.2 2 0.2 10 10 0.5 0.2 -0.2 0 0 /
10 | 33 'GENROU' 1 5.690000 0.050000 1.500000 0.035000 14.300000 0.000000 0.524000
11 | 0.516000 0.087200 0.332000 0.070000 0.059000 0.000000 0.000000 /
12 | 33 'IEEEST' 1 1 0 0 0 0 0 0 0 1 0.1 1 0.3 10 10 2 0.2 -0.2 0 0 /
13 | 34 'GENROU' 1 5.400000 0.050000 0.440000 0.035000 26.000000 0.000000 0.670000
14 | 0.620000 0.132000 0.166000 0.089000 0.054000 0.000000 0.000000 /
15 | 34 'IEEEST' 1 1 0 0 0 0 0 0 0 1.5 0.2 1 0.1 10 10 1 0.2 -0.2 0 0 /
16 | 35 'GENROU' 1 7.300000 0.050000 0.400000 0.035000 17.400000 0.000000 0.508000
17 | 0.482000 0.100000 0.162800 0.080000 0.044800 0.000000 0.000000 /
18 | 35 'IEEEST' 1 1 0 0 0 0 0 0 0 0.5 0.1 0.5 0.1 10 10 4 0.2 -0.2 0 0 /
19 | 36 'GENROU' 1 5.660000 0.050000 1.500000 0.035000 13.200000 0.000000 0.590000
20 | 0.584000 0.098000 0.372000 0.088000 0.064400 0.000000 0.000000 /
21 | 36 'IEEEST' 1 1 0 0 0 0 0 0 0 0.2 0.02 0.5 0.1 10 10 7.5 0.2 -0.2 0 0 /
22 | 37 'GENROU' 1 6.700000 0.050000 0.410000 0.035000 12.150000 0.000000 0.580000
23 | 0.560000 0.114000 0.182200 0.090000 0.056000 0.000000 0.000000 /
24 | 37 'IEEEST' 1 1 0 0 0 0 0 0 0 1 0.2 1 0.1 10 10 2 0.2 -0.2 0 0 /
25 | 38 'GENROU' 1 4.790000 0.050000 1.960000 0.035000 17.250000 0.000000 0.421200
26 | 0.410000 0.114000 0.117400 0.090000 0.059600 0.000000 0.000000 /
27 | 38 'IEEEST' 1 1 0 0 0 0 0 0 0 1 0.5 2 0.1 10 10 2 0.2 -0.2 0 0 /
28 | 39 'GENROU' 1 7.000000 0.050000 0.700000 0.035000 25.000000 0.000000 0.400000
29 | 0.380000 0.120000 0.160000 0.080000 0.060000 0.000000 0.000000 /
30 | 39 'IEEEST' 1 1 0 0 0 0 0 0 0 5 0.6 3 0.5 10 10 1 0.2 -0.2 0 0 /
31 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE/IEEE_39_bus.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE/IEEE_39_bus.out
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/output_tuned1.outx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23/output_tuned1.outx
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.dyr:
--------------------------------------------------------------------------------
1 | 101 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
2 | 4.0000 0.0000 1.8000 1.7500 0.60000
3 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
4 | 101 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
5 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
6 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
7 | 101 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
8 | 1.0000 1.0000 0.0000 /
9 | 102 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
10 | 4.0000 0.0000 1.8000 1.7500 0.60000
11 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
12 | 102 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
13 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
14 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
15 | 102 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
16 | 1.0000 1.0000 0.0000 /
17 | 206 'GENROU' 1 4.5000 0.70000E-01 0.15000 0.50000E-01
18 | 2.5000 0.0000 1.4000 1.3500 0.50000
19 | 0.70000 0.25000 0.10000 0.90000E-01 0.38000 /
20 | 206 'IEEET1' 1 0.0000 40.000 0.60000E-01 2.1000
21 | -2.1000 0.0000 0.50000 0.80000E-01 0.80000
22 | 0.0000 2.4700 0.35000E-01 3.5000 0.60000 /
23 | 206 'TGOV1' 1 0.50000E-01 0.50000E-01 0.9000 0.30000
24 | 3.0000 9.0000 0.0000 /
25 | 211 'GENSAL' 1 5.0000 0.50000E-01 0.20000 5.0000
26 | 0.0000 1.0000 0.75000 0.40000 0.26000
27 | 0.10000 0.11000 0.62000 /
28 | 211 'SCRX' 1 0.10000 10.000 200.00 0.50000E-01
29 | -5.0000 5.0000 1.0000 10.000 /
30 | 211 'HYGOV' 1 0.50000E-01 0.30000 5.0000 0.50000E-01
31 | 0.50000 0.20000 1.0000 0.0000 1.2500
32 | 1.2000 0.50000 0.80000E-01/
33 | 3011 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
34 | 3.0000 0.0000 1.6000 1.5500 0.70000
35 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
36 | 3011 'SEXS' 1 0.10000 10.000 100.00 0.10000
37 | 0.0000 4.0000 /
38 | 3018 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
39 | 3.0000 0.0000 1.6000 1.5500 0.70000
40 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
41 | 3018 'SEXS' 1 0.10000 10.000 100.00 0.10000
42 | 0.0000 4.0000 /
43 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.out
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.sav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.sav
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.sld:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.sld
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.snp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23/savnw_tuned.snp
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned_converted.dyr:
--------------------------------------------------------------------------------
1 | 101 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
2 | 4.0000 0.0000 1.8000 1.7500 0.60000
3 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
4 | 101 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
5 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
6 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
7 | 101 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
8 | 1.0000 1.0000 0.0000 /
9 | 102 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
10 | 4.0000 0.0000 1.8000 1.7500 0.60000
11 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
12 | 102 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
13 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
14 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
15 | 102 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
16 | 1.0000 1.0000 0.0000 /
17 | 206 'GENROU' 1 4.5000 0.70000E-01 0.15000 0.50000E-01
18 | 2.5000 0.0000 1.4000 1.3500 0.50000
19 | 0.70000 0.25000 0.10000 0.90000E-01 0.38000 /
20 | 206 'IEEET1' 1 0.0000 40.000 0.60000E-01 2.1000
21 | -2.1000 0.0000 0.50000 0.80000E-01 0.80000
22 | 0.0000 2.4700 0.35000E-01 3.5000 0.60000 /
23 | 206 'TGOV1' 1 0.50000E-01 0.50000E-01 0.9000 0.30000
24 | 3.0000 9.0000 0.0000 /
25 | 211 'GENSAL' 1 5.0000 0.50000E-01 0.20000 5.0000
26 | 0.0000 1.0000 0.75000 0.40000 0.26000
27 | 0.10000 0.11000 0.62000 /
28 | 211 'SCRX' 1 0.10000 10.000 200.00 0.50000E-01
29 | -5.0000 5.0000 1.0000 10.000 /
30 | 211 'HYGOV' 1 0.50000E-01 0.30000 5.0000 0.50000E-01
31 | 0.50000 0.20000 1.0000 0.0000 1.2500
32 | 1.2000 0.50000 0.80000E-01/
33 | 3011 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
34 | 3.0000 0.0000 1.6000 1.5500 0.70000
35 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
36 | 3011 'SEXS' 1 0.10000 10.000 100.00 0.10000
37 | 0.0000 4.0000 /
38 | 3018 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
39 | 3.0000 0.0000 1.6000 1.5500 0.70000
40 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
41 | 3018 'SEXS' 1 0.10000 10.000 100.00 0.10000
42 | 0.0000 4.0000 /
43 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned_converted.sav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23/savnw_tuned_converted.sav
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned_old.dyr:
--------------------------------------------------------------------------------
1 | 101 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
2 | 4.0000 0.0000 1.8000 1.7500 0.60000
3 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
4 | 101 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
5 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
6 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
7 | 101 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
8 | 1.0000 1.0000 0.0000 /
9 | 102 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
10 | 4.0000 0.0000 1.8000 1.7500 0.60000
11 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
12 | 102 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
13 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
14 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
15 | 102 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
16 | 1.0000 1.0000 0.0000 /
17 | 206 'GENROU' 1 4.5000 0.70000E-01 0.15000 0.50000E-01
18 | 2.5000 0.0000 1.4000 1.3500 0.50000
19 | 0.70000 0.25000 0.10000 0.90000E-01 0.38000 /
20 | 206 'IEEET1' 1 0.0000 40.000 0.60000E-01 2.1000
21 | -2.1000 0.0000 0.50000 0.80000E-01 0.80000
22 | 0.0000 2.4700 0.35000E-01 3.5000 0.60000 /
23 | 206 'TGOV1' 1 0.50000E-01 0.50000E-01 0.9000 0.30000
24 | 3.0000 9.0000 0.0000 /
25 | 211 'GENSAL' 1 5.0000 0.50000E-01 0.20000 5.0000
26 | 0.0000 1.0000 0.75000 0.40000 0.26000
27 | 0.10000 0.11000 0.62000 /
28 | 211 'SCRX' 1 0.10000 10.000 200.00 0.50000E-01
29 | -5.0000 5.0000 1.0000 10.000 /
30 | 211 'HYGOV' 1 0.50000E-01 0.30000 5.0000 0.50000E-01
31 | 0.50000 0.20000 1.0000 0.0000 1.2500
32 | 1.2000 0.50000 0.80000E-01/
33 | 3011 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
34 | 3.0000 0.0000 1.6000 1.5500 0.70000
35 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
36 | 3011 'SEXS' 1 0.10000 10.000 100.00 0.10000
37 | 0.0000 4.0000 /
38 | 3018 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
39 | 3.0000 0.0000 1.6000 1.5500 0.70000
40 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
41 | 3018 'SEXS' 1 0.10000 10.000 100.00 0.10000
42 | 0.0000 4.0000 /
43 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned_th.dyr:
--------------------------------------------------------------------------------
1 | 101 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
2 | 4.0000 0.0000 1.8000 1.7500 0.60000
3 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
4 | 101 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
5 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
6 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
7 | 101 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
8 | 1.0000 1.0000 0.0000 /
9 | 102 'GENROU' 1 6.5000 0.60000E-01 0.20000 0.50000E-01
10 | 4.0000 0.0000 1.8000 1.7500 0.60000
11 | 0.80000 0.30000 0.15000 0.90000E-01 0.38000 /
12 | 102 'IEEET1' 1 0.0000 400.00 0.40000E-01 7.3000
13 | -7.3000 1.0000 0.80000 0.30000E-01 1.0000
14 | 0.0000 2.4700 0.35000E-01 4.5000 0.47000 /
15 | 102 'TGOV1' 1 0.50000E-01 0.50000E-01 1.0500 0.30000
16 | 1.0000 1.0000 0.0000 /
17 | 206 'GENROU' 1 4.5000 0.70000E-01 0.15000 0.50000E-01
18 | 2.5000 0.0000 1.4000 1.3500 0.50000
19 | 0.70000 0.25000 0.10000 0.90000E-01 0.38000 /
20 | 206 'IEEET1' 1 0.0000 40.000 0.60000E-01 2.1000
21 | -2.1000 0.0000 0.50000 0.80000E-01 0.80000
22 | 0.0000 2.4700 0.35000E-01 3.5000 0.60000 /
23 | 206 'TGOV1' 1 0.50000E-01 0.50000E-01 0.9000 0.30000
24 | 3.0000 9.0000 0.0000 /
25 | 211 'GENSAL' 1 5.0000 0.50000E-01 0.20000 5.0000
26 | 0.0000 1.0000 0.75000 0.40000 0.26000
27 | 0.10000 0.11000 0.62000 /
28 | 211 'SCRX' 1 0.10000 10.000 200.00 0.50000E-01
29 | -5.0000 5.0000 1.0000 10.000 /
30 | 211 'HYGOV' 1 0.50000E-01 0.30000 5.0000 0.50000E-01
31 | 0.50000 0.20000 1.0000 0.0000 1.2500
32 | 1.2000 0.50000 0.80000E-01/
33 | 3011 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
34 | 3.0000 0.0000 1.6000 1.5500 0.70000
35 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
36 | 3011 'SEXS' 1 0.10000 10.000 100.00 0.10000
37 | 0.0000 4.0000 /
38 | 3018 'GENROU' 1 5.0000 0.60000E-01 0.20000 0.60000E-01
39 | 3.0000 0.0000 1.6000 1.5500 0.70000
40 | 0.85000 0.35000 0.20000 0.90000E-01 0.38000 /
41 | 3018 'SEXS' 1 0.10000 10.000 100.00 0.10000
42 | 0.0000 4.0000 /
43 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23/savnw_tuned_th.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23/savnw_tuned_th.out
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23_wind/savnw_wind.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23_wind/savnw_wind.out
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/PSSE23_wind/savnw_wind_tuned.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/PSSE23_wind/savnw_wind_tuned.out
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/Test1_sav_wind.py:
--------------------------------------------------------------------------------
1 | # This file generates a saved case (.sav) file with wind generation;
2 | # The result file is r"""savnw_wind.sav"""
3 | import os
4 | import pssexplore34
5 | import psspy
6 | import redirect
7 | import dyntools
8 | import numpy as np
9 | import pssplot
10 | psspy.psseinit()
11 | #redirect.py2psse()
12 | from psspy import _i, _f, _s, _o
13 |
14 |
15 | # SET FILE names
16 | study = 'savnw_REwind'
17 | suffix = '_flat'
18 | savfile = r"""savnw.sav"""
19 | conlfile= r"""savnw_Conl.idv"""
20 | cnvfile = '%s_cnv.sav'%study
21 | dyrfile = '%s.dyr'%study
22 | snpfile = '%s.snp'%study
23 | outfile = '%s%s.out'%(study,suffix)
24 | logfile = '%s%s.log'%(study,suffix)
25 | psspy.progress_output(2,logfile,[0,0])
26 | # -------------------------------------------------------------------------
27 | # 1: LOAD PSSE CASE
28 | # 'replace' gen at bus 3018 with new solar PV plant
29 | # -------------------------------------------------------------------------
30 | psspy.case(savfile)
31 | psspy.solution_parameters_3([_i,100,_i],[_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
32 |
33 | # Convert Gen at 3018 to "Solar" [Xs=99999 and Wind Control Mode WCM =2 ]
34 | psspy.machine_data_2(3018,r"""1""",[_i,_i,_i,_i,_i,2],[_f,_f,_f,_f,_f,_f,_f,_f, 99999.0,_f,_f,_f,_f,_f,_f,_f,_f])
35 |
36 | # SAVE THE PSSE CASE
37 | psspy.fdns([1,0,1,1,1,0,99,0])
38 | psspy.save(r"""savnw_wind.sav""")
39 |
40 | # -------------------------------------------------------------------------
41 | # 2: convert case and create snp file
42 | #
43 | # -------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/Test2_ScaleDownSav.py:
--------------------------------------------------------------------------------
1 | '''
2 | Please build with py27: Tools --> Build System --> py27
3 | '''
4 | # Incrementally decrease the load
5 | import pssexplore34
6 | import psspy
7 | from psspy import _i, _f
8 | import redirect
9 | psspy.psseinit()
10 | import dyntools
11 | import numpy as np
12 | import pssplot
13 | psspy.psseinit()
14 |
15 | psspy.case(r"""savnw_wind.sav""")
16 |
17 |
18 | gen_P = np.array([750.0, 750.0, 800.0, 600.0, 258.656, 100.0])
19 | gen_Q = np.array([91.5, 91.5,593.2, 70.7, 67.0, 0.0])
20 |
21 |
22 |
23 |
24 |
25 | load_P = np.array([200.0000, 600.0000, 400.0000, 300.0000, 1200.0000, 100.0000, 200.0000, 200.0000])
26 | load_Q = np.array([100.0000, 450.0000, 350.0000, 150.0000, 700.0000, 50.0000, 75.0000, 75.0000])
27 |
28 | perc = 0.0005 # step size: decrease the scaling factor k by "perc" each iteration
29 |
30 | for k in np.arange(0.75, 0.74, -perc):
31 | gen_P_each = gen_P*k
32 | gen_Q_each = gen_Q*k
33 | load_P_each = load_P*k
34 | load_Q_each = load_Q*k
35 | # modify P and Q
36 | # change Pgen
37 | psspy.machine_chng_2(101,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[0],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
38 | psspy.machine_chng_2(102,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[1],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
39 | psspy.machine_chng_2(206,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[2],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
40 | psspy.machine_chng_2(211,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[3],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
41 | psspy.machine_chng_2(3011,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[4],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
42 | psspy.machine_chng_2(3018,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[5],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
43 | # change Qgen
44 | psspy.machine_chng_2(101,r"""1""",[_i,_i,_i,_i,_i,_i],[_f, gen_Q_each[0],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
45 | psspy.machine_chng_2(102,r"""1""",[_i,_i,_i,_i,_i,_i],[_f, gen_Q_each[1],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
46 | psspy.machine_chng_2(206,r"""1""",[_i,_i,_i,_i,_i,_i],[_f, gen_Q_each[2],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
47 | psspy.machine_chng_2(211,r"""1""",[_i,_i,_i,_i,_i,_i],[_f, gen_Q_each[3],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
48 | psspy.machine_chng_2(3011,r"""1""",[_i,_i,_i,_i,_i,_i],[_f,gen_Q_each[4],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
49 | # Q at 3018 is zero
50 | #psspy.machine_chng_2(3018,r"""1""",[_i,_i,_i,_i,_i,_i],[_f,gen_Q_each[5],_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
51 | # change P load
52 | psspy.load_chng_5(153,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[0],_f,_f,_f,_f,_f,_f,_f])
53 | psspy.load_chng_5(154,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[1],_f,_f,_f,_f,_f,_f,_f])
54 | psspy.load_chng_5(154,r"""2""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[2],_f,_f,_f,_f,_f,_f,_f])
55 | psspy.load_chng_5(203,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[3],_f,_f,_f,_f,_f,_f,_f])
56 | psspy.load_chng_5(205,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[4],_f,_f,_f,_f,_f,_f,_f])
57 | psspy.load_chng_5(3005,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[5],_f,_f,_f,_f,_f,_f,_f])
58 | psspy.load_chng_5(3007,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[6],_f,_f,_f,_f,_f,_f,_f])
59 | psspy.load_chng_5(3008,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[7],_f,_f,_f,_f,_f,_f,_f])
60 | # change Q load
61 | psspy.load_chng_5(153,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[_f,load_Q_each[0],_f,_f,_f,_f,_f,_f])
62 | psspy.load_chng_5(154,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[_f, load_Q_each[1],_f,_f,_f,_f,_f,_f])
63 | psspy.load_chng_5(154,r"""2""",[_i,_i,_i,_i,_i,_i,_i],[_f, load_Q_each[2],_f,_f,_f,_f,_f,_f])
64 | psspy.load_chng_5(205,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[_f, load_Q_each[4],_f,_f,_f,_f,_f,_f])
65 | psspy.load_chng_5(203,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[_f, load_Q_each[3],_f,_f,_f,_f,_f,_f])
66 | psspy.load_chng_5(3005,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[_f,load_Q_each[5],_f,_f,_f,_f,_f,_f])
67 | psspy.load_chng_5(3007,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[_f,load_Q_each[6],_f,_f,_f,_f,_f,_f])
68 | # solve power flow
69 | psspy.fdns([0,0,0,1,1,3,99,0])
70 | print("***************Load_change_percentage: "+str(k)+"******************")
71 | #psspy.save(r"""savnw_wind_scale_down.sav""")
72 | # Further decrease P_gen and P_load at 206 and 205
73 | psspy.machine_chng_2(206,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[2]-120,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
74 | psspy.load_chng_5(205,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[4]-120,_f,_f,_f,_f,_f,_f,_f])
75 |
76 | # Further decrease P_gen and P_load at 3008 and 3018
77 |
78 | psspy.machine_chng_2(3018,r"""1""",[_i,_i,_i,_i,_i,_i],[ gen_P_each[5]-20,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f,_f])
79 | psspy.load_chng_5(3008,r"""1""",[_i,_i,_i,_i,_i,_i,_i],[ load_P_each[7]-20,_f,_f,_f,_f,_f,_f,_f])
80 |
81 | # run power flow
82 | psspy.fdns([0,0,0,1,1,3,99,0])
83 | psspy.save(r"""savnw_wind_scale_down.sav""")
84 |
--------------------------------------------------------------------------------
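As a sanity check on the loop bounds above: np.arange(0.75, 0.74, -perc) yields 20 scaling factors from 0.75 down to 0.7405 (the 0.74 endpoint is exclusive). A standalone snippet, no PSS/E required:

    import numpy as np

    perc = 0.0005
    ks = np.arange(0.75, 0.74, -perc)   # descending; the 0.74 endpoint is excluded
    print(len(ks), ks[0], ks[-1])       # 20 values: 0.75 down to 0.7405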
/Code/Joint Simulation/case_T/Re_wind_v3/Test3_Initialization.py:
--------------------------------------------------------------------------------
1 | '''
2 | Please build with py27: Tools --> Build System --> py27
3 | '''
4 | # Incrementally decrease the load
5 | import pssexplore34
6 | import psspy
7 | from psspy import _i, _f
8 | import redirect
9 | psspy.psseinit()
10 | import dyntools
11 | import numpy as np
12 | import pssplot
13 | psspy.psseinit()
14 |
15 | def Initialization(output_name):
16 | # initialize twice: the first initial-condition check may fail
17 | psspy.strt_2([0,0],output_name) # first initialization attempt
18 | psspy.run(0, 10.0,0,1,0) # run a 10-second flat simulation to let states settle
19 | psspy.strt_2([0,0],output_name) # re-initialize; the check should now pass
20 |
21 |
22 |
23 | sav_name = r"""C:\Users\Tong Huang\Desktop\FOL\WECC_ORIGINAL\Re_wind_v3\savnw_wind_scale_down.sav"""
24 | dyn_name = r"""C:\Users\Tong Huang\Desktop\FOL\WECC_ORIGINAL\Re_wind_v3\savnw_REwind.dyr"""
25 | output_name = r"""output1"""
26 | psspy.case(sav_name)
27 | psspy.fdns([0,0,0,1,1,3,99,0])
28 | psspy.cong(0)
29 | psspy.conl(0,1,1,[0,0],[ 100.0,0.0,0.0, 100.0]) # convert loads: 100% constant-current P, 100% constant-admittance Q
30 | psspy.conl(0,1,2,[0,0],[ 100.0,0.0,0.0, 100.0])
31 | psspy.conl(0,1,3,[0,0],[ 100.0,0.0,0.0, 100.0])
32 | psspy.save(r"""C:\Users\Tong Huang\Desktop\FOL\WECC_ORIGINAL\Re_wind_v3\savnw_wind_scale_down_cnv.sav""")
33 | psspy.dyre_new([1,1,1,1],dyn_name,"","","")
34 | # add channel
35 | psspy.chsb(0,1,[-1,-1,-1,1,14,0])
36 | psspy.snap([386,170,103,70,46],r"""C:\Users\Tong Huang\Desktop\FOL\WECC_ORIGINAL\Re_wind_v3\savnw_REwind.snp""")
37 | # initialization
38 | Initialization(output_name)
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/output1.out:
--------------------------------------------------------------------------------
1 | [binary PSS/E channel-output header: VOLT and ANGL channels for buses 101, 102, 151-154, 201-206, 211, 3001-3008, 3011 and 3018 of the savnw example case]
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/output1.outx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/Re_wind_v3/output1.outx
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/savnw.sld:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/Re_wind_v3/savnw.sld
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/savnw_REwind.snp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/Re_wind_v3/savnw_REwind.snp
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind.sav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind.sav
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind_scale_down.out:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind_scale_down.out
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind_scale_down.sav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind_scale_down.sav
--------------------------------------------------------------------------------
/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind_scale_down_cnv.sav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/case_T/Re_wind_v3/savnw_wind_scale_down_cnv.sav
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/__pycache__/cosim.cpython-34.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/__pycache__/cosim.cpython-34.pyc
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/__pycache__/cosim.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/__pycache__/cosim.cpython-39.pyc
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/__pycache__/solar_inverter.cpython-34.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/__pycache__/solar_inverter.cpython-34.pyc
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/__pycache__/utils.cpython-34.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/__pycache__/utils.cpython-34.pyc
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/__pycache__/utils.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/__pycache__/utils.cpython-39.pyc
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/create_metadata.py:
--------------------------------------------------------------------------------
1 | import json
2 | from datetime import datetime
3 | now = datetime.now()
4 |
5 | meta = {}
6 | meta['title'] = 'Multi-Timescale Power System Dataset'
7 | meta['description'] = ''
8 | meta['subject'] = 'Power Systems, Dynamic Simulation, Multi-Timescale'
9 | meta['date'] = now.strftime("%d/%m/%Y %H:%M:%S")
10 | meta['publisher'] = 'Texas A&M University, University of Southern California'
11 | meta['contactPoint'] = 'le.xie@tamu.edu'
12 | meta['creator'] = 'Dongqi Wu, Xiangtian Zheng, Tong Huang, Loc Trinh, Nan Xu, Sivaranjani Seetharaman, Le Xie and Yan Liu'
13 | meta['format'] = '.csv'
14 | meta['type'] = 'open-source power system dataset for machine learning'
15 | meta['contributor'] = 'Dongqi Wu, Xiangtian Zheng, Tong Huang, Loc Trinh, Nan Xu, Sivaranjani Seetharaman, Le Xie and Yan Liu'
16 | meta['identifier'] = 'TBD'
17 | meta['source'] = 'TBD'
18 | meta['language'] = 'Python'
19 | meta['relation'] = 'TBD'
20 | meta['rights'] = 'TBD'
21 |
22 | with open('.zenodo.json', 'w') as out:
23 | json.dump(meta, out, indent=4)
24 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/idle32.bat -.lnk:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/idle32.bat -.lnk
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/CurCtr_alg.m:
--------------------------------------------------------------------------------
1 | function [v_id_star, v_iq_star] = CurCtr_alg(para, w_n, i_ld, i_lq, ...
2 | i_ld_star, i_lq_star, gamma_d, gamma_q)
3 | %Algebraic equations for the current controller
4 |
5 | K_pc = para.K_pc;
6 | K_ic = para.K_ic;
7 | L_f = para.L_f;
8 |
9 | v_id_star = -w_n*L_f*i_lq + K_pc*(i_ld_star - i_ld) + K_ic*gamma_d;
10 | v_iq_star = w_n*L_f*i_ld + K_pc*(i_lq_star - i_lq) + K_ic*gamma_q;
11 |
12 | end
13 |
14 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/CurCtr_diff.m:
--------------------------------------------------------------------------------
1 | function [gamma_d, gamma_q] = CurCtr_diff(i_ld_star,i_lq_star, i_ld, i_lq, dt, gamma_d0, gamma_q0)
2 | %This function updates the state variables in the current controllers
3 | % dt: time step
4 | % gamma_d0, gamma_q0: state variables in the last step
5 | % Euler approach is applied
6 | dgamma_d = i_ld_star - i_ld;
7 | dgamma_q = i_lq_star - i_lq;
8 | gamma_d = gamma_d0 + dgamma_d*dt;
9 | gamma_q = gamma_q0 + dgamma_q*dt;
10 | end
11 |
12 |
--------------------------------------------------------------------------------
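In equation form, CurCtr_diff advances the PI integrator states of the current controller by one forward-Euler step:

$$\gamma_d = \gamma_{d0} + (i_{ld}^{*} - i_{ld})\,\Delta t, \qquad \gamma_q = \gamma_{q0} + (i_{lq}^{*} - i_{lq})\,\Delta t$$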
/Code/Joint Simulation/code/inverter/Dyn_Angle_Droop.m:
--------------------------------------------------------------------------------
1 | function [ddelta, dv_od_star] = Dyn_Angle_Droop(delta0, v_od_star0, P_star, Q_star, P0, Q0, Ta, Da, Tv, Dv, V_star, delta_star)
2 | %This function defines the dynamics of the angle droop controller
3 | % delta0 and v_od_star0 are the last-step state variables
4 | ddelta = 1/Ta*(Da*(P_star - P0) - delta0 + delta_star);
5 | dv_od_star = 1/Tv*(Dv*(Q_star - Q0) - v_od_star0 + V_star);
6 | end
7 |
8 |
--------------------------------------------------------------------------------
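Written out, Dyn_Angle_Droop implements the angle and voltage droop ODEs with states $\delta$ and $v_{od}^{*}$:

$$T_a\,\dot{\delta} = D_a(P^{*} - P) - (\delta - \delta^{*}), \qquad T_v\,\dot{v}_{od}^{*} = D_v(Q^{*} - Q) - (v_{od}^{*} - V^{*})$$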
/Code/Joint Simulation/code/inverter/Dyn_Angle_Droop_Simple.m:
--------------------------------------------------------------------------------
1 | function [ddelta, dv] = Dyn_Angle_Droop_Simple(delta0, v0, P, Q, ...
2 | delta_star, V_star, P_star, Q_star,...
3 | Ta, Da, Tv, Dv)
4 | %Differential equation for the angle droop control
5 | ddelta = 1/Ta*(Da*(P_star-P)-(delta0-delta_star));
6 | dv = 1/Tv*(Dv*(Q_star - Q)-(v0 - V_star));
7 | end
8 |
9 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/Dyn_Freq_Droop.m:
--------------------------------------------------------------------------------
1 | function [ddelta, dw, dv_od_star] = Dyn_Freq_Droop(w0, v_od_star0, P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star)
2 | %This function defines the dynamics of the frequency droop controller
3 | % w0, and v_od_star0 are the last step state variables
4 | ddelta = w0 - w_n;
5 | dw = 1/Tf*(-Df*(w0 - w_n) + P_star - P0);
6 | dv_od_star = 1/Tv*(Dv*(Q_star - Q0) - v_od_star0 + V_star);
7 | end
8 |
9 |
--------------------------------------------------------------------------------
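Dyn_Freq_Droop is the frequency-droop counterpart, with angle, frequency and voltage-reference states:

$$\dot{\delta} = \omega - \omega_n, \qquad T_f\,\dot{\omega} = -D_f(\omega - \omega_n) + P^{*} - P, \qquad T_v\,\dot{v}_{od}^{*} = D_v(Q^{*} - Q) - (v_{od}^{*} - V^{*})$$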
/Code/Joint Simulation/code/inverter/Dyn_Freq_Droop_Simple.m:
--------------------------------------------------------------------------------
1 | function [ddelta, dw, dv] = Dyn_Freq_Droop_Simple(w0, v, P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star)
2 | %This function defines the dynamics of the frequency droop controller
3 | % w0, and v_od_star0 are the last step state variables
4 | ddelta = w0 - w_n;
5 | dw = 1/Tf*(-Df*(w0 - w_n) + P_star - P0);
6 | dv = 1/Tv*(Dv*(Q_star - Q0) - v + V_star);
7 | end
8 |
9 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/Dynamics_CurCtr.m:
--------------------------------------------------------------------------------
1 | function [dgamma_d, dgamma_q] = Dynamics_CurCtr(i_ld_star,i_lq_star, i_ld, i_lq)
2 | % returns the time derivatives of the current-controller integrator states
3 | % (right-hand side used by an external integrator)
4 | dgamma_d = i_ld_star - i_ld;
5 | dgamma_q = i_lq_star - i_lq;
6 | end
7 |
8 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/Dynamics_LC_Filter.m:
--------------------------------------------------------------------------------
1 | function [di_ld, di_lq, dv_od, dv_oq] = Dynamics_LC_Filter(para_LC, i_ld0, i_lq0, v_od0, v_oq0, v_id0, v_iq0, i_od0, i_oq0, w0)
2 | %The function gives one-step update of the lc filter
3 | % para_LC includes the parameters of the LC filter, e.g., para_LC.r_f
4 | % the variables ending with 0 are the current step value
5 | % returns the time derivatives di_ld, di_lq, dv_od, dv_oq
6 | % (right-hand side used by LC_Filter_RK4)
7 | di_ld = -para_LC.r_f/para_LC.L_f*i_ld0 + w0 * i_lq0 + 1/para_LC.L_f*(v_id0 - v_od0);
8 | di_lq = -para_LC.r_f/para_LC.L_f*i_lq0 - w0 * i_ld0 + 1/para_LC.L_f*(v_iq0 - v_oq0);
9 | dv_od = w0*v_oq0 + 1/para_LC.C_f*(i_ld0 -i_od0);
10 | dv_oq = -w0*v_od0 + 1/para_LC.C_f*(i_lq0 -i_oq0);
11 | end
12 |
13 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/Dynamics_PowSensor.m:
--------------------------------------------------------------------------------
1 | function [dP,dQ] = Dynamics_PowSensor(p_t, q_t, P0, Q0, w_c)
2 | %Power sensor dynamics
3 | % p_t, q_t: instantaneous power
4 | % P0, Q0: last step P and Q
5 | dP = -w_c*P0 + w_c*p_t;
6 | dQ = -w_c*Q0 + w_c*q_t;
7 | end
8 |
9 |
--------------------------------------------------------------------------------
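Dynamics_PowSensor is a first-order low-pass filter: the measured powers track the instantaneous powers with corner frequency $\omega_c$:

$$\dot{P} = \omega_c(p_t - P), \qquad \dot{Q} = \omega_c(q_t - Q), \qquad \text{i.e.}\quad P(s) = \frac{\omega_c}{s + \omega_c}\,p_t(s)$$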
/Code/Joint Simulation/code/inverter/LC_Filter.m:
--------------------------------------------------------------------------------
1 | function [i_ld, i_lq, v_od, v_oq] = LC_Filter(para_LC, i_ld0, i_lq0, v_od0, v_oq0, v_id0, v_iq0, i_od0, i_oq0, w0,dt)
2 | %The function models the dynamics of a LC filter
3 | % para_LC includes the parameters of the LC filter, e.g., para_LC.r_f
4 | % the variables ending with 0 are the current step value
5 | % i_ld, i_lq, v_od, v_oq, i_od, i_oq are next-step update
6 | % dt: time step
7 | di_ld = -para_LC.r_f/para_LC.L_f*i_ld0 + w0 * i_lq0 + 1/para_LC.L_f*(v_id0 - v_od0);
8 | di_lq = -para_LC.r_f/para_LC.L_f*i_lq0 - w0 * i_ld0 + 1/para_LC.L_f*(v_iq0 - v_oq0);
9 | dv_od = w0*v_oq0 + 1/para_LC.C_f*(i_ld0 -i_od0);
10 | dv_oq = -w0*v_od0 + 1/para_LC.C_f*(i_lq0 -i_oq0);
11 | %di_od = -para_LC.r_c/para_LC.L_c*i_od0 + w0 * i_oq0 + 1/para_LC.L_c(v_od0 - v_bd0);
12 | %di_oq = -para_LC.r_c/para_LC.L_c*i_oq0 - w0 * i_od0 + 1/para_LC.L_c(v_oq0 - v_bq0);
13 | i_ld = i_ld0 + dt* di_ld;
14 | i_lq = i_lq0 + dt* di_lq;
15 | v_od = v_od0 + dt* dv_od;
16 | v_oq = v_oq0 + dt* dv_oq;
17 | %i_od = i_od0 + dt* di_od;
18 | %i_oq = i_oq0 + dt* di_oq;
19 | end
20 |
21 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/LC_Filter_RK4.m:
--------------------------------------------------------------------------------
1 | function [i_ld, i_lq, v_od, v_oq] = LC_Filter_RK4(para_LC, i_ld0, i_lq0, v_od0, v_oq0, v_id0, v_iq0, i_od0, i_oq0, w0,dt)
2 | %The function models the dynamics of a LC filter
3 | % para_LC includes the parameters of the LC filter, e.g., para_LC.r_f
4 | % the variables ending with 0 are the current step value
5 | % i_ld, i_lq, v_od, v_oq, i_od, i_oq are next-step update
6 | % dt: time step
7 | x0 = [i_ld0; i_lq0; v_od0; v_oq0];
8 | [di_ld, di_lq, dv_od, dv_oq] = Dynamics_LC_Filter(para_LC, i_ld0, i_lq0, v_od0, v_oq0, v_id0, v_iq0, i_od0, i_oq0, w0);
9 | k1 = dt*[di_ld; di_lq; dv_od; dv_oq];
10 | x1 = x0 + 0.5*k1;
11 | [di_ld, di_lq, dv_od, dv_oq] = Dynamics_LC_Filter(para_LC, x1(1), x1(2), x1(3), x1(4), v_id0, v_iq0, i_od0, i_oq0, w0);
12 | k2 = dt*[di_ld; di_lq; dv_od; dv_oq];
13 | x2 = x0 + 0.5*k2;
14 | [di_ld, di_lq, dv_od, dv_oq] = Dynamics_LC_Filter(para_LC, x2(1), x2(2), x2(3), x2(4), v_id0, v_iq0, i_od0, i_oq0, w0);
15 | k3 = dt *[di_ld; di_lq; dv_od; dv_oq];
16 | x3 = x0 + k3;
17 | [di_ld, di_lq, dv_od, dv_oq] = Dynamics_LC_Filter(para_LC, x3(1), x3(2), x3(3), x3(4), v_id0, v_iq0, i_od0, i_oq0, w0);
18 | k4 = dt*[di_ld; di_lq; dv_od; dv_oq];
19 | x = x0 + 1/6*(k1 + 2*k2 + 2*k3 +k4);
20 | i_ld = x(1);
21 | i_lq = x(2);
22 | v_od = x(3);
23 | v_oq = x(4);
24 | end
25 |
26 |
--------------------------------------------------------------------------------
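LC_Filter_RK4 (and the PowCtr_*_RK4 routines below) follow the classical fourth-order Runge-Kutta stage pattern for $\dot{x} = f(x)$; note that the third intermediate state must be built from $k_3$:

$$k_1 = \Delta t\,f(x_0), \quad k_2 = \Delta t\,f(x_0 + \tfrac{1}{2}k_1), \quad k_3 = \Delta t\,f(x_0 + \tfrac{1}{2}k_2), \quad k_4 = \Delta t\,f(x_0 + k_3)$$
$$x = x_0 + \tfrac{1}{6}(k_1 + 2k_2 + 2k_3 + k_4)$$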
/Code/Joint Simulation/code/inverter/LinePlusInfBus.m:
--------------------------------------------------------------------------------
1 | function [i_d,i_q] = LinePlusInfBus(r, L, w_n, V_infD, ...
2 | V_infQ, v_bd, v_bq, delta)
3 | % Update current from the network via algebraic equations
4 | % ZI = V
5 | % with DQ transform
6 | Z = [r, -w_n*L; +w_n*L, +r];
7 | T = [cos(delta), -sin(delta); sin(delta), cos(delta)]; %dq to DQ
8 | T_inv = [cos(delta), sin(delta); -sin(delta), cos(delta)];
9 | v_DQ = T*[v_bd; v_bq];
10 | v_bD = v_DQ(1);
11 | v_bQ = v_DQ(2);
12 | I = inv(Z)*[v_bD - V_infD; v_bQ - V_infQ];
13 | I_dq = T_inv*I;
14 | i_d = I_dq(1);
15 | i_q = I_dq(2);
16 |
17 | end
18 |
19 |
--------------------------------------------------------------------------------
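LinePlusInfBus solves the algebraic line equation in the common DQ frame, rotating the inverter's local dq quantities by the angle $\delta$:

$$\begin{bmatrix} r & -\omega_n L \\ \omega_n L & r \end{bmatrix} I_{DQ} = T(\delta)\,v_{b,dq} - V_{\infty,DQ}, \qquad T(\delta) = \begin{bmatrix} \cos\delta & -\sin\delta \\ \sin\delta & \cos\delta \end{bmatrix}, \qquad i_{dq} = T(\delta)^{-1} I_{DQ}$$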
/Code/Joint Simulation/code/inverter/Network_Simple.m:
--------------------------------------------------------------------------------
1 | function [P, Q] = Network_Simple(r, L, w_n, V_infD,V_infQ, delta, v)
2 | %Give delta and v, compute power flow
3 | Z = r + 1i*w_n*L;
4 | Y = 1/Z;
5 | G = real(Y);
6 | B = imag(Y);
7 | Y_mag = abs(Y);
8 | sigma = angle(Y);
9 | V = V_infD + 1i*V_infQ;
10 | V_inf_mag = abs(V);
11 | V_inf_ang = angle(V);
12 |
13 | % power flow equations
14 | P = G*v^2 + v*V_inf_mag*(-Y_mag)*cos(delta - V_inf_ang - sigma); % notice the sign: -Y_mag/_sigma is a component of the admittance matrix
15 | Q = -B*v^2 + v*V_inf_mag*(-Y_mag)*sin(delta - V_inf_ang - sigma);
16 | end
17 |
18 |
--------------------------------------------------------------------------------
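With $Y = 1/(r + j\omega_n L) = |Y|e^{j\sigma}$ and the infinite bus at $V_\infty \angle \theta_\infty$, Network_Simple evaluates the standard two-bus power-flow equations:

$$P = G v^2 - v V_\infty |Y| \cos(\delta - \theta_\infty - \sigma), \qquad Q = -B v^2 - v V_\infty |Y| \sin(\delta - \theta_\infty - \sigma)$$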
/Code/Joint Simulation/code/inverter/PowCtr_Ang_Simple.m:
--------------------------------------------------------------------------------
1 | function [delta,v, w] = PowCtr_Ang_Simple(delta0, v0, P, Q, ...
2 | delta_star, V_star, P_star, Q_star, para,dt, w_n)
3 | %This is a simple version of the angle droop controller; inner loop
4 | %controllers are not modeled in this function.
5 | Ta = para.Ta;
6 | Da = para.Da;
7 | Tv = para.Tv;
8 | Dv = para.Dv;
9 |
10 |
11 | % Update delta and v using RK4
12 | x0 = [delta0; v0];
13 | [ddelta, dv] = Dyn_Angle_Droop_Simple(x0(1), x0(2), P, Q, ...
14 | delta_star, V_star, P_star, Q_star,...
15 | Ta, Da, Tv, Dv);
16 | k1 = dt*[ddelta; dv];
17 | x1 = x0 + 0.5*k1;
18 | [ddelta, dv] = Dyn_Angle_Droop_Simple(x1(1), x1(2), P, Q, ...
19 | delta_star, V_star, P_star, Q_star,...
20 | Ta, Da, Tv, Dv);
21 | k2 = dt*[ddelta; dv];
22 | x2 = x0 + 0.5*k2;
23 | [ddelta, dv] = Dyn_Angle_Droop_Simple(x2(1), x2(2), P, Q, ...
24 | delta_star, V_star, P_star, Q_star,...
25 | Ta, Da, Tv, Dv);
26 | k3 = dt*[ddelta; dv];
27 | x3 = x0 + k3;
28 | [ddelta, dv] = Dyn_Angle_Droop_Simple(x3(1), x3(2), P, Q, ...
29 | delta_star, V_star, P_star, Q_star,...
30 | Ta, Da, Tv, Dv);
31 | k4 = dt*[ddelta; dv];
32 | x = x0 + 1/6*(k1 + 2*k2 + 2*k3 +k4);
33 | delta = x(1);
34 | v = x(2);
35 | [ddelta, ~] = Dyn_Angle_Droop_Simple(delta, v, P, Q, ...
36 | delta_star, V_star, P_star, Q_star,...
37 | Ta, Da, Tv, Dv);
38 | w = w_n + ddelta;
39 | end
40 |
41 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/PowCtr_Angle_RK4.m:
--------------------------------------------------------------------------------
1 | function [delta, v_od_star, v_oq_star, w, P, Q] = PowCtr_Angle_RK4(v_od,v_oq, i_od, i_oq, para_pc, ...
2 | P0, Q0, delta0, v_od_star0, w_n, P_star, Q_star,...
3 | V_star,dt, delta_star)
4 | %Power Controller
5 | % variables ending with 0 are last step variables
6 | % w_n: nominal frequency
7 | % v_oq_star == 0;
8 | Ta = para_pc.Ta;
9 | Tv = para_pc.Tv;
10 | Da = para_pc.Da;
11 | Dv = para_pc.Dv;
12 | w_c = para_pc.w_c;
13 |
14 | %% Dynamics of power sensor
15 | p_t = v_od*i_od + v_oq*i_oq;
16 | q_t = -v_od*i_oq + v_oq*i_od;
17 | % Update P and Q using RK4
18 | x0 = [P0; Q0];
19 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, P0, Q0, w_c);
20 | k1 = dt*[dP; dQ];
21 | x1 = x0 + 0.5*k1;
22 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, x1(1), x1(2), w_c);
23 | k2 = dt*[dP; dQ];
24 | x2 = x0 + 0.5*k2;
25 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, x2(1), x2(2), w_c);
26 | k3 = dt*[dP; dQ];
27 | x3 = x0 + k3;
28 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, x3(1), x3(2), w_c);
29 | k4 = dt*[dP; dQ];
30 | x = x0 + 1/6*(k1 + 2*k2 + 2*k3 +k4);
31 | P = x(1);
32 | Q = x(2);
33 |
34 | %% Angle Droop Control Dynamics
35 | x0 = [delta0; v_od_star0];
36 | [ddelta, dv_od_star] = Dyn_Angle_Droop(delta0, v_od_star0, P_star, Q_star, P0, Q0, Ta, Da, Tv, Dv, V_star, delta_star);
37 | % next step
38 | k1 = dt*[ddelta; dv_od_star];
39 | x1 = x0 + 0.5*k1;
40 | [ddelta, dv_od_star] = Dyn_Angle_Droop(x1(1), x1(2), P_star, Q_star, P0, Q0, Ta, Da, Tv, Dv, V_star, delta_star);
41 | k2 = dt*[ddelta; dv_od_star];
42 | x2 = x0 + 0.5*k2;
43 | [ddelta, dv_od_star] = Dyn_Angle_Droop(x2(1), x2(2), P_star, Q_star, P0, Q0, Ta, Da, Tv, Dv, V_star, delta_star);
44 | k3 = dt*[ddelta; dv_od_star];
45 | x3 = x0 + k3;
46 | [ddelta, dv_od_star] = Dyn_Angle_Droop(x3(1), x3(2), P_star, Q_star, P0, Q0, Ta, Da, Tv, Dv, V_star, delta_star);
47 | k4 = dt*[ddelta; dv_od_star];
48 | x = x0 + 1/6*(k1 + 2*k2 + 2*k3 +k4);
49 | delta = x(1);
50 | v_od_star = x(2);
51 | v_oq_star = 0;
52 | [ddelta, ~] = Dyn_Angle_Droop(delta, v_od_star, P_star, Q_star, P0, Q0, Ta, Da, Tv, Dv, V_star, delta_star);
53 | w = w_n + ddelta;
54 | end
55 |
56 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/PowCtr_Freq_RK4.m:
--------------------------------------------------------------------------------
1 | function [delta, v_od_star, v_oq_star, w, P, Q] = PowCtr_Freq_RK4(v_od,v_oq, i_od, i_oq, para_pc, ...
2 | P0, Q0, delta0, v_od_star0, w0, w_n, P_star, Q_star,...
3 | V_star,dt)
4 | %Power Controller
5 | % variables ending with 0 are last step variables
6 | % w_n: nominal frequency
7 | % v_oq_star == 0;
8 | Tf = para_pc.Tf;
9 | Tv = para_pc.Tv;
10 | Df = para_pc.Df;
11 | Dv = para_pc.Dv;
12 | w_c = para_pc.w_c;
13 |
14 | %% Dynamics of power sensor
15 | p_t = v_od*i_od + v_oq*i_oq;
16 | q_t = -v_od*i_oq + v_oq*i_od;
17 | % Update P and Q using RK4
18 | x0 = [P0; Q0];
19 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, P0, Q0, w_c);
20 | k1 = dt*[dP; dQ];
21 | x1 = x0 + 0.5*k1;
22 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, x1(1), x1(2), w_c);
23 | k2 = dt*[dP; dQ];
24 | x2 = x0 + 0.5*k2;
25 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, x2(1), x2(2), w_c);
26 | k3 = dt*[dP; dQ];
27 | x3 = x0 + k3;
28 | [dP,dQ] = Dynamics_PowSensor(p_t, q_t, x3(1), x3(2), w_c);
29 | k4 = dt*[dP; dQ];
30 | x = x0 + 1/6*(k1 + 2*k2 + 2*k3 +k4);
31 | P = x(1);
32 | Q = x(2);
33 |
34 | %% Frequency Droop Control Dynamics
35 | x0 = [delta0; w0; v_od_star0];
36 | [ddelta, dw, dv_od_star] = Dyn_Freq_Droop(w0, v_od_star0, P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
37 | % next step
38 | k1 = dt*[ddelta; dw; dv_od_star];
39 | x1 = x0 + 0.5*k1;
40 | [ddelta, dw, dv_od_star] = Dyn_Freq_Droop(x1(2), x1(3), P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
41 | k2 = dt*[ddelta; dw; dv_od_star];
42 | x2 = x0 + 0.5*k2;
43 | [ddelta, dw, dv_od_star] = Dyn_Freq_Droop(x2(2), x2(3), P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
44 | k3 = dt*[ddelta; dw; dv_od_star];
45 | x3 = x0 + k3;
46 | [ddelta, dw, dv_od_star] = Dyn_Freq_Droop(x3(2), x3(3), P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
47 | k4 = dt*[ddelta; dw; dv_od_star];
48 | x = x0 + 1/6*(k1 + 2*k2 + 2*k3 +k4);
49 | delta = x(1);
50 | w = x(2);
51 | v_od_star = x(3);
52 | v_oq_star = 0;
53 | end
54 |
55 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/PowCtr_Freq_Simple.m:
--------------------------------------------------------------------------------
1 | function [delta,w,v] = PowCtr_Freq_Simple(delta0, w0, v0, P0, Q0, ...
2 | V_star, P_star, Q_star, para,dt, w_n)
3 | %This is a simple version of the frequency droop controller; inner loop
4 | %controllers are not modeled in this function.
5 | Tf = para.Tf;
6 | Df = para.Df;
7 | Tv = para.Tv;
8 | Dv = para.Dv;
9 |
10 |
11 | % Update delta and v using RK4
12 | x0 = [delta0; w0; v0];
13 | [ddelta, dw, dv] = Dyn_Freq_Droop_Simple( x0(2), x0(3), P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
14 | k1 = dt*[ddelta; dw; dv];
15 | x1 = x0 + 0.5*k1;
16 | [ddelta, dw, dv] = Dyn_Freq_Droop_Simple(x1(2), x1(3), P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
17 | k2 = dt*[ddelta; dw; dv];
18 | x2 = x0 + 0.5*k2;
19 | [ddelta, dw, dv] = Dyn_Freq_Droop_Simple(x2(2), x2(3), P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
20 | k3 = dt*[ddelta;dw; dv];
21 | x3 = x0 + k3;
22 | [ddelta, dw, dv] = Dyn_Freq_Droop_Simple(x3(2), x3(3), P_star, Q_star, P0, Q0, Tf, Df, Tv, Dv, w_n, V_star);
23 | k4 = dt*[ddelta; dw; dv];
24 | x = x0 + 1/6*(k1 + 2*k2 + 2*k3 +k4);
25 | delta = x(1);
26 | w = x(2);
27 | v = x(3);
28 | end
29 |
30 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/Test1_InitCdt_v2.m:
--------------------------------------------------------------------------------
1 | %% Test 1: Investigating parameters and initial conditions
2 | Sb = 10e3; % base rating VA;
3 | Vb = 220*sqrt(3); % voltage base
4 | f = 50; % 50 Hz system
5 | w_n = 2*pi*f;
6 | r_line1 = 0.35; % Ohm
7 | L_line1 = 0.58/w_n; % H
8 | % include the coupling impedance to the line parameters
9 | Lc = 0.35e-3; % H
10 | r_Lc = 0.03;
11 | r_line = r_line1 + r_Lc;
12 | L_line = L_line1 + Lc;
13 |
14 | %% Parameters for LC filter
15 | para.L_f = 1.35e-3;
16 | para.C_f = 50e-6;
17 | para.r_f = 0.1;
18 |
19 | %% parameters for power, voltage, & current controllers
20 | para.w_c = 31.41;
21 | para.K_pv = 0.05;
22 | para.K_iv = 390;
23 | para.K_pc = 10.5;
24 | para.K_ic = 16e3;
25 | para.F = 0.75;
26 | para.Tf = 1.5*Sb; % freq droop controller: converting to the actual value (P and V is in p.u., while w is rad/s)
27 | para.Ta = 1; % angle droop controller: no need to convert to the actual value
28 | para.Tv = 10;
29 | para.Df = 1.5*Sb; % previous value 0.8
30 | para.Da = 0.2/Sb;
31 | para.Dv = 0.2/7*Vb/Sb;
32 |
33 | %% Initial conditions dispatched by ISO
34 | V_infd = 379; % V
35 | V_infq = -20; % V
36 | v_od0 = 381.8;
37 | v_oq0 = 0;
38 |
39 | %% Initialization
40 | % Power Controller
41 | delta0 = 0;
42 |
43 | [i_od0,i_oq0] = LinePlusInfBus(r_line, L_line, w_n, V_infd, ...
44 | V_infq, v_od0, v_oq0,delta0);
45 | P0 = v_od0*i_od0 + v_oq0*i_oq0;
46 | Q0 = -v_od0*i_oq0 + v_oq0*i_od0;
47 | P_star = P0;
48 | Q_star = Q0;
49 | V_star = v_od0;
50 | w0 = w_n;
51 |
52 |
53 |
54 | % LC filter
55 | i_ld0 = -w_n*para.C_f*v_oq0 + i_od0;
56 | i_lq0 = w_n*para.C_f*v_od0 + i_oq0;
57 | V_idq0 = [para.r_f, -w_n*para.L_f; w_n*para.L_f, para.r_f]*[i_ld0; i_lq0] + [v_od0; v_oq0];
58 | v_id0 = V_idq0(1);
59 | v_iq0 = V_idq0(2);
60 | v_id0_star = v_id0;
61 | v_iq0_star = v_iq0;
62 |
63 | % current controller
64 | gamma_d0 = 1/para.K_ic*(v_id0 + w_n*para.L_f*i_lq0);
65 | gamma_q0 = 1/para.K_ic*(v_iq0 - w_n*para.L_f*i_ld0);
66 |
67 | % voltage controller
68 | phi_d0 = 1/para.K_iv*(i_ld0 - para.F*i_od0 + w_n*para.C_f*v_oq0);
69 | phi_q0 = 1/para.K_iv*(i_lq0 - para.F*i_oq0 - w_n*para.C_f*v_od0);
70 |
71 |
72 | save('Test1_IntCdt.mat', 'P0','Q0','delta0', 'w0','phi_d0', 'phi_q0', 'gamma_d0', 'gamma_q0',...
73 | 'v_od0', 'v_oq0','i_od0', 'i_oq0', 'i_ld0','i_lq0','P_star','Q_star', 'V_star', 'v_id0','v_iq0');
74 | save('Test1_Para.mat','para','w_n','r_line', 'L_line');
75 |
76 |
77 |
78 |
--------------------------------------------------------------------------------
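The initial conditions above come from forcing every derivative to zero at $t = 0$: steady state of the LC filter gives the inductor currents and bridge voltages, and the PI integrator states follow from the controller output equations:

$$i_{ld0} = i_{od0} - \omega_n C_f v_{oq0}, \qquad i_{lq0} = i_{oq0} + \omega_n C_f v_{od0}$$
$$\begin{bmatrix} v_{id0} \\ v_{iq0} \end{bmatrix} = \begin{bmatrix} r_f & -\omega_n L_f \\ \omega_n L_f & r_f \end{bmatrix}\begin{bmatrix} i_{ld0} \\ i_{lq0} \end{bmatrix} + \begin{bmatrix} v_{od0} \\ v_{oq0} \end{bmatrix}, \qquad \gamma_{d0} = \frac{v_{id0} + \omega_n L_f i_{lq0}}{K_{ic}}, \quad \gamma_{q0} = \frac{v_{iq0} - \omega_n L_f i_{ld0}}{K_{ic}}$$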
/Code/Joint Simulation/code/inverter/Test1_IntCdt.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/inverter/Test1_IntCdt.mat
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/Test1_Para.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tamu-engineering-research/Open-source-power-dataset/7b3644d75fd493b82f4129e6b01f36cd91e26dd4/Code/Joint Simulation/code/inverter/Test1_Para.mat
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/Test5_CreateFault.m:
--------------------------------------------------------------------------------
1 | %% Test 5: This experiment introduces a three-phase to ground fault
2 | clear;
3 | load Test1_IntCdt.mat;
4 | load Test1_Para.mat;
5 | dt = 0.00001;
6 | t_end = 10; % simulation time
7 | t = 0:dt:t_end;
8 | m = length(t);
9 |
10 | %% Create a 3-phi to ground fault
11 | V_infd = 379*ones(m,1); % V
12 | V_infq = -20*ones(m,1); % V
13 | t_fault_start = 1; % fault occurs at t = 1 s
14 | t_dur = 2000*dt*3; % 3 cycles (2000*dt = 0.02 s = one 50 Hz cycle)
15 | fault_seg = V_infd(t>=t_fault_start & t<=(t_fault_start+t_dur));
16 | V_infd(t>=t_fault_start & t<=(t_fault_start+t_dur)) = zeros(length(fault_seg),1);
17 | V_infq(t>=t_fault_start & t<=(t_fault_start+t_dur)) = zeros(length(fault_seg),1);
18 | %% create array to store data
19 | delta = zeros(m,1); delta(1) = delta0;
20 | w = zeros(m,1); w(1) = w0;
21 | P = zeros(m,1); Q = zeros(m,1);
22 | P(1) = P0; Q(1) = Q0;
23 | phi_d = zeros(m,1); phi_q = zeros(m,1);
24 | phi_d(1) = phi_d0; phi_q(1) = phi_q0;
25 | i_ld_star = zeros(m,1); i_lq_star = zeros(m,1);
26 | i_ld_star(1) = i_ld0; i_lq_star(1) = i_lq0;
27 | gamma_d = zeros(m,1); gamma_q = zeros(m,1);
28 | gamma_d(1) = gamma_d0; gamma_q(1) = gamma_q0;
29 | v_id = zeros(m,1); v_iq = zeros(m,1);
30 | v_id(1) = v_id0; v_iq(1) = v_iq0;
31 | i_ld = zeros(m,1); i_lq = zeros(m,1);
32 | i_ld(1) = i_ld0; i_lq(1) = i_lq0;
33 | v_od = zeros(m,1); v_oq = zeros(m,1);
34 | v_od(1) = v_od0; v_oq(1) = v_oq0;
35 | v_od_star = v_od; v_oq_star = v_oq;
36 | i_od = zeros(m,1); i_oq = zeros(m,1);
37 | i_od(1) = i_od0; i_oq(1) = i_oq0;
38 | %% do simulation
39 | seg_num = 20; % print a progress indicator every 5% of the iterations
40 | seg_k = floor(m/seg_num);
41 | for k = 2:m
42 | %% Next-step prediction using differential equation
43 | % Power Controller
44 | [delta(k), v_od_star(k), v_oq_star(k), w(k), P(k), Q(k)] = PowCtr_Freq_RK4(v_od(k-1),v_oq(k-1), i_od(k-1), i_oq(k-1), para, ...
45 | P(k-1), Q(k-1), delta(k-1), v_od_star(k-1), w(k-1), w_n, P_star, Q_star,...
46 | V_star,dt);
47 | % Voltage Controller
48 | [phi_d(k), phi_q(k)] = VolCtr_diff(v_od_star(k-1),v_oq_star(k-1), v_od(k-1), v_oq(k-1), dt, phi_d(k-1), phi_q(k-1));
49 | % Current Controller
50 | [gamma_d(k), gamma_q(k)] = CurCtr_diff(i_ld_star(k-1),i_lq_star(k-1), i_ld(k-1), i_lq(k-1), dt, gamma_d(k-1), gamma_q(k-1));
51 | % LC filter
52 | [i_ld(k), i_lq(k), v_od(k), v_oq(k)] = LC_Filter_RK4(para, i_ld(k-1), i_lq(k-1), v_od(k-1), v_oq(k-1), v_id(k-1), v_iq(k-1), i_od(k-1), i_oq(k-1),w(k-1),dt);
53 | %% Update algebraic variables using network equations
54 | [i_od(k),i_oq(k)] = LinePlusInfBus(r_line, L_line, w_n, V_infd(k), ...
55 | V_infq(k), v_od(k), v_oq(k), delta(k));
56 | [i_ld_star(k), i_lq_star(k)] = VolCtr_alg(para, i_od(k), i_oq(k), v_od(k), ...
57 | v_oq(k), v_od_star(k), v_oq_star(k), phi_d(k), phi_q(k),w_n);
58 | [v_id(k), v_iq(k)] = CurCtr_alg(para, w_n, i_ld(k), i_lq(k), ...
59 | i_ld_star(k), i_lq_star(k), gamma_d(k), gamma_q(k));
60 | %% Print an indicator suggesting the simulation progress
61 | if rem(k, seg_k) ==0
62 | fprintf('Current Progress %d percent\n', k/seg_k*5);
63 | end
64 | end
65 | figure;
66 | plot(t, i_ld,'LineWidth',1);
67 | xlabel('time (sec)');
68 | ylabel('i_ld');
69 | grid on;
70 | figure;
71 | plot(t, i_lq,'LineWidth',1);
72 | xlabel('time (sec)');
73 | ylabel('i_lq');
74 | grid on;
75 |
76 | figure;
77 | plot(t, v_od,'LineWidth',1);
78 | xlabel('time (sec)');
79 | ylabel('v_od');
80 | grid on;
81 | figure;
82 | plot(t, v_oq,'LineWidth',1);
83 | xlabel('time (sec)');
84 | ylabel('v_oq');
85 | grid on;
86 |
87 | figure;
88 | plot(t, P,'LineWidth',1);
89 | xlabel('time (sec)');
90 | ylabel('P');
91 | grid on;
92 | figure;
93 | plot(t, Q,'LineWidth',1);
94 | xlabel('time (sec)');
95 | ylabel('Q');
96 | grid on;
97 |
98 | figure;
99 | plot(t, delta,'LineWidth',1);
100 | xlabel('time (sec)');
101 | ylabel('delta');
102 | grid on;
103 | figure;
104 | plot(t, w,'LineWidth',1);
105 | xlabel('time (sec)');
106 | ylabel('w');
107 | grid on;
108 |
109 | save('Test5_DetailedModel_Freq.mat', 'w', 'delta', 'P','Q','i_ld','i_lq','v_od','v_oq','t');
110 |
111 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/VolCtr_alg.m:
--------------------------------------------------------------------------------
1 | function [i_ld_star, i_lq_star] = VolCtr_alg(para_vc, i_od, i_oq, v_od, ...
2 | v_oq, v_od_star, v_oq_star, phi_d, phi_q,w_n)
3 | %Algebraic equations for the voltage controller
4 | % Given all state variables updates, obtain the output variables
5 | F = para_vc.F;
6 | C_f = para_vc.C_f;
7 | K_pv = para_vc.K_pv;
8 | K_iv = para_vc.K_iv;
9 | i_ld_star = F*i_od - w_n*C_f*v_oq + K_pv*(v_od_star - v_od) + K_iv*phi_d;
10 | i_lq_star = F*i_oq + w_n*C_f*v_od + K_pv*(v_oq_star - v_oq) + K_iv*phi_q;
11 | end
12 |
13 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/inverter/VolCtr_diff.m:
--------------------------------------------------------------------------------
1 | function [phi_d, phi_q] = VolCtr_diff(v_od_star,v_oq_star, v_od0, v_oq0, dt, phi_d0, phi_q0)
2 | %This function updates the state variables in the voltage controllers
3 | % dt: time step
4 | % phi_d0, phi_q0: state variables in the last step
5 | % Euler approach is applied
6 | dphi_d = v_od_star - v_od0;
7 | dphi_q = v_oq_star - v_oq0;
8 | phi_d = phi_d0 + dt* dphi_d;
9 | phi_q = phi_q0 + dt* dphi_q;
10 | end
11 |
12 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/main.py:
--------------------------------------------------------------------------------
1 | from cosim import cosim, simdata
2 | import psspy as ps
3 | #case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\PSSE23_wind\savnw_wind_tuned.raw'
4 | case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\Re_wind_v3\savnw_wind_scale_down.raw'
5 | #case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\PSSE\IEEE_39_bus.raw'
6 | case_D13 = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_D\13Bus\IEEE13Nodeckt_scaled.dss'
7 | data_path = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\data\psse_data_test.csv'
8 |
9 | data = simdata(data_path)
10 | env = cosim(case_T, [case_D13, case_D13], [3005, 3008], data)
11 | print("env created")
12 | #env.solve_ss(1)
13 | trans,dist,info = env.dynsim(1, 800)
14 | print(info)
15 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/one.py:
--------------------------------------------------------------------------------
1 | from cosim import cosim, simdata
2 | import psspy as ps
3 | import sys, os
4 | import gc
5 | import csv
6 |
7 | ep = int(sys.argv[1])
8 |
9 |
10 | #case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\PSSE23_wind\savnw_wind_tuned.raw'
11 | case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\Re_wind_v3\savnw_wind_scale_down.raw'
12 | #case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\PSSE\IEEE_39_bus.raw'
13 | case_D13 = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_D\13Bus\IEEE13Nodeckt_scaled.dss'
14 | data_path = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\data\psse_data_out.csv'
15 | out_path = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\output_fig'
16 |
17 | #env.solve_ss(1)
18 | sec_num = 4
19 | data = simdata(data_path)
20 |
21 | env = cosim(case_T, [case_D13, case_D13], [3005, 3008], data)
22 | step_num = int(sec_num / env.dyn_ts)
23 | # find a row in the data file
24 | row_idx = ep
25 | # simulate
26 | trans_ts, dist_ts, info = env.dynsim(row_idx, step_num)
27 |
28 | # create path
29 | epi_dir = out_path + '\\row_' + str(row_idx)
30 | if not os.path.exists(epi_dir):
31 | os.mkdir(epi_dir)
32 | trans_out_path = epi_dir + '\\trans.csv'
33 | dist_out_path = epi_dir + '\\dist.csv'
34 | info_out_path = epi_dir + '\\info.csv'
35 |
36 |
37 | ## trans
38 | keys, vals = [], []
39 | for key, val in trans_ts.items():
40 | keys.append(key)
41 | vals.append(val)
42 | keys_num = len(keys)
43 | # process header and value array
44 | with open(trans_out_path, 'w') as fh:
45 | # header
46 | line = ''
47 | line += keys[-1]
48 | for k in range(keys_num-1):
49 | line += ', '
50 | line += keys[k]
51 | line += '\n'
52 | fh.write(line)
53 | # data
54 | for t in range(step_num):
55 | line = ''
56 | line += str(vals[-1][t])
57 | for k in range(keys_num-1):
58 | line += ', '
59 | line += str(vals[k][t])
60 | line += '\n'
61 | fh.write(line)
62 | del keys
63 | del vals
64 | gc.collect()
65 |
66 | ## dist
67 | keys, vals = [], []
68 | for key, val in dist_ts.items():
69 | keys.append(key)
70 | vals.append(val)
71 | keys_num = len(keys)
72 | # process header and value array
73 | with open(dist_out_path, 'w') as fh:
74 | # header
75 | line = ''
76 | for k in range(keys_num-1):
77 | line += keys[k]
78 | line += ', '
79 | line += keys[keys_num-1]
80 | line += '\n'
81 | fh.write(line)
82 | # data
83 | for t in range(step_num):
84 | line = ''
85 | for k in range(keys_num-1):
86 | line += str(vals[k][t])
87 | line += ', '
88 | line += str(vals[-1][t])
89 | line += '\n'
90 | fh.write(line)
91 | del keys
92 | del vals
93 | del env
94 | gc.collect()
95 |
96 | # info
97 | with open(info_out_path, 'w') as fh:
98 | for key, val in info.items():
99 | fh.write(key +', '+ str(val) + '\n')
100 |
101 |
--------------------------------------------------------------------------------
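For the trans file, the loops above put the last-inserted dict key (presumably the time channel) in the first CSV column. A compact equivalent sketch (the helper name dump_timeseries is hypothetical; it assumes each dict value is a sequence of length step_num):

    def dump_timeseries(path, ts_dict, step_num):
        # hypothetical helper mirroring the trans-file loops above
        keys = list(ts_dict)
        keys = keys[-1:] + keys[:-1]    # last-inserted key (presumably time) first
        with open(path, 'w') as fh:
            fh.write(', '.join(keys) + '\n')
            for t in range(step_num):
                fh.write(', '.join(str(ts_dict[k][t]) for k in keys) + '\n')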
/Code/Joint Simulation/code/pvmodel/test_inf_bus.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | import numpy as np
3 | from numpy.linalg import inv
4 | from solar_inverter import solar_inverter
5 |
6 | def inf_bus_simu(V_inf_D, V_inf_Q, V_mag, V_ang):
7 | # V_inf_D = 379
8 | # V_inf_Q = -20
9 | w_n = 50*2*np.pi
10 | r = 0.35+0.03
11 | L = 0.58/w_n+0.35e-3
12 | Z = np.array([[r, -w_n*L],[w_n*L, r]])
13 | Z_inv = inv(Z)
14 | V_D = V_mag*np.cos(V_ang)
15 | V_Q = V_mag*np.sin(V_ang)
16 | I_D = Z_inv[0,0]*(V_D - V_inf_D) + Z_inv[0,1]*(V_Q - V_inf_Q)
17 | I_Q = Z_inv[1,0]*(V_D - V_inf_D) + Z_inv[1,1]*(V_Q - V_inf_Q)
18 | I_mag = np.sqrt(I_D**2+I_Q**2)
19 | I_ang = np.angle(I_D + 1j *I_Q)
20 | return I_mag, I_ang
21 |
22 | # parameter setting of solar inverter
23 | para = {}
24 | para['dt'] = 0.0001 #### simulation step size
25 | para['Sb'] = 10e3 #### nominal power
26 | para['Vb'] = 220*np.sqrt(3)#### nominal voltage
27 | para['f'] = 50
28 | para['w_n'] = para['f']*2*np.pi
29 | para['L_f'] =1.35e-3
30 | para['C_f'] =50e-6
31 | para['r_f'] =0.1
32 | para['w_c'] = 31.41
33 | para['K_pv'] = 0.05
34 | para['K_iv'] = 390
35 | para['K_pc'] = 10.5
36 | para['K_ic'] = 16e3
37 | para['F'] = 0.75
38 | para['Tf'] = 1.5*para['Sb']
39 | para['Ta'] = 1
40 | para['Tv'] = 10
41 | para['Df'] = 1.5*para['Sb']
42 | para['Da'] = 0.2/para['Sb']
43 | para['Dv'] = 0.2/7*para['Vb']/para['Sb']
44 |
45 | # simulation first to get initial V and I
46 | # P = XXX #################
47 | # Q = XXX #################
48 | V_mag = 381.8
49 | V_ang = 0.1
50 | V_inf_D = 379
51 | V_inf_Q = -20
52 | I_mag, I_ang = inf_bus_simu(V_inf_D, V_inf_Q, V_mag, V_ang) ################# equivalent PQ bus
53 |
54 | # initialize class solar inverter
55 | system_13bus_solar = solar_inverter( I_mag, I_ang, V_mag, V_ang, para=para)
56 |
57 | # start dynamical simulation
58 | for i in range(2000):
59 | print(i)
60 | # update internal state
61 | system_13bus_solar.cal_next_step()
62 | # update new terminal voltage
63 | V_mag, V_ang = system_13bus_solar.get_latest_terminal_voltage()
64 | # create fault
65 | if (i >= 20) and (i <= int(0.01/para['dt'])):
66 | V_inf_D = 379*0.8
67 | V_inf_Q = -20*1.2
68 | else:
69 | V_inf_D = 379
70 | V_inf_Q = -20
71 | # get corresponding new terminal current
72 | I_mag, I_ang = inf_bus_simu(V_inf_D, V_inf_Q, V_mag, V_ang)
73 | # update some setting variables
74 | system_13bus_solar.cal_next_step_algebraic(I_mag, I_ang)
75 |
76 | delta = system_13bus_solar.time_varying_state['delta']
77 | v_od = system_13bus_solar.time_varying_state['v_od']
78 | v_oq = system_13bus_solar.time_varying_state['v_oq']
79 | w = system_13bus_solar.time_varying_state['w']
80 | plt.figure()
81 | plt.plot(delta)
82 | plt.show()
83 |
84 | a=0
85 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/run_scenarios.bat:
--------------------------------------------------------------------------------
1 | for /l %%i in (361, 1, 1000) do (
2 | echo %%i
3 | python32 one.py %%i
4 | ping 127.0.0.1 -n 2 > nul
5 | )
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/run_sspf.bat:
--------------------------------------------------------------------------------
1 | for /l %%i in (1, 1, 100) do (
2 | echo %%i
3 | python32 ss_data.py
4 | )
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/ss_data.py:
--------------------------------------------------------------------------------
1 | from cosim import cosim, simdata
2 | import psspy as ps
3 | import sys, os, time
4 | import csv
5 |
6 | #case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\PSSE23_wind\savnw_wind_tuned.raw'
7 | case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\Re_wind_v3\savnw_wind_scale_down.raw'
8 | #case_T = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_T\PSSE\IEEE_39_bus.raw'
9 | case_D13 = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\case_D\13Bus\IEEE13Nodeckt_scaled.dss'
10 | out_path = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\output_ss\pf_result_2.csv'
11 | data_path = r'C:\Users\Dongqi Wu\OneDrive\Work\USC\data\v3data_2.csv'
12 |
13 | print('Reading files..')
14 | data = simdata(data_path, version=3)
15 | print('Start simulating!')
16 | env = cosim(case_T, [case_D13, case_D13], [3005, 3008], data)
17 |
18 | #end = len(env.ts)
19 |
20 | # size of current csv
21 | with open(out_path) as fh: # count rows already written; the csv file must already exist
22 | existing_row_num = sum(1 for line in fh)
23 | start = max(0, existing_row_num - 1) # resume after the existing data rows (header excluded)
24 | end = start + 10000
25 |
26 | if existing_row_num == 0:
27 | empty = 1
28 | else:
29 | empty = 0
30 |
31 | print('Running from row '+str(start)+' to '+str(end))
32 | with open(out_path, 'a') as fh:
33 | for row in range(start, end):
34 | if row % 5000 == 0:
35 | print(str(row)+ ' lines completed')
36 | env.solve_ss(row)
37 | #time.sleep(0.2)
38 | # write header
39 | if empty:
40 | header = ''
41 | header += 'time, '
42 | for bus in range(env.trans.bus_num):
43 | header += 'Vm_'+str(env.trans.bus_ids[bus])
44 | header += ', '
45 | for bus in range(env.trans.bus_num):
46 | header += 'Va_'+str(env.trans.bus_ids[bus])
47 | header += ', '
48 | for brn in range(env.trans.line_num):
49 | header += 'P_'+str(env.trans.line_T[brn][0])
50 | header += '_'+str(env.trans.line_T[brn][1])
51 | header += '_'+env.trans.line_ids[brn]
52 | header += ', '
53 | for brn in range(env.trans.line_num):
54 | header += 'Q_'+str(env.trans.line_T[brn][0])
55 | header += '_'+str(env.trans.line_T[brn][1])
56 | header += '_'+env.trans.line_ids[brn]
57 | header += ', '
58 | header += '\n'
59 | fh.write(header)
60 | empty = 0
61 |
62 | # collect data
63 | all_vm = ps.abusreal(-1, 2, 'PU')[1][0]
64 | all_va = ps.abusreal(-1, 2, 'ANGLE')[1][0]
65 | all_p = ps.abrnreal(-1,1,3,4,1,'P')[1][0]
66 | all_q = ps.abrnreal(-1,1,3,4,1,'Q')[1][0]
67 |
68 | # append
69 | curr_line = ''
70 | curr_line += str(env.ts[row])
71 | curr_line += ', '
72 | for bus in range(env.trans.bus_num):
73 | curr_line += str(all_vm[bus])
74 | curr_line += ', '
75 | for bus in range(env.trans.bus_num):
76 | curr_line += str(all_va[bus])
77 | curr_line += ', '
78 | for brn in range(env.trans.line_num):
79 | curr_line += str(all_p[brn])
80 | curr_line += ', '
81 | for brn in range(env.trans.line_num):
82 | curr_line += str(all_q[brn])
83 | curr_line += ', '
84 | curr_line += '\n'
85 | fh.write(curr_line)
86 |
87 | env.trans.close()
88 | env.dist.reset()
89 |
90 |
91 |
92 |
--------------------------------------------------------------------------------
/Code/Joint Simulation/code/utils.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | import os, sys
3 |
4 | @contextmanager
5 | def silence():
6 | with open(os.devnull, "w") as devnull:
7 | old_stdout = sys.stdout
8 | sys.stdout = devnull
9 | try:
10 | yield
11 | finally:
12 | sys.stdout = old_stdout
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
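Typical use of the silence() helper, e.g. around chatty psspy calls (sketch; assumes utils.py is importable):

    from utils import silence

    with silence():
        print('this goes to os.devnull')
    print('stdout is restored here')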
/Code/Joint Simulation/requirements.txt:
--------------------------------------------------------------------------------
1 | pywin32==301
2 | pandas==1.3.3
3 | matplotlib==3.4.3
4 | numpy==1.20.3
5 |
--------------------------------------------------------------------------------
/Code/README.md:
--------------------------------------------------------------------------------
1 | ## Code Navigation
2 | `Please see detailed explanation and comments in each subfolder.`
3 | - **BenchmarkModel**
4 | - *Event Classification*: baseline models for event detection, classification and localization
5 | - *Load Forecasting*: baseline models for hierarchical load/renewable point forecasts and prediction intervals
6 | - *Synthetic Data Generation*: baseline models for synthetic data generation of physical-laws-constrained PMU measurement time series
7 | - **Joint Simulation**: Python code for joint steady-state and transient simulation of coupled transmission and distribution systems
8 | - **Data Processing**: Python code for collecting the real-world load and weather data
9 |
--------------------------------------------------------------------------------
/Code/dataloader.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | warnings.filterwarnings('ignore')
3 | from BenchmarkModel.EventClassification.processing import ClassificationDataset
4 | from BenchmarkModel.LoadForecasting.processing import ForecastingDataset
5 | from BenchmarkModel.SyntheticDataGeneration.processing import GenerationDataset
6 |
7 |
8 | class TimeSeriesLoader():
9 | def __init__(self, task, root='./../PSML/'):
10 | """ Initiate data loading for each task
11 | """
12 | self.task = task
13 | if self.task == 'forecasting':
14 | # Returns load and renewable energy forecasting data
15 | self.dataset = ForecastingDataset(root)
16 | elif self.task == 'classification':
17 | # Returns event detection, classification and localization data
18 | self.dataset = ClassificationDataset(root)
19 | elif self.task == 'generation':
20 | # Returns PMU stream data
21 | self.dataset = GenerationDataset(root)
22 | else:
23 | raise ValueError(f'unknown task: {task}')
24 |
25 | def load(self, batch_size, shuffle, sliding_window=120, loc=None, year=None):
26 | if self.task == 'forecasting':
27 | if loc is None:
28 | loc = 'CAISO_zone_1'
29 | year = 2018
30 | train_loader, test_loader = self.dataset.load(sliding_window, loc, year, batch_size, shuffle)
31 | return train_loader, test_loader
32 | elif self.task == 'classification':
33 | train_loader, test_loader = self.dataset.load(batch_size, shuffle)
34 | return train_loader, test_loader
35 | elif self.task == 'generation':
36 | train_loader, test_loader = self.dataset.load(batch_size, shuffle)
37 | return train_loader, test_loader
38 |
39 |
40 | def _test_classification_loader():
41 | loader_ins = TimeSeriesLoader('classification', root='/meladyfs/newyork/nanx/Datasets/PSML')
42 | train_loader, test_loader = loader_ins.load(batch_size=32, shuffle=True)
43 |
44 | print(f'train_loader: {len(train_loader)}')
45 | for i in train_loader:
46 | feature, label = i
47 | print(f'feature: {feature.shape}')
48 | print(f'label: {label.shape}')
49 | break
50 |
51 | print(f'test_loader: {len(test_loader)}')
52 | for i in test_loader:
53 | print(f'feature: {i.shape}')
54 | break
55 |
56 | return
57 |
58 | def _test_forecasting_loader():
59 | loader_ins = TimeSeriesLoader('forecasting', root='/meladyfs/newyork/nanx/Datasets/PSML')
60 | train_loader, test_loader = loader_ins.load(batch_size=32, shuffle=True)
61 |
62 | print(f'train_loader: {len(train_loader)}')
63 | for i in train_loader:
64 | x, y, flag = i
65 | print(f'x: {x.shape}')
66 | print(f'y: {y.shape}')
67 | print(f'flag: {flag.shape}')
68 | break
69 |
70 | print(f'test_loader: {len(test_loader)}')
71 | for i in test_loader:
72 | ID, x = i
73 | print(f'ID: {ID.shape}')
74 | print(f'x: {x.shape}')
75 | break
76 |
77 | return
78 |
79 | def _test_generation_loader():
80 | loader_ins = TimeSeriesLoader('generation', root='/meladyfs/newyork/nanx/Datasets/PSML')
81 | train_loader, test_loader = loader_ins.load(batch_size=32, shuffle=True)
82 |
83 | print(f'train_loader: {len(train_loader)}')
84 | for i in train_loader:
85 | x, y = i
86 | print(f'x: {x.shape}')
87 | print(f'y: {y.shape}')
88 | break
89 |
90 | print(f'test_loader: {len(test_loader)}')
91 | for i in test_loader:
92 | x, y = i
93 | print(f'x: {x.shape}')
94 | print(f'y: {y.shape}')
95 | break
96 |
97 | return
98 |
99 | if __name__ == '__main__':
100 | #_test_classification_loader()
101 | #_test_forecasting_loader()
102 | #_test_generation_loader()
103 | print()
104 |
105 |
106 |
--------------------------------------------------------------------------------
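Example usage of the loader (a sketch based on the defaults and test helpers above; the root path must point at the downloaded PSML dataset):

    from dataloader import TimeSeriesLoader

    loader = TimeSeriesLoader('forecasting', root='./../PSML/')
    train_loader, test_loader = loader.load(batch_size=32, shuffle=True,
                                            sliding_window=120, loc='CAISO_zone_1', year=2018)
    for x, y, flag in train_loader:
        print(x.shape, y.shape, flag.shape)
        break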
/requirements.txt:
--------------------------------------------------------------------------------
1 | --find-links https://download.pytorch.org/whl/torch_stable.html
2 | torch==1.8.1+cu111
3 | scikit-learn==0.24.2
4 | pandas==1.2.4
--------------------------------------------------------------------------------