├── LICENSE
├── README.md
├── data
│   └── energydata_complete.csv
├── main.py
├── models.py
├── results
│   ├── plots
│   │   ├── Appliances Energy Prediction using AttentionLSTM.png
│   │   ├── Appliances Energy Prediction using CNN.png
│   │   ├── Appliances Energy Prediction using DNN.png
│   │   ├── Appliances Energy Prediction using GRU.png
│   │   ├── Appliances Energy Prediction using LSTM.png
│   │   ├── Appliances Energy Prediction using RNN.png
│   │   ├── Appliances Energy Prediction using RecursiveLSTM.png
│   │   └── Appliances Energy Prediction.png
│   └── weights
│       ├── BEST_AttentionLSTM.pkl
│       ├── BEST_CNN.pkl
│       ├── BEST_DNN.pkl
│       ├── BEST_GRU.pkl
│       ├── BEST_LSTM.pkl
│       ├── BEST_RNN.pkl
│       └── BEST_RecursiveLSTM.pkl
└── utils.py

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 hee9joon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Univariate-Time-Series-Prediction-using-Deep-Learning
Univariate Time Series Prediction using Deep Learning and PyTorch

### 0. Introduction
This repository provides **Univariate Time Series Prediction** using deep learning models, including **DNN**, **CNN**, **RNN**, **LSTM**, **GRU**, **Recursive LSTM**, and **Attention LSTM**.

The dataset used is the **Appliances Energy Prediction** dataset, which can be found [here](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).

### 1. Quantitative Analysis

According to the table below, the **CNN using a 1D convolutional layer** outperformed the other models, achieving the lowest errors and the highest R squared.
| Model | MAE↓ | MSE↓ | RMSE↓ | MPE↓ | MAPE↓ | R Squared↑ |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| DNN | 31.0077 | 4039.9806 | 57.8505 | -16.4529 | 27.5759 | 0.4355 |
| **CNN** | **28.4919** | **3869.6289** | **56.6529** | **-11.5615** | **24.3810** | **0.4567** |
| RNN | 30.7757 | 3997.9815 | 57.8951 | -19.2878 | 28.4873 | 0.4297 |
| LSTM | 29.8795 | 3949.6140 | 57.5196 | -17.5516 | 27.2467 | 0.4393 |
| GRU | 29.9521 | 3939.7874 | 57.4498 | -17.9298 | 27.4501 | 0.4402 |
| Recursive LSTM | 29.8795 | 3949.6140 | 57.5196 | -17.5516 | 27.2467 | 0.4393 |
| Attention LSTM | 30.6609 | 3923.0855 | 57.2503 | -17.8343 | 28.1153 | 0.4372 |
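For reference, the table's metrics can be reproduced from model outputs roughly as follows (a minimal sketch; `label` and `pred` are illustrative stand-ins for the inverse-scaled test arrays, and the percentage-error helpers come from `utils.py`):
```
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from utils import mean_percentage_error, mean_absolute_percentage_error

label = np.array([[60.0], [70.0], [90.0]])   # illustrative ground truth
pred = np.array([[55.0], [80.0], [85.0]])    # illustrative predictions

mae = mean_absolute_error(label, pred)                  # MAE
mse = mean_squared_error(label, pred)                   # MSE
rmse = mean_squared_error(label, pred, squared=False)   # RMSE
mpe = mean_percentage_error(label, pred)                # signed, so errors can cancel
mape = mean_absolute_percentage_error(label, pred)      # absolute percentage
r2 = r2_score(label, pred)                              # R squared
```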
### 2. Qualitative Analysis
All models suffer from the typical lagging issue: the prediction largely tracks the previous observed value, so sharp peaks are reproduced one step late.

### 3. Run the Codes
If you want to train the *Attention LSTM*, run:
#### 1) Train
```
python main.py --network attention
```

#### 2) Test
```
python main.py --network attention --mode test
```

For the full list of arguments, refer to `main.py`.

### Development Environment
```
- Windows 10 Home
- NVIDIA GeForce RTX 2060
- CUDA 10.2
- torch 1.6.0
- torchvision 0.7.0
- etc.
```
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import os
import random
import argparse
import numpy as np

from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

import torch

from models import DNN, RNN, LSTM, GRU, RecursiveLSTM, AttentionLSTM, CNN
from utils import make_dirs, load_data, plot_full, data_loader, get_lr_scheduler
from utils import mean_percentage_error, mean_absolute_percentage_error, plot_pred_test

# Reproducibility #
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# Device Configuration #
device = 'cuda' if torch.cuda.is_available() else 'cpu'


def main(config):

    # Fix Seed #
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)

    # Weights and Plots Path #
    paths = [config.weights_path, config.plots_path]

    for path in paths:
        make_dirs(path)

    # Prepare Data #
    data = load_data(config.which_data)[[config.feature]]
    data = data.copy()

    # Plot Time-Series Data #
    if config.plot_full:
        plot_full(config.plots_path, data, config.feature)

    # Scale the Target Feature to [0, 1] #
    scaler = MinMaxScaler()
    data[config.feature] = scaler.fit_transform(data)

    train_loader, val_loader, test_loader = \
        data_loader(data, config.seq_length, config.train_split, config.test_split, config.batch_size)

    # Lists #
    train_losses, val_losses = list(), list()
    val_maes, val_mses, val_rmses, val_mapes, val_mpes, val_r2s = list(), list(), list(), list(), list(), list()
    test_maes, test_mses, test_rmses, test_mapes, test_mpes, test_r2s = list(), list(), list(), list(), list(), list()

    # Constants #
    best_val_loss = float('inf')  # safer than a fixed constant such as 100
    best_val_improv = 0

    # Prepare Network #
    if config.network == 'dnn':
        model = DNN(config.seq_length, config.hidden_size, config.output_size).to(device)
    elif config.network == 'cnn':
        model = CNN(config.seq_length, config.batch_size).to(device)
    elif config.network == 'rnn':
        model = RNN(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'lstm':
        model = LSTM(config.input_size, config.hidden_size, config.num_layers, config.output_size, config.bidirectional).to(device)
    elif config.network == 'gru':
        model = GRU(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'recursive':
        model = RecursiveLSTM(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'attention':
        model = AttentionLSTM(config.input_size, config.key, config.query, config.value, config.hidden_size, config.num_layers, config.output_size, config.bidirectional).to(device)
    else:
        raise NotImplementedError
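    # Shape note (illustrative, based on the defaults in the argument parser):
    # each batch fed to the models has shape
    # (batch_size, seq_length, input_size) = (128, 5, 1), and every model
    # returns predictions of shape (batch_size, output_size) = (128, 1).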
    # Loss Function #
    criterion = torch.nn.MSELoss()

    # Optimizer #
    optim = torch.optim.Adam(model.parameters(), lr=config.lr, betas=(0.5, 0.999))
    optim_scheduler = get_lr_scheduler(config.lr_scheduler, optim)

    # Train and Validation #
    if config.mode == 'train':

        # Train #
        print("Training {} started with total epoch of {}.".format(model.__class__.__name__, config.num_epochs))

        for epoch in range(config.num_epochs):
            for i, (data, label) in enumerate(train_loader):

                # Prepare Data #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred = model(data)

                # Calculate Loss #
                train_loss = criterion(pred, label)

                # Initialize Optimizer, Back Propagation and Update #
                optim.zero_grad()
                train_loss.backward()
                optim.step()

                # Add item to Lists #
                # Note: the lists accumulate over all epochs, so the printed
                # values below are running averages, not per-epoch averages.
                train_losses.append(train_loss.item())

            # Print Statistics #
            if (epoch+1) % config.print_every == 0:
                print("Epoch [{}/{}]".format(epoch+1, config.num_epochs))
                print("Train Loss {:.4f}".format(np.average(train_losses)))

            # Learning Rate Scheduler #
            optim_scheduler.step()

            # Validation #
            with torch.no_grad():
                for i, (data, label) in enumerate(val_loader):

                    # Prepare Data #
                    data = data.to(device, dtype=torch.float32)
                    label = label.to(device, dtype=torch.float32)

                    # Forward Data #
                    pred_val = model(data)

                    # Calculate Loss #
                    val_loss = criterion(pred_val, label)
                    val_mae = mean_absolute_error(label.cpu(), pred_val.cpu())
                    val_mse = mean_squared_error(label.cpu(), pred_val.cpu(), squared=True)
                    val_rmse = mean_squared_error(label.cpu(), pred_val.cpu(), squared=False)
                    val_mpe = mean_percentage_error(label.cpu(), pred_val.cpu())
                    val_mape = mean_absolute_percentage_error(label.cpu(), pred_val.cpu())
                    val_r2 = r2_score(label.cpu(), pred_val.cpu())

                    # Add item to Lists #
                    val_losses.append(val_loss.item())
                    val_maes.append(val_mae.item())
                    val_mses.append(val_mse.item())
                    val_rmses.append(val_rmse.item())
                    val_mpes.append(val_mpe.item())
                    val_mapes.append(val_mape.item())
                    val_r2s.append(val_r2.item())

            if (epoch + 1) % config.print_every == 0:

                # Print Statistics #
                print("Val Loss {:.4f}".format(np.average(val_losses)))
                print("Val MAE : {:.4f}".format(np.average(val_maes)))
                print("Val MSE : {:.4f}".format(np.average(val_mses)))
                print("Val RMSE : {:.4f}".format(np.average(val_rmses)))
                print("Val MPE : {:.4f}".format(np.average(val_mpes)))
                print("Val MAPE : {:.4f}".format(np.average(val_mapes)))
                print("Val R^2 : {:.4f}".format(np.average(val_r2s)))

                # Save the Model Only if the Validation Loss Decreased #
                curr_val_loss = np.average(val_losses)

                if curr_val_loss < best_val_loss:
                    best_val_loss = curr_val_loss
                    torch.save(model.state_dict(), os.path.join(config.weights_path, 'BEST_{}.pkl'.format(model.__class__.__name__)))

                    print("Best model is saved!\n")
                    best_val_improv = 0

                else:
                    best_val_improv += 1
                    print("Best Validation has not improved for {} epochs.\n".format(best_val_improv))
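            # Note: best_val_improv only reports stagnation; training always runs
            # for the full num_epochs. A hypothetical patience rule ('patience'
            # is not an existing argument) could stop early, e.g.:
            #   if best_val_improv >= patience:
            #       break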
    elif config.mode == 'test':

        # Load the Model Weight #
        model.load_state_dict(torch.load(os.path.join(config.weights_path, 'BEST_{}.pkl'.format(model.__class__.__name__))))

        # Test #
        with torch.no_grad():
            for i, (data, label) in enumerate(test_loader):

                # Prepare Data #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred_test = model(data)

                # Convert to Original Value Range #
                pred_test = pred_test.data.cpu().numpy()
                label = label.data.cpu().numpy().reshape(-1, 1)

                pred_test = scaler.inverse_transform(pred_test)
                label = scaler.inverse_transform(label)
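                # Note: MinMaxScaler.inverse_transform expects a 2-D array with
                # one column per scaled feature, hence the reshape(-1, 1) above;
                # pred_test already has shape (batch_size, 1).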
                # Calculate Loss #
                test_mae = mean_absolute_error(label, pred_test)
                test_mse = mean_squared_error(label, pred_test, squared=True)
                test_rmse = mean_squared_error(label, pred_test, squared=False)
                test_mpe = mean_percentage_error(label, pred_test)
                test_mape = mean_absolute_percentage_error(label, pred_test)
                test_r2 = r2_score(label, pred_test)

                # Add item to Lists #
                test_maes.append(test_mae.item())
                test_mses.append(test_mse.item())
                test_rmses.append(test_rmse.item())
                test_mpes.append(test_mpe.item())
                test_mapes.append(test_mape.item())
                test_r2s.append(test_r2.item())

            # Print Statistics #
            print("Test {}".format(model.__class__.__name__))
            print("Test MAE : {:.4f}".format(np.average(test_maes)))
            print("Test MSE : {:.4f}".format(np.average(test_mses)))
            print("Test RMSE : {:.4f}".format(np.average(test_rmses)))
            print("Test MPE : {:.4f}".format(np.average(test_mpes)))
            print("Test MAPE : {:.4f}".format(np.average(test_mapes)))
            print("Test R^2 : {:.4f}".format(np.average(test_r2s)))

            # Plot Figure (uses the last test batch) #
            plot_pred_test(pred_test, label, config.plots_path, config.feature, model)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--seed', type=int, default=42, help='seed for reproducibility')
    parser.add_argument('--feature', type=str, default='Appliances', help='which feature to extract for prediction')

    parser.add_argument('--seq_length', type=int, default=5, help='window size')
    parser.add_argument('--batch_size', type=int, default=128, help='mini-batch size')

    parser.add_argument('--network', type=str, default='dnn',
                        choices=['dnn', 'cnn', 'rnn', 'lstm', 'gru', 'recursive', 'attention'])

    parser.add_argument('--input_size', type=int, default=1, help='input_size')
    parser.add_argument('--hidden_size', type=int, default=10, help='hidden_size')
    parser.add_argument('--num_layers', type=int, default=1, help='num_layers')
    parser.add_argument('--output_size', type=int, default=1, help='output_size')
    # store_true avoids the type=bool pitfall (any non-empty string parses as True) #
    parser.add_argument('--bidirectional', action='store_true', help='use a bidirectional recurrent layer')

    parser.add_argument('--key', type=int, default=8, help='key')
    parser.add_argument('--query', type=int, default=8, help='query')
    parser.add_argument('--value', type=int, default=8, help='value')

    parser.add_argument('--which_data', type=str, default='./data/energydata_complete.csv', help='which data to use')
    parser.add_argument('--weights_path', type=str, default='./results/weights/', help='weights path')
    parser.add_argument('--plots_path', type=str, default='./results/plots/', help='plots path')

    parser.add_argument('--train_split', type=float, default=0.8, help='train_split')
    parser.add_argument('--test_split', type=float, default=0.5, help='test_split')

    parser.add_argument('--num_epochs', type=int, default=200, help='total epoch')
    parser.add_argument('--print_every', type=int, default=10, help='print statistics every given number of epochs')

    parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
    parser.add_argument('--lr_scheduler', type=str, default='cosine', help='learning rate scheduler', choices=['step', 'plateau', 'cosine'])

    parser.add_argument('--plot_full', action='store_true', help='plot the full series before training')
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])

    config = parser.parse_args()

    torch.cuda.empty_cache()
    main(config)
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn

import math

device = 'cuda' if torch.cuda.is_available() else 'cpu'


class DNN(nn.Module):
    """Deep Neural Network"""
    def __init__(self, input_size, hidden_size, output_size):
        super(DNN, self).__init__()

        self.main = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, output_size)
        )

    def forward(self, x):
        # Drop the feature dimension: (batch, seq_length, 1) -> (batch, seq_length) #
        x = x.squeeze(dim=2)
        out = self.main(x)
        return out


class RNN(nn.Module):
    """Vanilla RNN"""
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(RNN, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size

        self.rnn = nn.RNN(input_size=input_size,
                          hidden_size=hidden_size,
                          num_layers=num_layers,
                          batch_first=True)

        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.rnn(x)
        out = out[:, -1, :]
        out = self.fc(out)

        return out


class LSTM(nn.Module):
    """Long Short-Term Memory"""
    def __init__(self, input_size, hidden_size, num_layers, output_size, bidirectional=False):
        super(LSTM, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.bidirectional = bidirectional

        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True,
                            bidirectional=bidirectional)

        if self.bidirectional:
            self.fc = nn.Linear(hidden_size * 2, output_size)
        else:
            self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.lstm(x)
        out = out[:, -1, :]
        out = self.fc(out)

        return out


class GRU(nn.Module):
    """Gated Recurrent Unit"""
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(GRU, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size

        self.gru = nn.GRU(input_size=input_size,
                          hidden_size=hidden_size,
                          num_layers=num_layers,
                          batch_first=True)

        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.gru(x)
        out = out[:, -1, :]
        out = self.fc(out)

        return out
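# The RecursiveLSTM below rolls the forecast forward: each predicted step is
# appended to the input window (the oldest step is dropped) and fed back in.
# Illustrative trace with seq_length = 3 and output_size = 2:
#   window [x1, x2, x3] -> y1;  window becomes [x2, x3, y1] -> y2.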
class RecursiveLSTM(nn.Module):
    """Recursive LSTM"""
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(RecursiveLSTM, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size

        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True)

        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        pred = torch.empty([x.shape[0], self.output_size]).to(device)
        for i in range(self.output_size):
            out, _ = self.lstm(x)
            out = out[:, -1, :]
            out = self.fc(out)

            pred[:, i] = torch.squeeze(out, -1)

            # Feed the prediction back in: append it and drop the oldest step #
            out = torch.unsqueeze(out, -1)
            x = torch.cat([x, out], 1)[:, 1:, :]

        return pred


class AttentionLSTM(nn.Module):
    """LSTM with Attention"""
    def __init__(self, input_size, key, query, value, hidden_size, num_layers, output_size, bidirectional=False):
        super(AttentionLSTM, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size

        # Projections for scaled dot-product self-attention #
        self.query = nn.Linear(input_size, query)
        self.key = nn.Linear(input_size, key)
        self.value = nn.Linear(input_size, value)

        self.attn = nn.Linear(value, input_size)
        self.scale = math.sqrt(query)

        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True,
                            bidirectional=bidirectional)

        if bidirectional:
            self.fc = nn.Linear(hidden_size * 2, output_size)
        else:
            self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):

        Q = self.query(x)
        K = self.key(x)
        V = self.value(x)

        # Scaled dot-product attention over the time axis #
        dot_product = torch.matmul(Q, K.permute(0, 2, 1)) / self.scale
        scores = torch.softmax(dot_product, dim=-1)
        scaled_x = torch.matmul(scores, V) + x

        # Project back to the input size and add a residual connection #
        new_x = self.attn(scaled_x) + x
        out, _ = self.lstm(new_x)
        out = out[:, -1, :]
        out = self.fc(out)

        return out


class CNN(nn.Module):
    """Convolutional Neural Network"""
    def __init__(self, in_channels, out_channels):
        super(CNN, self).__init__()

        # main.py passes in_channels=seq_length and out_channels=batch_size #
        self.main = nn.Sequential(
            nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(out_channels, 10),
            nn.Linear(10, 1)
        )

    def forward(self, x):
        out = self.main(x)
        return out
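# Minimal shape check (illustrative, not part of the training pipeline):
#   model = AttentionLSTM(input_size=1, key=8, query=8, value=8,
#                         hidden_size=10, num_layers=1, output_size=1)
#   out = model(torch.randn(4, 5, 1))  # -> torch.Size([4, 1])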
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction using AttentionLSTM.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction using AttentionLSTM.png
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction using CNN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction using CNN.png
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction using DNN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction using DNN.png
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction using GRU.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction using GRU.png
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction using LSTM.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction using LSTM.png
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction using RNN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction using RNN.png
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction using RecursiveLSTM.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction using RecursiveLSTM.png
--------------------------------------------------------------------------------
/results/plots/Appliances Energy Prediction.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/plots/Appliances Energy Prediction.png
--------------------------------------------------------------------------------
/results/weights/BEST_AttentionLSTM.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/weights/BEST_AttentionLSTM.pkl
--------------------------------------------------------------------------------
/results/weights/BEST_CNN.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/weights/BEST_CNN.pkl
--------------------------------------------------------------------------------
/results/weights/BEST_DNN.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/weights/BEST_DNN.pkl
--------------------------------------------------------------------------------
/results/weights/BEST_GRU.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/weights/BEST_GRU.pkl
--------------------------------------------------------------------------------
/results/weights/BEST_LSTM.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/weights/BEST_LSTM.pkl
--------------------------------------------------------------------------------
/results/weights/BEST_RNN.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/weights/BEST_RNN.pkl
--------------------------------------------------------------------------------
/results/weights/BEST_RecursiveLSTM.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wtwong316/Univariate-Time-Series-Prediction-using-Deep-Learning/5caaa3da06d0b2eb34db3023f2e9802d44f43041/results/weights/BEST_RecursiveLSTM.pkl
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

import torch
from torch.utils.data import TensorDataset, DataLoader

from sklearn.model_selection import train_test_split


def make_dirs(path):
    """Make Directory If It Does Not Exist"""
    if not os.path.exists(path):
        os.makedirs(path)


def load_data(data):
    """Load the CSV Data, Indexed by Date"""
    data_dir = os.path.join(data)

    data = pd.read_csv(data_dir, parse_dates=['date'])

    data.index = data['date']
    data = data.drop('date', axis=1)

    return data


def plot_full(path, data, feature):
    """Plot the Full Graph of the Energy Dataset"""
    data.plot(y=feature, figsize=(16, 8))
    plt.xlabel('DateTime', fontsize=10)
    plt.xticks(rotation=45)
    plt.ylabel(feature, fontsize=10)
    plt.grid()
    plt.title('{} Energy Prediction'.format(feature))
    plt.savefig(os.path.join(path, '{} Energy Prediction.png'.format(feature)))
    plt.show()
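# Sliding-window example (illustrative): with seq_length = 3 and a series
# [a, b, c, d, e], data_loader below yields the pair ([a, b, c] -> d); note
# that the `- 1` in the range skips the last possible pair ([b, c, d] -> e).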
def data_loader(data, seq_length, train_split, test_split, batch_size):
    """Prepare Data by Applying Sliding Windows and Return Data Loaders"""

    data = data.values

    # Sliding Window #
    x, y = list(), list()

    for i in range(len(data) - seq_length - 1):
        data_x = data[i:i + seq_length]
        data_y = data[i + seq_length]
        x.append(data_x)
        y.append(data_y)

    x, y = np.array(x), np.array(y)

    # Split into Train, Validation and Test Sets #
    train_seq, test_seq, train_label, test_label = train_test_split(x, y, train_size=train_split, shuffle=False)
    val_seq, test_seq, val_label, test_label = train_test_split(test_seq, test_label, train_size=test_split, shuffle=False)

    # Convert to Tensor #
    train_set = TensorDataset(torch.from_numpy(train_seq), torch.from_numpy(train_label))
    val_set = TensorDataset(torch.from_numpy(val_seq), torch.from_numpy(val_label))
    test_set = TensorDataset(torch.from_numpy(test_seq), torch.from_numpy(test_label))

    # Data Loader #
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=False)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

    return train_loader, val_loader, test_loader


def get_lr_scheduler(lr_scheduler, optimizer):
    """Learning Rate Scheduler"""
    if lr_scheduler == 'step':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
    elif lr_scheduler == 'plateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif lr_scheduler == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10, eta_min=0)
    else:
        raise NotImplementedError
    return scheduler


def percentage_error(actual, predicted):
    """Percentage Error"""
    res = np.empty(actual.shape)
    for j in range(actual.shape[0]):
        if actual[j] != 0:
            res[j] = (actual[j] - predicted[j]) / actual[j]
        else:
            # Fall back to the mean of the actual values to avoid division by zero #
            res[j] = predicted[j] / np.mean(actual)
    return res


def mean_percentage_error(y_true, y_pred):
    """Mean Percentage Error"""
    return np.mean(percentage_error(np.asarray(y_true), np.asarray(y_pred))) * 100


def mean_absolute_percentage_error(y_true, y_pred):
    """Mean Absolute Percentage Error"""
    return np.mean(np.abs(percentage_error(np.asarray(y_true), np.asarray(y_pred)))) * 100


def plot_pred_test(pred, actual, path, feature, model):
    """Plot Test Set Predictions"""
    plt.figure(figsize=(10, 8))
    plt.plot(pred, label='Pred')
    plt.plot(actual, label='Actual')
    plt.xlabel('Time', fontsize=18)
    plt.ylabel('{}'.format(feature), fontsize=18)
    plt.legend(loc='best')
    plt.grid()
    plt.title('{} Energy Prediction using {}'.format(feature, model.__class__.__name__), fontsize=18)
    plt.savefig(os.path.join(path, '{} Energy Prediction using {}.png'.format(feature, model.__class__.__name__)))
--------------------------------------------------------------------------------