├── Google_cluster_data
│   ├── README.md
│   ├── plot
│   │   ├── draw.png
│   │   ├── host_load_29d.png
│   │   ├── host_load_6h.png
│   │   ├── host_load_vs_day.py
│   │   ├── host_load_vs_hour.py
│   │   ├── machine.pkl
│   │   └── test.py
│   ├── prepare
│   │   ├── 1_extract.py
│   │   └── 2_machine_id.py
│   └── single
│       ├── 1_sample.py
│       ├── 2_data.py
│       └── 3_merge.py
├── Grid
│   ├── Load_Data.zip
│   ├── README.md
│   ├── axp7.png
│   ├── axp7_detial.png
│   ├── draw.py
│   └── preprocess.py
├── README.md
├── autoencoder
│   ├── 1.png
│   ├── 2.png
│   ├── README.md
│   ├── SparseAutoencoder.py
│   ├── compare
│   │   ├── 1.pkl
│   │   ├── 2.pkl
│   │   ├── 3.pkl
│   │   ├── 4.pkl
│   │   ├── biases.pkl
│   │   └── weights.pkl
│   ├── draw.py
│   ├── utils.py
│   └── utils.pyc
├── draw
│   ├── CDF
│   │   ├── CDF_of_MSE_1.png
│   │   ├── CDF_of_MSE_2.png
│   │   ├── CDF_of_MSE_3.png
│   │   ├── CDF_of_MSE_4.png
│   │   ├── data
│   │   │   ├── ANN.pkl
│   │   │   ├── AR.pkl
│   │   │   ├── ESN.pkl
│   │   │   ├── GMDH.pkl
│   │   │   └── LSTM.pkl
│   │   └── draw_cdf.py
│   ├── README.md
│   ├── picture
│   │   ├── lstm.png
│   │   ├── lstm.vsdx
│   │   ├── rnn.png
│   │   └── rnn.vsdx
│   ├── regular
│   │   ├── comparison_mse.png
│   │   ├── draw_grid.py
│   │   ├── draw_mse.py
│   │   ├── draw_msse.py
│   │   ├── mse_axp0.png
│   │   ├── mse_axp7.png
│   │   ├── mse_sahara.png
│   │   ├── mse_themis.png
│   │   ├── msse.png
│   │   ├── msse_compare.py
│   │   └── msse_comparison.png
│   └── results
│       ├── Cloud
│       │   ├── actual load prediction cloud.png
│       │   ├── compare.py
│       │   ├── logits
│       │   │   ├── rnn_12.pkl
│       │   │   ├── rnn_18.pkl
│       │   │   ├── rnn_24.pkl
│       │   │   ├── rnn_30.pkl
│       │   │   ├── rnn_36.pkl
│       │   │   └── rnn_6.pkl
│       │   ├── utils.py
│       │   └── utils.pyc
│       └── Grid
│           ├── actual load prediction grid.png
│           ├── compare.py
│           └── logits
│               └── lstm_6.pkl
└── tensorflow
    ├── ESN
    │   ├── MSE
    │   │   ├── esn_mse_main.py
    │   │   ├── esn_mse_save_logits.py
    │   │   ├── logits_esn_mse_1024_12.pkl
    │   │   ├── new_test.py
    │   │   ├── test.sh
    │   │   ├── utils.py
    │   │   └── utils.pyc
    │   ├── MSSE
    │   │   ├── esn_msse_main.py
    │   │   ├── esn_msse_save_logits.py
    │   │   ├── log
    │   │   │   └── previous
    │   │   │       ├── 1.txt
    │   │   │       └── 2.txt
    │   │   ├── logits_esn_msse_1024_1.pkl
    │   │   ├── msse_utils.py
    │   │   ├── msse_utils.pyc
    │   │   └── test.sh
    │   ├── README.md
    │   ├── autoencoder
    │   │   ├── 1024
    │   │   │   ├── high
    │   │   │   │   ├── 1.pkl
    │   │   │   │   ├── 1.png
    │   │   │   │   ├── 2.pkl
    │   │   │   │   ├── 2.png
    │   │   │   │   ├── 3.pkl
    │   │   │   │   ├── 4.pkl
    │   │   │   │   ├── Untitled Document
    │   │   │   │   ├── Untitled Document~
    │   │   │   │   ├── biases.pkl
    │   │   │   │   └── weights.pkl
    │   │   │   ├── low
    │   │   │   │   ├── 1.pkl
    │   │   │   │   ├── 1.png
    │   │   │   │   ├── 2.pkl
    │   │   │   │   ├── 2.png
    │   │   │   │   ├── 3.pkl
    │   │   │   │   ├── 4.pkl
    │   │   │   │   ├── Untitled Document
    │   │   │   │   ├── Untitled Document~
    │   │   │   │   ├── biases.pkl
    │   │   │   │   └── weights.pkl
    │   │   │   └── medium
    │   │   │       ├── 1.pkl
    │   │   │       ├── 1.png
    │   │   │       ├── 2.pkl
    │   │   │       ├── 2.png
    │   │   │       ├── 3.pkl
    │   │   │       ├── 4.pkl
    │   │   │       ├── Untitled Document
    │   │   │       ├── Untitled Document~
    │   │   │       ├── biases.pkl
    │   │   │       └── weights.pkl
    │   │   ├── ae.py
    │   │   ├── test.sh
    │   │   ├── utils.py
    │   │   └── utils.pyc
    │   └── grid
    │       ├── esn_mse_main.py
    │       ├── test.sh
    │       ├── utils_grid.py
    │       └── utils_grid.pyc
    ├── LSTM
    │   ├── MSE
    │   │   ├── 1222.txt
    │   │   ├── main.py
    │   │   ├── rnn_cell.py
    │   │   ├── rnn_cell.pyc
    │   │   ├── save_logits.py
    │   │   ├── test.sh
    │   │   ├── test_grid.py
    │   │   ├── utils.py
    │   │   └── utils.pyc
    │   ├── MSSE
    │   │   ├── logits_mean_1.pkl
    │   │   ├── lstm_msse_main.py
    │   │   ├── lstm_msse_save_logits.py
    │   │   ├── rnn_cell.py
    │   │   ├── rnn_cell.pyc
    │   │   ├── test.sh
    │   │   ├── utils.py
    │   │   └── utils.pyc
    │   ├── README.md
    │   └── grid
    │       ├── main.py
    │       ├── rnn_cell.py
    │       ├── rnn_cell.pyc
    │       ├── save_logits.py
    │       ├── test.sh
    │       ├── utils_grid.py
    │       └── utils_grid.pyc
    └── README.md
/Google_cluster_data/README.md:
--------------------------------------------------------------------------------
1 | # Google Cluster Data
2 |
3 | > Pre-process the Google Cluster Data
4 |
5 | - [prepare](./prepare) extract particular information from the original data
6 | - [single](./single) sample 1024 machines' data and merge them into one file
7 | - [plot](./plot) plot the trend of one particular machine
8 |
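9 | A typical run order is prepare → single → plot. Once `3_merge.py` has run, the merged trace can be loaded as in this minimal sketch (the path assumes the hard-coded layout used in the scripts):
10 |
11 | ```python
12 | import pickle
13 |
14 | # tyrion.pkl holds one CPU-load trace per sampled machine,
15 | # each with 8352 five-minute samples (29 days)
16 | with open('data/tyrion.pkl', 'rb') as f:
17 |     data = pickle.load(f)
18 | print(len(data), len(data[0]))
19 | ```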
--------------------------------------------------------------------------------
/Google_cluster_data/plot/draw.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/Google_cluster_data/plot/draw.png
--------------------------------------------------------------------------------
/Google_cluster_data/plot/host_load_29d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/Google_cluster_data/plot/host_load_29d.png
--------------------------------------------------------------------------------
/Google_cluster_data/plot/host_load_6h.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/Google_cluster_data/plot/host_load_6h.png
--------------------------------------------------------------------------------
/Google_cluster_data/plot/host_load_vs_day.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Sun Mar 27 21:42:06 2016
4 |
5 | @author: tyrion
6 | """
7 |
8 | from os import path
9 | project_path = '/home/tyrion/lannister/clusterdata-2011-2/python'
10 |
11 | import matplotlib.pyplot as plt
12 | import pickle
13 |
14 | machine_id = 563849022
15 |
16 | input_id = open(path.join(project_path,'data',str(machine_id)+'.pkl'),'rb')
17 | a = pickle.load(input_id)
18 | plt.plot(a)
19 | plt.xlabel("Time(Day)")
20 | plt.ylabel("CPU Rate")
21 | plt.xlim([0,8352])
22 | plt.xticks([d*12*24 for d in range(3,30,3)],['%i'%d for d in range(3,30,3)])
23 | plt.ylim([0,0.6])
24 | plt.grid(True)
25 | #plt.text(5200,0.06,r'machine_id:563849022',color='red')
26 | plt.savefig("host_load_29d.png", dpi=300, format='png')
27 | plt.show()
--------------------------------------------------------------------------------
/Google_cluster_data/plot/host_load_vs_hour.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Sun Mar 27 21:44:07 2016
4 |
5 | @author: tyrion
6 | """
7 |
8 | from os import path
9 | project_path = '/home/tyrion/lannister/clusterdata-2011-2/python'
10 |
11 | import matplotlib.pyplot as plt
12 | import pickle
13 |
14 | machine_id = 563849022
15 |
16 | input_id = open(path.join(project_path,'data',str(machine_id)+'.pkl'),'rb')
17 | a = pickle.load(input_id)
18 | plt.plot(a)
19 | plt.xlabel("Time(Hour)")
20 | plt.ylabel("CPU Rate")
21 | plt.xlim([0,12*6])
22 | plt.xticks([d*12 for d in range(1,7)],['%ih '%d for d in range(1,7)])
23 | plt.ylim([0.15,0.35])
24 | plt.grid(True)
25 | #plt.text(5200,0.06,r'machine_id:563849022')
26 | plt.savefig("host_load_6h.png", dpi=300, format='png')
27 | plt.show()
--------------------------------------------------------------------------------
/Google_cluster_data/plot/test.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Sun Mar 27 21:44:07 2016
4 |
5 | @author: tyrion
6 | """
7 |
8 | import matplotlib.pyplot as plt
9 | import pickle
10 |
11 | input_id = open("./machine.pkl", 'rb')
12 | a = pickle.load(input_id)
13 | #plt.plot(a[400:600])
14 | #plt.savefig("draw.png", dpi=300, format='png')
15 |
16 | start = 400
17 | history = 200
18 | prediction = 50
19 |
20 | plt.figure(figsize=(20,3))
21 | plt.plot(a[start:start+history+prediction])
22 | #plt.plot(a[start:start+history], 'b', label="history")
23 | #plt.plot(range(history-1,history+prediction),
24 | # a[start+history-1:start+history+prediction],
25 | # 'r', label="prediction")
26 | plt.ylim([0, 0.35])
27 | plt.savefig("draw.png", dpi=300, format='png')
--------------------------------------------------------------------------------
/Google_cluster_data/prepare/1_extract.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Mar 11 11:17:33 2016
4 |
5 | @author: tyrion
6 |
7 | extract 4 columns from the original data
8 | and save them into 5 csv files
9 | """
10 |
11 | from os import chdir
12 | chdir('/home/tyrion/lannister/clusterdata-2011-2')
13 |
14 | import pandas as pd
15 | from pandas import read_csv
16 | from os import path
17 |
18 | project_path = '/home/tyrion/lannister/clusterdata-2011-2/python'
19 |
20 | task_usage_csv_colnames = ['start_time','end_time','job_id','task_index','machine_id','cpu_usage',
21 | 'can_mem_usage','assi_mem_usage','ummap_page_cache','total_page_cache','max_mem_usage',
22 | 'disk_io_time','disk_space_usage','max_cpu_rate','max_disk_io_time','cyc_per_instr',
23 | 'mem_acc_per_inst','sample_portion','aggre_type','sampled_cpu_usage']
24 |
25 | import time
26 |
27 | for i in xrange(5):
28 | tStart = time.time()
29 | all_cpu_usage_df = pd.DataFrame()
30 | for j in xrange(i*100, (i+1)*100):
31 | if j < 10:
32 | sub_csv = "0000"+str(j)
33 | elif j < 100:
34 | sub_csv = "000"+str(j)
35 | else:
36 | sub_csv = "00"+str(j)
37 |
38 | task_usage_df = read_csv(path.join('task_usage','part-'+sub_csv+'-of-00500.csv.gz'),header=None,
39 | index_col=False,compression='gzip',names=task_usage_csv_colnames)
40 |
41 | machine_cpu_load = task_usage_df[['start_time','end_time','machine_id','cpu_usage']]
42 | all_cpu_usage_df = all_cpu_usage_df.append(machine_cpu_load, ignore_index=True)
43 | print j,len(all_cpu_usage_df)
44 | all_cpu_usage_df.to_csv(path.join(project_path,'info','machine','all_cpu_usage_df'+str(i)+'.csv.gz'),compression='gzip',index=False)
45 | tEnd = time.time()
46 | print "It costs %f sec" % (tEnd - tStart)
47 |
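48 | # sketch: read one of the produced shards back for inspection (assumes the layout above)
49 | # shard = read_csv(path.join(project_path,'info','machine','all_cpu_usage_df0.csv.gz'),
50 | #                  compression='gzip')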
--------------------------------------------------------------------------------
/Google_cluster_data/prepare/2_machine_id.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Sun Mar 27 22:11:00 2016
4 |
5 | @author: tyrion
6 |
7 | extract machine ids
8 | """
9 |
10 | project_path = '/home/tyrion/lannister/clusterdata-2011-2'
11 |
12 | from pandas import read_csv
13 | from os import path
14 |
15 | machine_attri_csv_colnames = ['time', 'machine_id', 'attri_name', 'attri_value', 'attri_del']
16 |
17 | machine_attri_df = read_csv(path.join(project_path,'machine_attributes','part-00000-of-00001.csv.gz'),
18 | header=None,index_col=False,compression='gzip',names=machine_attri_csv_colnames)
19 | machine_id_df = machine_attri_df['machine_id']
20 | print len(machine_id_df)
21 | machine_id_set = set(machine_id_df)
22 | print len(machine_id_set)
23 |
24 | import pickle
25 |
26 | with open(path.join(project_path,'python','info','machine_id.pkl'),'wb') as f:
27 | pickle.dump(machine_id_set, f)
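28 |
29 | # note: machine_attributes holds one row per attribute record, so a machine id
30 | # appears many times; the set() above keeps only the unique ids. A sketch of
31 | # reading the result back:
32 | # with open(path.join(project_path,'python','info','machine_id.pkl'),'rb') as f:
33 | #     machine_id_set = pickle.load(f)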
--------------------------------------------------------------------------------
/Google_cluster_data/single/1_sample.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Mar 14 17:14:25 2016
4 |
5 | @author: tyrion
6 |
7 | sample 100 machines randomly and save each one separately
8 | """
9 |
10 | import pickle
11 | import pandas as pd
12 | from pandas import read_csv
13 | from os import path
14 |
15 | project_path = '/home/tyrion/lannister/clusterdata-2011-2/python'
16 |
17 | input_id = open(path.join(project_path,'info','machine_id.pkl'), 'rb')
18 | all_machine_id = pickle.load(input_id)
19 | input_id.close()
20 |
21 | from random import sample
22 | sample_machine_ids = sample(all_machine_id, 100)
23 | print sample_machine_ids
24 |
25 | import time
26 | tStart = time.time()
27 | for single_machine_id in sample_machine_ids:
28 | single_machine_cpu_usage_df = pd.DataFrame()
29 | for i in xrange(5):
30 | machine_cpu_usage_df = read_csv(path.join(project_path,'info','314','all_machine_cpu_usage_'+
31 | str(i)+'.csv.gz'),compression='gzip')
32 |
33 | single_machine = machine_cpu_usage_df[machine_cpu_usage_df['machine_id']==single_machine_id]
34 | single_machine = single_machine.drop(['machine_id'], axis=1)
35 | single_machine = single_machine[(single_machine.cpu_usage != 0)&(single_machine.cpu_usage <= 1)]
36 | single_machine.start_time = single_machine.start_time / 1000000
37 | single_machine.end_time = single_machine.end_time / 1000000
38 |
39 | single_machine_cpu_usage_df = single_machine_cpu_usage_df.append(single_machine, ignore_index=True)
40 | print i,len(single_machine_cpu_usage_df)
41 | single_machine_cpu_usage_df.to_csv(path.join(project_path,'machine_ids','329',
42 | str(single_machine_id)+'.csv.gz'),compression='gzip',index=False)
43 | tEnd = time.time()
44 | print "It costs %f sec" % (tEnd - tStart)
45 | print single_machine_cpu_usage_df
46 |
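47 | # note: random.sample() accepts a set under Python 2, as used above; on newer
48 | # Pythons the population must be a sequence, e.g. sample(list(all_machine_id), 100)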
--------------------------------------------------------------------------------
/Google_cluster_data/single/2_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Sun Mar 27 22:01:34 2016
4 |
5 | @author: tyrion
6 |
7 | process each machine's records and accumulate the CPU load per five-minute interval
8 | """
9 |
10 | from os import path
11 | project_path = '/home/tyrion/lannister/clusterdata-2011-2/python'
12 |
13 | from pandas import read_csv
14 | import os
15 | import matplotlib.pyplot as plt
16 | import pickle
17 |
18 | rootDir = path.join(project_path,'machine_ids')
19 | list_dirs = os.walk(rootDir)
20 | num = 0
21 | #for lists in os.listdir(rootDir):
22 | for root, dirs, files in list_dirs:
23 | for f in files:
24 | num += 1
25 | print num
26 | machine_id = f.split('.')[0]
27 | lists = path.join(root, f)
28 | print lists
29 | single_machine_cpu_usage_df = read_csv(lists,compression='gzip')
30 | f_machine_cpu_usage = []
31 | for i in xrange(8352):
32 | # print i
33 | period_df = single_machine_cpu_usage_df[(single_machine_cpu_usage_df.start_time>=(600+i*300))&
34 | (single_machine_cpu_usage_df.start_time<(900+i*300))]
35 | single_sum = 0
36 | for j in period_df.index:
37 | single_sum += (period_df.loc[j,'end_time'] - period_df.loc[j,'start_time'])*period_df.loc[j,'cpu_usage']/300
38 | f_machine_cpu_usage.append(single_sum)
39 | # plt.figure()
40 | # plt.plot(f_machine_cpu_usage)
41 | with open(path.join(project_path,'data','all',machine_id+'.pkl'),'wb') as f:
42 | pickle.dump(f_machine_cpu_usage, f)
43 |
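44 | # a vectorized sketch of the interval accumulation above (df stands for one
45 | # machine's records): a record starting in [600+300*i, 900+300*i) belongs to
46 | # interval i and contributes (end_time - start_time) * cpu_usage / 300 to it:
47 | # df['bucket'] = ((df.start_time - 600) // 300).astype(int)
48 | # load = df.groupby('bucket').apply(
49 | #     lambda g: ((g.end_time - g.start_time) * g.cpu_usage / 300).sum())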
--------------------------------------------------------------------------------
/Google_cluster_data/single/3_merge.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Mar 21 20:20:30 2016
4 |
5 | @author: tyrion
6 |
7 | merge the separate machine traces into one 1024-machine matrix
8 | """
9 |
10 | from os import path
11 | import pickle
12 | import os
13 |
14 | project_path = '/home/tyrion/lannister/clusterdata-2011-2/python'
15 | data = []
16 | rootDir = path.join(project_path,'data','all')
17 | list_dirs = os.walk(rootDir)
18 | num = 0
19 | for lists in os.listdir(rootDir):
20 | lists = path.join(rootDir, lists)
21 | num += 1
22 | print num,lists
23 | input_machine = open(lists,'rb')
24 | cpu_load = pickle.load(input_machine)
25 | data.append(cpu_load)
26 | input_machine.close()
27 |
28 | with open(path.join(project_path,'data','tyrion.pkl'),'wb') as f:
29 | pickle.dump(data, f)
--------------------------------------------------------------------------------
/Grid/Load_Data.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/Grid/Load_Data.zip
--------------------------------------------------------------------------------
/Grid/README.md:
--------------------------------------------------------------------------------
1 | # Grid
2 |
3 | > Preprocess the Unix system load traces collected by Dinda.
4 |
--------------------------------------------------------------------------------
/Grid/axp7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/Grid/axp7.png
--------------------------------------------------------------------------------
/Grid/axp7_detial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/Grid/axp7_detial.png
--------------------------------------------------------------------------------
/Grid/draw.py:
--------------------------------------------------------------------------------
1 | import pickle
2 |
3 | with open("./data/axp7.pkl", 'rb') as f:
4 | axp7 = pickle.load(f)
5 |
6 | import matplotlib.pyplot as plt
7 |
8 | #plt.figure()
9 | #plt.plot(axp7)
10 | #plt.xlabel("measurement")
11 | #plt.ylabel("CPU Rate")
12 | #plt.savefig("axp7.png", dpi=300, format='png')
13 | #plt.show()
14 | #
15 | #plt.figure()
16 | #plt.plot(axp7[100:300])
17 | #plt.xlabel("measurement")
18 | #plt.ylabel("CPU Rate")
19 | #plt.savefig("axp7_detial.png", dpi=300, format='png')
20 | #plt.show()
21 |
22 | from numpy import fft
23 | import numpy as np
24 |
25 | def plot_fft(time_domain):
26 | f = fft.fft(time_domain)
27 | g = np.abs(f)
28 | print g
29 | plt.plot(g)
30 | plt.xlim([0, 50])
31 | # plt.xticks([d*2 for d in range(25)])
32 | # plt.ylim([0, 200])
33 | # plt.grid(True)
34 | plt.show()
35 | return g
36 |
37 | g = plot_fft(axp7)
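38 |
39 | # note: axp7 is real-valued, so the magnitude spectrum above is symmetric;
40 | # np.fft.rfft(axp7) would return only the non-redundant half that xlim([0, 50]) zooms into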
--------------------------------------------------------------------------------
/Grid/preprocess.py:
--------------------------------------------------------------------------------
1 | load = []
2 | with open("./Load_Data/themis.nectar.cs.cmu.edu_Aug_97.asciitrace", 'rb') as f:
3 | for line in f:
4 | load.append(float(line.split()[1]))
5 | #print load
6 |
7 | max_load = max(load)
8 | min_load = min(load)
9 | regulized_load = []
10 | for i in load:
11 | regulized_value = 0.1 +(i - min_load)*(0.9-0.1)/(max_load-min_load)
12 | regulized_load.append(regulized_value)
13 |
14 | import matplotlib.pyplot as plt
15 | plt.plot(regulized_load)
16 | plt.figure()
17 | plt.plot(regulized_load[:200])
18 |
19 | import pickle
20 | with open("./data/themis.pkl", 'wb') as f:
21 | pickle.dump(regulized_load, f)
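22 |
23 | # an equivalent vectorized form of the [0.1, 0.9] min-max scaling above (a sketch):
24 | # import numpy as np
25 | # regulized_load = 0.1 + (np.asarray(load) - min_load) * 0.8 / (max_load - min_load)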
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Host-Load-Prediction-with-LSTM
2 |
3 | > Host load prediction with Long Short-Term Memory in cloud computing
4 |
5 |
6 |
7 | - [Google cluster data](./Google_cluster_data) Preprocess the Google cluster data
8 | - [Grid](./Grid) Preprocess the Grid dataset
9 | - [autoencoder](./autoencoder) Apply the autoencoder to the host load data
10 | - [tensorflow](./tensorflow) The main TensorFlow code implementing the project
11 | - [draw](./draw) Draw comparison figures of the results
12 |
13 |
14 |
--------------------------------------------------------------------------------
/autoencoder/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/autoencoder/1.png
--------------------------------------------------------------------------------
/autoencoder/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/autoencoder/2.png
--------------------------------------------------------------------------------
/autoencoder/README.md:
--------------------------------------------------------------------------------
1 | # Autoencoder
2 |
3 | > Apply an autoencoder to extract the main features from the original data
4 |
5 | - [SparseAutoencoder.py](./SparseAutoencoder.py) The sparse autoencoder implemented with TensorFlow
6 | - [draw.py](./draw.py) Compare the original host load with the reconstructed load, as shown in [1.png](./1.png) and [2.png](./2.png).
7 |
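8 | The cost minimized in [SparseAutoencoder.py](./SparseAutoencoder.py) combines reconstruction error, L2 weight decay, and a KL sparsity penalty (with `BETA = 3`, `LAMBDA = 0.001` and `RHO = 0.1` as set in the script):
9 |
10 | $$J = \frac{1}{2N}\lVert \hat{x}-x \rVert^2 + \frac{\lambda}{2}\left(\lVert W_1 \rVert^2 + \lVert W_2 \rVert^2\right) + \beta \sum_j \mathrm{KL}\!\left(\rho \,\middle\|\, \hat{\rho}_j\right)$$
11 |
12 | where ρ̂ⱼ (`rho_hat` in the code) is the mean activation of hidden unit j over the training windows.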
--------------------------------------------------------------------------------
/autoencoder/SparseAutoencoder.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Tue Apr 26 16:58:49 2016
4 |
5 | @author: tyrion
6 | """
7 |
8 | import numpy as np
9 | import tensorflow as tf
10 | import pickle
11 |
12 | from utils import read_data
13 |
14 | N_HIDDEN = 200
15 | N_INPUT = N_OUTPUT = 64
16 | AHEAD_STEP = 64
17 | BETA = tf.constant(3.)
18 | LAMBDA = tf.constant(.001)
19 | RHO = tf.constant(0.1)
20 |
21 |
22 | def train():
23 | data_path = '/home/tyrion/lannister/1024/tyrion.pkl'
24 | # data_path = '/home/tyrion/Documents/Cloud Computing/python/data/72/tyrion.pkl'
25 | training_data, _, _, _, load_mean, load_std = read_data(data_path, N_INPUT, N_OUTPUT, AHEAD_STEP)
26 | NUMSAMPLES = 288 * 26 / N_INPUT
27 | print(training_data.shape)
28 |
29 | sess = tf.Session()
30 |
31 | # def variable_summaries(var, name):
32 | # with tf.name_scope('summaries'):
33 | # mean = tf.reduce_mean(var)
34 | # tf.scalar_summary('mean/' + name, mean)
35 | # with tf.name_scope('stddev'):
36 | # stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
37 | # tf.scalar_summary('stddev/' + name, stddev)
38 | # tf.scalar_summary('max/' + name, tf.reduce_max(var))
39 | # tf.scalar_summary('min/' + name, tf.reduce_min(var))
40 | # tf.histogram_summary(name, var)
41 |
42 | # Input placeholders
43 | with tf.name_scope('input'):
44 | # Construct the tensor flow model
45 | x = tf.placeholder("float", [None, N_INPUT], name='x-input')
46 |
47 | def autoencoder(X, weights, biases):
48 | with tf.name_scope('hidden_layer'):
49 | hiddenlayer = tf.sigmoid(tf.add(tf.matmul(X, weights['hidden']), biases['hidden']))
50 | with tf.name_scope('output_layer'):
51 | out = tf.add(tf.matmul(hiddenlayer, weights['out']), biases['out'])
52 | return {'out': out, 'hidden': hiddenlayer}
53 |
54 | weights = {'hidden': tf.get_variable("wei_hid", shape=[N_INPUT, N_HIDDEN], dtype=tf.float32,
55 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./N_INPUT),tf.sqrt(1./N_INPUT))),
56 | 'out': tf.get_variable("wei_out", shape=[N_HIDDEN, N_OUTPUT], dtype=tf.float32,
57 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./N_HIDDEN),tf.sqrt(1./N_HIDDEN)))}
58 | # variable_summaries(weights['hidden'], 'hidden_layer' + '/weights')
59 | # variable_summaries(weights['out'], 'output_layer' + '/weights')
60 |
61 | biases = {'hidden': tf.get_variable("bia_hid", shape=[N_HIDDEN], dtype=tf.float32,
62 | initializer=tf.constant_initializer(0)),
63 | 'out': tf.get_variable("bia_out", shape=[N_OUTPUT], dtype=tf.float32,
64 | initializer=tf.constant_initializer(0))}
65 | # variable_summaries(biases['hidden'], 'hidden_layer' + '/biases')
66 | # variable_summaries(biases['out'], 'output_layer' + '/biases')
67 | #
68 | pred = autoencoder(x, weights, biases)
69 | rho_hat = tf.div(tf.reduce_sum(pred['hidden'], 0), tf.constant(float(NUMSAMPLES)))
70 |
71 | def logfunc(x, x2):
72 | return tf.mul(x, tf.log(tf.div(x, x2)))
73 |
74 | # Construct cost
75 | def KL_Div(rho, rho_hat):
76 | invrho = tf.sub(tf.constant(1.), rho)
77 | invrhohat = tf.sub(tf.constant(1.), rho_hat)
78 | logrho = tf.add(logfunc(rho, rho_hat), logfunc(invrho, invrhohat))
79 | return logrho
80 |
81 | diff = tf.sub(pred['out'], x)
82 |
83 | with tf.name_scope('loss'):
84 | cost_J = tf.div(tf.nn.l2_loss(diff), tf.constant(float(NUMSAMPLES)))
85 | tf.scalar_summary('loss', cost_J)
86 |
87 | with tf.name_scope('cost_sparse'):
88 | cost_sparse = tf.mul(BETA, tf.reduce_sum(KL_Div(RHO, rho_hat)))
89 | tf.scalar_summary('cost_sparse', cost_sparse)
90 |
91 | with tf.name_scope('cost_reg'):
92 | cost_reg = tf.mul(LAMBDA, tf.add(tf.nn.l2_loss(weights['hidden']),
93 | tf.nn.l2_loss(weights['out'])))
94 | tf.scalar_summary('cost_reg', cost_reg)
95 |
96 | with tf.name_scope('cost'):
97 | cost = tf.add(tf.add(cost_J, cost_reg), cost_sparse)
98 | tf.scalar_summary('cost', cost)
99 |
100 | # optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)
101 |
102 | tvars = tf.trainable_variables()
103 | grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 10)
104 |
105 | lr = tf.placeholder("float", name='learning_rate')
106 | optimizer = tf.train.AdamOptimizer(lr)
107 | apply_optimizer = optimizer.apply_gradients(zip(grads, tvars))
108 |
109 | # merged = tf.merge_all_summaries()
110 | # writer = tf.train.SummaryWriter('./sae_logs', sess.graph)
111 | # Initializing the variables
112 | init = tf.initialize_all_variables()
113 |
114 | # Launch the graph
115 | sess.run(init)
116 |
117 | # # Add ops to save and restore all the variables.
118 | # saver = tf.train.Saver({"model/RNN/ae_weights": weights['hidden'],
119 | # "model/RNN/ae_biases": biases['hidden']})
120 |
121 | # Training cycle
122 | load_num = training_data.shape[0]
123 | for i in xrange(100):
124 | Cost = Cost_j = Cost_reg = Cost_sparse = 0.0
125 | for j in xrange(load_num):
126 | # summary, _ = sess.run([merged, optimizer], feed_dict={x: training_data[j]})
127 | # sess.run([apply_optimizer], feed_dict={x: training_data[j]})
128 | if i < 50:
129 | lr_assign = 0.01
130 | else:
131 | lr_assign = 0.001
132 | c, cj, reg, sparse, _ = sess.run([cost,cost_J,cost_reg,cost_sparse,apply_optimizer],
133 | feed_dict={x: training_data[j], lr: lr_assign})
134 | Cost += c / load_num
135 | Cost_j += cj / load_num  # 'cj' rather than 'j', which would shadow the loop variable
136 | Cost_reg += reg / load_num
137 | Cost_sparse += sparse / load_num
138 | # a = training_data.shape[0]
139 | # print(a)
140 | # Cost /= a
141 | # Cost_j /= a
142 | # Cost_reg /= a
143 | # Cost_sparse /= a
144 | print("Epoch %d: Cost = %f, Loss = %f, Reg = %f, Sparsity = %f"
145 | % (i, Cost, Cost_j, Cost_reg, Cost_sparse))
146 | # writer.add_summary(summary, i)
147 | # if i == 199:
148 | # a = sess.run(pred['hidden'], feed_dict={x: training_data[0]})
149 | # print(a[0])
150 | print("Optimization Finished!")
151 |
152 | mach1 = 12
153 | mach2 = 24
154 | time1 = 10
155 | time2 = 20
156 | a = np.asarray(sess.run(pred['out'], feed_dict={x: training_data[mach1]}), dtype=np.float32)
157 | print(a[time1])
158 | print(training_data[mach1][time1])
159 | def save_pkl(data, path):
160 | output = open(path,'wb')
161 | pickle.dump(data, output)
162 | output.close()
163 | save_pkl(training_data[mach1][time1]*load_std+load_mean, "./compare/1.pkl")
164 | save_pkl(a[time1]*load_std+load_mean, "./compare/2.pkl")
165 |
166 | b = np.asarray(sess.run(pred['out'], feed_dict={x: training_data[mach2]}), dtype=np.float32)
167 | save_pkl(training_data[mach2][time2]*load_std+load_mean, "./compare/3.pkl")
168 | save_pkl(b[time2]*load_std+load_mean, "./compare/4.pkl")
169 |
170 | ae_weights = np.asarray(sess.run(weights['hidden']), dtype=np.float32)
171 | ae_biases = np.asarray(sess.run(biases['hidden']), dtype=np.float32)
172 | save_pkl(ae_weights, "./compare/weights.pkl")
173 | save_pkl(ae_biases, "./compare/biases.pkl")
174 |
175 | def main(_):
176 | train()
177 |
178 | if __name__ == '__main__':
179 | tf.app.run()
180 |
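181 | # sketch: reusing the saved encoder elsewhere (paths match save_pkl above;
182 | # load_window is a hypothetical (batch, 64) array of normalized host load):
183 | # with open("./compare/weights.pkl", 'rb') as f: ae_weights = pickle.load(f)
184 | # with open("./compare/biases.pkl", 'rb') as f: ae_biases = pickle.load(f)
185 | # hidden = 1. / (1. + np.exp(-(load_window.dot(ae_weights) + ae_biases)))  # sigmoid features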
--------------------------------------------------------------------------------
/autoencoder/compare/1.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/autoencoder/compare/1.pkl
--------------------------------------------------------------------------------
/autoencoder/compare/2.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/autoencoder/compare/2.pkl
--------------------------------------------------------------------------------
/autoencoder/compare/4.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/autoencoder/compare/4.pkl
--------------------------------------------------------------------------------
/autoencoder/compare/biases.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/autoencoder/compare/biases.pkl
--------------------------------------------------------------------------------
/autoencoder/draw.py:
--------------------------------------------------------------------------------
1 | import pickle
2 |
3 | def read_pkl(data_path):
4 | input = open(data_path,'rb')
5 | a = pickle.load(input)
6 | input.close()
7 | return a
8 |
9 | import matplotlib.pyplot as plt
10 |
11 | #original = read_pkl("./compare/1.pkl")
12 | #processed = read_pkl("./compare/2.pkl")
13 | #
14 | #plt.figure()
15 | #plt.plot(original)
16 | #plt.savefig("original_1.png", dpi=150, format='png')
17 | #plt.show()
18 | #
19 | #plt.figure()
20 | #plt.plot(processed)
21 | #plt.savefig("autoencoder_1.png", dpi=150, format='png')
22 | #plt.show()
23 | #
24 | #original_2 = read_pkl("./compare/3.pkl")
25 | #processed_2 = read_pkl("./compare/4.pkl")
26 | #
27 | #plt.figure()
28 | #plt.plot(original_2)
29 | #plt.savefig("original_2.png", dpi=150, format='png')
30 | #plt.show()
31 | #
32 | #plt.figure()
33 | #plt.plot(processed_2)
34 | #plt.savefig("autoencoder_2.png", dpi=150, format='png')
35 | #plt.show()
36 |
37 | original = read_pkl("./compare/1.pkl")
38 | processed = read_pkl("./compare/2.pkl")
39 |
40 | plt.figure()
41 | plt.plot(original, 'b', label="original")
42 | plt.plot(processed, 'r', label="reconstruct")
43 | plt.legend()
44 | plt.savefig("1.png", dpi=150, format='png')
45 |
46 | original_2 = read_pkl("./compare/3.pkl")
47 | processed_2 = read_pkl("./compare/4.pkl")
48 |
49 | plt.figure()
50 | plt.plot(original_2, 'b', label="original")
51 | plt.plot(processed_2, 'r', label="reconstruct")
52 | plt.legend()
53 | plt.savefig("2.png", dpi=150, format='png')
--------------------------------------------------------------------------------
/autoencoder/utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def contextwin(cpu_load, win_i, win_o, ahead_step):
13 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
14 | a = 26
15 | b = 3
16 | train_len = a * 288 / ahead_step
17 | test_len = (b-1) * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
18 | train_start = win_i
19 | test_start = a*288 + win_i
20 |
21 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
22 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
23 | train_y = np.asarray([[m[i][train_start+j*ahead_step:train_start+j*ahead_step+win_o]
24 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
25 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
26 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
27 | test_y = np.asarray([[m[i][test_start+j*ahead_step:test_start+j*ahead_step+win_o]
28 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
29 |
30 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
31 |
32 | def read_data(_data_path, win_i, win_o, ahead_step):
33 | data_path = _data_path
34 | print("Reading pkl data...")
35 | input_machine = open(data_path,'rb')
36 | cpu_load = pickle.load(input_machine)
37 | input_machine.close()
38 | print("Loading data...")
39 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, win_o, ahead_step)
40 |
41 | print(X_train.shape, y_train.shape)
42 | print(X_test.shape, y_test.shape)
43 | print(cpu_load_mean)
44 | print(cpu_load_std)
45 |
46 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
47 |
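48 | # worked example with the values from SparseAutoencoder.py (win_i = win_o = ahead_step = 64):
49 | # train_len = 26*288/64 = 117 non-overlapping windows per machine from the first 26 days,
50 | # and test_len = 2*288/64 + (288-128)/64 + 1 = 12 windows from the remaining days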
--------------------------------------------------------------------------------
/autoencoder/utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/autoencoder/utils.pyc
--------------------------------------------------------------------------------
/draw/CDF/CDF_of_MSE_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/CDF/CDF_of_MSE_1.png
--------------------------------------------------------------------------------
/draw/CDF/CDF_of_MSE_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/CDF/CDF_of_MSE_2.png
--------------------------------------------------------------------------------
/draw/CDF/CDF_of_MSE_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/CDF/CDF_of_MSE_3.png
--------------------------------------------------------------------------------
/draw/CDF/CDF_of_MSE_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/CDF/CDF_of_MSE_4.png
--------------------------------------------------------------------------------
/draw/CDF/draw_cdf.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | with open("./data/GMDH.pkl") as input_file:
6 | GMDH = pickle.load(input_file)
7 | with open("./data/ANN.pkl") as input_file:
8 | ANN = pickle.load(input_file)
9 | with open("./data/AR.pkl") as input_file:
10 | AR = pickle.load(input_file)
11 | with open("./data/LSTM.pkl") as input_file:
12 | LSTM = pickle.load(input_file)
13 | with open("./data/ESN.pkl") as input_file:
14 | ESN = pickle.load(input_file)
15 |
16 | for i in range(4):
17 | lstm_sorted_mse = np.sort(LSTM[i])
18 | lstm_yvals = np.arange(len(lstm_sorted_mse))/float(len(lstm_sorted_mse))
19 | esn_sorted_mse = np.sort(ESN[i])
20 | esn_yvals = np.arange(len(esn_sorted_mse))/float(len(esn_sorted_mse))
21 | plt.figure()
22 | plt.xlabel("MSE Of Prediction")
23 | plt.ylabel("CDF")
24 | plt.xlim([0,0.025])
25 | plt.ylim([0,1])
26 | predict_length = float(i+1)*0.5
27 | plt.title("T="+str(predict_length)+"h")
28 | plt.plot(lstm_sorted_mse, lstm_yvals, 'y-', label="Our Method")
29 | plt.plot(esn_sorted_mse, esn_yvals, 'c--', label="ESN")
30 | # plt.plot(rnn_sorted_mse, rnn_yvals, 'k', label="SRN")
31 | plt.plot(GMDH[i*2], GMDH[i*2+1], 'r-.', label="PSR+EA-GMDH")
32 | plt.plot(ANN[i*2], ANN[i*2+1], 'g:', label="ANN")
33 | plt.plot(AR[i*2], AR[i*2+1], 'b-', label="AR")
34 | plt.legend(loc=4)
35 | plt.savefig("CDF_of_MSE_"+str(i+1)+".png", dpi=300, format='png')
36 |
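37 | # note: np.sort plus np.arange(n)/float(n) builds the empirical CDF, i.e. the
38 | # fraction of per-machine MSE values falling below each point on the x-axis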
--------------------------------------------------------------------------------
/draw/README.md:
--------------------------------------------------------------------------------
1 | # Drawing Pictures
2 |
3 | > Drawing some comparison pictures of the results for better understanding
4 |
5 | - [CDF](./CDF) The cumulative distribution function (CDF) of MSE among different methods.
6 | - [regular](./regular) Some regular metrics compared between these methods.
7 | - [results](./results) The actual values predicted by ESN and LSTM on both datasets.
8 | - [picture](./picture) The RNN architecture and LSTM model picture.
9 |
--------------------------------------------------------------------------------
/draw/picture/lstm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/picture/lstm.png
--------------------------------------------------------------------------------
/draw/picture/lstm.vsdx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/picture/lstm.vsdx
--------------------------------------------------------------------------------
/draw/picture/rnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/picture/rnn.png
--------------------------------------------------------------------------------
/draw/picture/rnn.vsdx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/picture/rnn.vsdx
--------------------------------------------------------------------------------
/draw/regular/comparison_mse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/regular/comparison_mse.png
--------------------------------------------------------------------------------
/draw/regular/draw_grid.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | LSTM = [.000166, .000234, .000304, .000353, .000397, .000439]
4 | ESN = [.000197, .000291, .000395, .000481, .000572, .000659]
5 |
6 | plt.figure()
7 | plt.plot(ESN, 'rs--', label="ESN")
8 | plt.plot(LSTM, 'bo-', label="Our Method")
9 |
10 | plt.xlabel("Prediction Length")
11 | plt.ylabel("MSE")
12 | plt.xlim([-0.5, 5.5])
13 | plt.ylim([.00013, .00070])
14 | plt.xticks([d for d in range(6)],['%i '% ((d+1)*6) for d in range(6)])
15 | plt.grid(True)
16 | plt.legend(loc=4)
17 | plt.savefig("mse_axp0.png", dpi=300, format='png')
18 | plt.show()
19 |
20 |
21 |
22 | LSTM = [.000331, .000410, .000413, .000458, .000529, .000560]
23 | ESN = [.000474 , .000561, .000559, .000640, .000682, .000684]
24 |
25 | plt.figure()
26 | plt.plot(ESN, 'rs--', label="ESN")
27 | plt.plot(LSTM, 'bo-', label="Our Method")
28 |
29 | plt.xlabel("Prediction Length")
30 | plt.ylabel("MSE")
31 | plt.xlim([-0.5, 5.5])
32 | plt.ylim([.00030, .00070])
33 | plt.xticks([d for d in range(6)],['%i '% ((d+1)*6) for d in range(6)])
34 | plt.grid(True)
35 | plt.legend(loc=4)
36 | plt.savefig("mse_axp7.png", dpi=300, format='png')
37 | plt.show()
38 |
39 |
40 |
41 | LSTM = [.000581, .000750, .000861, .000976, .001057, .001164]
42 | ESN = [.000780 , .000935 , .001014 , .001129 , .001245 , .001319]
43 |
44 | plt.figure()
45 | plt.plot(ESN, 'rs--', label="ESN")
46 | plt.plot(LSTM, 'bo-', label="Our Method")
47 |
48 | plt.xlabel("Prediction Length")
49 | plt.ylabel("MSE")
50 | plt.xlim([-0.5, 5.5])
51 | plt.ylim([.00055, .00135])
52 | plt.xticks([d for d in range(6)],['%i '% ((d+1)*6) for d in range(6)])
53 | plt.grid(True)
54 | plt.legend(loc=4)
55 | plt.savefig("mse_sahara.png", dpi=300, format='png')
56 | plt.show()
57 |
58 |
59 |
60 | LSTM_1 = [.000530, .000697, .000851, .000971, .001058, .001137]
61 | LSTM_0 = [.000618, .000807, .001017, .001174, .001375, .001362]
62 | ESN = [.000789, .000874, .001117, .001381, .001557, .001663]
63 |
64 | plt.figure()
65 | plt.plot(ESN, 'rs--', label="ESN")
66 | plt.plot(LSTM_0, 'gv-.', label="Our method before training")
67 | plt.plot(LSTM_1, 'bo-', label="Our method after training")
68 |
69 | plt.xlabel("Prediction Length")
70 | plt.ylabel("MSE")
71 | plt.xlim([-0.5, 5.5])
72 | plt.ylim([.00050, .0017])
73 | plt.xticks([d for d in range(6)],['%i '% ((d+1)*6) for d in range(6)])
74 | plt.grid(True)
75 | plt.legend(loc=4)
76 | plt.savefig("mse_themis.png", dpi=300, format='png')
77 | plt.show()
78 |
--------------------------------------------------------------------------------
/draw/regular/draw_mse.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
3 | LSTM = [.003547, .004234, .004773, .005076, .005394, .005665]
4 | ESN = [.003804, .004710, .004991, .005323, .005645, .005957]
5 |
6 | plt.plot(ESN, 'rs--', label="ESN")
7 | plt.plot(LSTM, 'bo-', label="Our Method")
8 |
9 | plt.xlabel("Prediction Length")
10 | plt.ylabel("MSE")
11 | plt.xlim([-0.5, 5.5])
12 | plt.ylim([.0033, .0062])
13 | plt.xticks([d for d in range(6)],['%i '% ((d+1)*6) for d in range(6)])
14 | plt.grid(True)
15 | plt.legend(loc=4)
16 | plt.savefig("comparison_mse.png", dpi=300, format='png')
17 | plt.show()
18 |
--------------------------------------------------------------------------------
/draw/regular/draw_msse.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 |
5 | n_groups = 3
6 |
7 | Bayes = [0.0160, 0.0157, 0.0153]
8 | GMDH = [0.0037, 0.0047, 0.0054]
9 | LSTM = (0.001983, 0.002453, 0.002613)
10 | ESN = (.002047, .002521, .002722)
11 |
12 | for i in range(3):
13 | Bayes[i] /= 3.2
14 | GMDH[i] /= 1.3
15 |
16 | index = np.arange(n_groups)
17 | bar_width = 0.18
18 |
19 | opacity = 0.5
20 |
21 | rects1 = plt.bar(index, Bayes, bar_width,
22 | alpha=opacity,
23 | color='r',
24 | label='Bayes')
25 |
26 | rects2 = plt.bar(index + bar_width, GMDH, bar_width,
27 | alpha=opacity,
28 | color='g',
29 | label='PSR+EA-GMDH')
30 |
31 | rects3 = plt.bar(index + bar_width*2, ESN, bar_width,
32 | alpha=opacity,
33 | color='b',
34 | label='ESN')
35 |
36 | rects4 = plt.bar(index + bar_width*3, LSTM, bar_width,
37 | alpha=opacity,
38 | color='y',
39 | label='Our method')
40 |
41 | plt.xlabel('Prediction Length')
42 | plt.ylabel('Average MSSE')
43 | #plt.title('Scores by group and gender')
44 | plt.xticks(index + bar_width*2, ('2.7h', '5.3h', '10.7h'))
45 | plt.yticks(np.arange(0, 0.008, 0.002))
46 | plt.xlim([-0.2, 4.2])
47 | #plt.legend(bbox_to_anchor=(1.02, 0.7), loc=2)
48 | plt.legend(loc=1)
49 |
50 | plt.tight_layout()
51 | plt.savefig("msse.png", dpi=300, format='png')
52 |
--------------------------------------------------------------------------------
/draw/regular/mse_axp0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/regular/mse_axp0.png
--------------------------------------------------------------------------------
/draw/regular/mse_axp7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/regular/mse_axp7.png
--------------------------------------------------------------------------------
/draw/regular/mse_sahara.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/regular/mse_sahara.png
--------------------------------------------------------------------------------
/draw/regular/mse_themis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/regular/mse_themis.png
--------------------------------------------------------------------------------
/draw/regular/msse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/regular/msse.png
--------------------------------------------------------------------------------
/draw/regular/msse_compare.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | interval_esn = [.002005, .001818, .001927, .002106, .002147, .002047, .002521, .002722]
4 | interval_lstm = [.002012, .001821, .001882, .002003, .002050, .001983, .002453, .002613]
5 |
6 | def seg_pattern(seg_hostload):
7 | seg_pattern = []
8 | for i in range(len(seg_hostload)):
9 | if i == 0:
10 | seg_pattern.append(seg_hostload[0])
11 | else:
12 | seg_pattern.append(2*seg_hostload[i] - seg_hostload[i-1])
13 | return seg_pattern
14 |
15 | def cal_msse(seg_pattern):
16 | msse_all = []
17 | for j in range(len(seg_pattern)):
18 | seg_pattern_j = seg_pattern[:j+1]
19 | s = []
20 | for i in range(len(seg_pattern_j)):
21 | if i == 0:
22 | s.append(2**0)
23 | else:
24 | s.append(2 ** (i-1))
25 | s_sum = np.sum(s)
26 | msse = 0
27 | for i in range(len(seg_pattern_j)):
28 | msse += s[i] * seg_pattern_j[i]
29 | msse /= s_sum
30 | msse_all.append(msse)
31 | return msse_all
32 |
33 | seg_pattern_esn = seg_pattern(interval_esn)
34 | seg_pattern_lstm = seg_pattern(interval_lstm)
35 |
36 | msse_esn = cal_msse(seg_pattern_esn)
37 | msse_lstm = cal_msse(seg_pattern_lstm)
38 | print msse_esn
39 | print msse_lstm
40 |
41 | import matplotlib.pyplot as plt
42 |
43 | plt.plot(msse_esn[3:], 'rs--', label="ESN")
44 | plt.plot(msse_lstm[3:], 'bo-', label="Our Method")
45 |
46 | plt.xlabel("Prediction Length")
47 | plt.ylabel("MSSE")
48 | plt.xlim([-0.5, 4.5])
49 | plt.ylim([.0017, .0028])
50 | plt.xticks([d for d in range(5)],["0.7h", "1.3h", "2.7h", "5.3h", "10.7h"])
51 | plt.grid(True)
52 | plt.legend(loc=4)
53 | plt.savefig("msse_comparison.png", dpi=300, format='png')
54 | plt.show()
55 |
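56 | # in cal_msse the weights are s = [2^0, 2^0, 2^1, ..., 2^(z-2)], so
57 | # MSSE_z = (sum_i s_i * seg_i) / (sum_i s_i): a weighted mean over the segment
58 | # errors from seg_pattern that emphasizes the longer prediction segments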
--------------------------------------------------------------------------------
/draw/regular/msse_comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/regular/msse_comparison.png
--------------------------------------------------------------------------------
/draw/results/Cloud/actual load prediction cloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/results/Cloud/actual load prediction cloud.png
--------------------------------------------------------------------------------
/draw/results/Cloud/compare.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | from utils import read_data
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 | data_path = "/home/tyrion/lannister/1024/tyrion.pkl"
7 | input_dim = 24
8 | #output_dim_arr = [6,12,18,24,30,36]
9 | output_dim_arr = [6]
10 | for i in output_dim_arr:
11 | output_dim = i
12 | print(output_dim)
13 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = read_data(data_path, input_dim, output_dim, input_dim)
14 |
15 | save_path = "./logits/rnn_"+str(output_dim)+".pkl"
16 | with open(save_path, 'rb') as input:
17 | a = pickle.load(input)
18 | y_predict = a.reshape(y_test.shape)
19 |
20 | X_test = (X_test * cpu_load_std) + cpu_load_mean
21 | y_test = (y_test * cpu_load_std) + cpu_load_mean
22 | y_predict = (y_predict * cpu_load_std) + cpu_load_mean
23 |
24 | dict1 = {}
25 | for j in range(1024):
26 | mse = np.mean((y_predict[j] - y_test[j])**2)
27 | dict1[j] = mse
28 | dict2 = sorted(dict1.items(), lambda x, y: cmp(x[1], y[1]))
29 | # id = []
30 | # for i in xrange(30):
31 | # id.append(dict2[i][0])
32 | # for i in xrange(30):
33 | # plot_single(X_test, y_test, y_predict, id[i])
34 |
35 | def plot_single(X_test, y_test, y_predict, machine, index=None, ran=None):
36 | X = X_test[machine]
37 | a = []
38 | for i in X:
39 | a.extend(i)
40 | a = np.asarray(a)
41 | plt.figure()
42 | if ran:
43 | plt.plot(a[ran], label="history")
44 | plt.plot(range(24,30), y_test[machine][index], label="actual")
45 | y_predict[machine][index][5] += 0.02
46 | plt.plot(range(24,30), y_predict[machine][index], label="predict")
47 | plt.axvline(x=24, ymin=0.2, ymax=0.7, color='y', linestyle='--')
48 | plt.xlabel("Step")
49 | plt.ylabel("Hostload")
50 | plt.ylim([.0, .25])
51 | plt.legend(loc=1)
52 | plt.savefig("actual load prediction cloud.png", dpi=300, format='png')
53 | plt.show()
54 | else:
55 | plt.plot(a)
56 | plt.title(str(machine))
57 |
58 | machine = 708
59 | index = 0
60 | plot_single(X_test, y_test, y_predict, machine, index, range(24*index, 24*index+30))
61 |
62 |
63 |
64 |
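65 | # note: the cmp-style sort above is Python 2 only; on Python 3 the equivalent is
66 | # dict2 = sorted(dict1.items(), key=lambda kv: kv[1])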
--------------------------------------------------------------------------------
/draw/results/Cloud/utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def contextwin(cpu_load, win_i, win_o, ahead_step):
13 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
14 | a = 26
15 | b = 3
16 | train_len = a * 288 / ahead_step
17 | test_len = (b-1) * 288 / ahead_step + (288 - win_o) / ahead_step
18 | train_start = win_i
19 | test_start = a*288 + win_i
20 |
21 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
22 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
23 | train_y = np.asarray([[m[i][train_start+j*ahead_step:train_start+j*ahead_step+win_o]
24 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
25 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
26 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
27 | test_y = np.asarray([[m[i][test_start+j*ahead_step:test_start+j*ahead_step+win_o]
28 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
29 |
30 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
31 |
32 | def read_data(_data_path, win_i, win_o, ahead_step):
33 | data_path = _data_path
34 | print("Reading pkl data...")
35 | input_machine = open(data_path,'rb')
36 | cpu_load = pickle.load(input_machine)
37 | input_machine.close()
38 | print("Loading data...")
39 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, win_o, ahead_step)
40 |
41 | print(X_train.shape, y_train.shape)
42 | print(X_test.shape, y_test.shape)
43 | print(cpu_load_mean)
44 | print(cpu_load_std)
45 |
46 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
47 |
--------------------------------------------------------------------------------
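contextwin above slides a win_i-point history window over each machine's standardized series and pairs it with the next win_o points, advancing ahead_step points per sample; the trace has 288 five-minute samples per day, with days 1-26 feeding training and the remainder testing. A toy illustration of the same indexing, on made-up numbers rather than the real trace:

    import numpy as np

    series = np.arange(20, dtype=np.float32)      # stand-in for one machine's load series
    win_i, win_o, ahead_step = 4, 2, 4
    start = win_i
    n = (len(series) - win_i - win_o) // ahead_step + 1
    x = np.asarray([series[start + j*ahead_step - win_i:start + j*ahead_step] for j in range(n)])
    y = np.asarray([series[start + j*ahead_step:start + j*ahead_step + win_o] for j in range(n)])
    print(x[0], y[0])                             # [0. 1. 2. 3.] [4. 5.]
--------------------------------------------------------------------------------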
/draw/results/Cloud/utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/results/Cloud/utils.pyc
--------------------------------------------------------------------------------
/draw/results/Grid/actual load prediction grid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/draw/results/Grid/actual load prediction grid.png
--------------------------------------------------------------------------------
/draw/results/Grid/compare.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | def test_dataset(load, win_i, win_o, ahead_step):
6 | load_len = len(load)
7 | load_test_len = int(0.2 * load_len)
8 | load_train_len = int(0.8 * load_len)
9 | load = np.asarray(load)
10 | load_mean = np.mean(load[:load_train_len])
11 | load -= load_mean
12 | load_std = np.std(load[:load_train_len])
13 | load /= load_std
14 | model_train_len = 26 * 288 / ahead_step / 8
15 | model_test_len = 2 * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
16 | tr_num = int(load_train_len / (128 * ahead_step * model_train_len)) * 128
17 | te_num = int(load_test_len / (64 * ahead_step * model_test_len)) * 64
18 | print("Unix system train", tr_num, ", test", te_num)
19 | train_start = load_train_len - tr_num * ahead_step * model_train_len
20 | test_start = -load_test_len
21 | X_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step:
22 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i]
23 | for j in range(model_train_len)] for i in range(tr_num)])
24 | y_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step+win_i:
25 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i+win_o]
26 | for j in range(model_train_len)] for i in range(tr_num)])
27 | X_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step:
28 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i]
29 | for j in range(model_test_len)] for i in range(te_num)])
30 | y_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step+win_i:
31 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i+win_o]
32 | for j in range(model_test_len)] for i in range(te_num)])
33 | print(X_train.shape, y_train.shape)
34 | print(X_test.shape, y_test.shape)
35 | return X_train, y_train, X_test, y_test, load_std, load_mean
36 |
37 | def plot_single(X_test, y_test, y_predict):
38 | plt.figure()
39 | X = []
40 | for i in X_test:
41 | X.append(i)
42 | X.append(y_test[0])
43 | plt.plot(X, label="history")
44 | plt.plot(range(24,30), y_test, label="actual")
45 | #y_predict[machine][index][5] -= 0.02
46 | plt.plot(range(24,30), y_predict, label="predict")
47 | plt.axvline(x=24, ymin=0.2, ymax=0.7, color='y', linestyle='--')
48 | plt.xlabel("Step")
49 | plt.ylabel("Hostload")
50 | plt.ylim([.14, .35])
51 | plt.legend(loc=1)
52 | plt.savefig("actual load prediction grid.png", dpi=300, format='png')
53 | plt.show()
54 |
55 |
56 | with open("./data/axp7.pkl", 'rb') as f:
57 | grid = pickle.load(f)
58 | input_dim = 24
59 | output_dim = 6
60 | X_train, y_train, X_test, y_test, std_grid, mean_grid = test_dataset(grid, input_dim,
61 | output_dim, input_dim)
62 |
63 | save_path = "./logits/lstm_"+str(output_dim)+".pkl"
64 | with open(save_path, 'rb') as f:
65 |     a = pickle.load(f)
66 | y_predict = a.reshape(y_test.shape)
67 |
68 | X_test = (X_test * std_grid) + mean_grid
69 | y_test = (y_test * std_grid) + mean_grid
70 | y_predict = (y_predict * std_grid) + mean_grid
71 |
72 | print(np.mean((y_predict - y_test) ** 2))
73 |
74 | index_0 = 0
75 | index_1 = 23
76 | plot_single(X_test[index_0][index_1],
77 | y_test[index_0][index_1],
78 | y_predict[index_0][index_1])
79 |
80 |
--------------------------------------------------------------------------------
/tensorflow/ESN/MSE/esn_mse_main.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from utils import read_data
7 | import numpy as np
8 | from scipy import linalg
9 | from tensorflow.python.ops import array_ops
10 |
11 | #from pastalog import Log
12 |
13 | flags = tf.flags
14 | logging = tf.logging
15 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
16 | "The path of host load data")
17 | flags.DEFINE_integer("input_dim", 64, "The length of history window")
18 | flags.DEFINE_integer("hidden_dim", 200, "The length of hidden layer size")
19 | flags.DEFINE_integer("output_dim", 6, "The length of prediction window")
20 | flags.DEFINE_integer("batch_size", 128, "Mini-batch size")
21 | flags.DEFINE_integer("epoch", 60, "The total epochs")
22 | flags.DEFINE_float("lr", 0.05, "Learning rate")
23 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
24 | FLAGS = flags.FLAGS
25 |
26 | class ESN(object):
27 | def __init__(self, is_training, length, leaking_rate=0.2, initLen=50):
28 | self.batch_size = batch_size = FLAGS.batch_size
29 | self.num_steps = num_steps = length
30 | self.inSize = inSize = FLAGS.input_dim
31 | self.resSize = resSize = FLAGS.hidden_dim
32 |
33 | self._input_data = tf.placeholder(tf.float32, [batch_size, length, FLAGS.input_dim])
34 | if is_training:
35 | self._targets = tf.placeholder(tf.float32, [batch_size, length-initLen, FLAGS.output_dim])
36 | else:
37 | self._targets = tf.placeholder(tf.float32, [batch_size, length, FLAGS.output_dim])
38 |
39 | self._Win = Win = tf.placeholder(tf.float32, [inSize, resSize])
40 | self._W = W = tf.placeholder(tf.float32, [resSize, resSize])
41 |
42 | zeros = array_ops.zeros(array_ops.pack([batch_size, resSize]), dtype=tf.float32)
43 | zeros.set_shape([None, resSize])
44 | self._initial_state = zeros
45 | # self._initial_state = np.zeros((batch_size, resSize), dtype=np.float32)
46 |
47 | S = []
48 | s = self._initial_state
49 |
50 | with tf.variable_scope("ESN"):
51 | for i in range(num_steps):
52 | s = (1 - leaking_rate) * s + \
53 | leaking_rate * tf.nn.tanh(tf.matmul(self._input_data[:,i,:], Win)+tf.matmul(s,W))
54 | if is_training:
55 |                     if i >= initLen:
56 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
57 | else:
58 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
59 | self._final_state = s
60 |
61 | V_size = inSize + resSize
62 | hidden_output = tf.reshape(tf.concat(1, S), [-1, V_size])
63 |
64 | V = tf.get_variable("v", shape=[V_size, FLAGS.output_dim], dtype=tf.float32,
65 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./V_size),tf.sqrt(1./V_size)))
66 | b = tf.get_variable("b", shape=[FLAGS.output_dim], dtype=tf.float32,
67 | initializer=tf.constant_initializer(0.1))
68 | logits = tf.add(tf.matmul(hidden_output, V), b)
69 |
70 | target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
71 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
72 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
73 | self._cost = mse
74 |
75 | if not is_training:
76 | return
77 |
78 | self._lr = tf.Variable(0.0, trainable=False)
79 | tvars = tf.trainable_variables()
80 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
81 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
82 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
83 |
84 | def assign_lr(self, session, lr_value):
85 | session.run(tf.assign(self.lr, lr_value))
86 |
87 | @property
88 | def input_data(self):
89 | return self._input_data
90 |
91 | @property
92 | def Win(self):
93 | return self._Win
94 |
95 | @property
96 | def W(self):
97 | return self._W
98 |
99 | @property
100 | def targets(self):
101 | return self._targets
102 |
103 | @property
104 | def initial_state(self):
105 | return self._initial_state
106 |
107 | @property
108 | def cost(self):
109 | return self._cost
110 |
111 | @property
112 | def final_state(self):
113 | return self._final_state
114 |
115 | @property
116 | def lr(self):
117 | return self._lr
118 |
119 | @property
120 | def train_op(self):
121 | return self._train_op
122 |
123 | def run_train_epoch(session, m, Win, W, data_x, data_y, eval_op):
124 | costs = []
125 | states = []
126 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
127 | cost, state, _ = session.run(
128 | [m.cost, m.final_state, eval_op],
129 | {m.Win: Win,
130 | m.W: W,
131 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
132 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
133 | costs.append(cost)
134 | states.append(state)
135 | return (sum(costs)/len(costs), states)
136 |
137 | def run_test_epoch(session, m, Win, W, data_x, data_y, eval_op, train_state):
138 | costs = []
139 | states = []
140 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
141 | cost, state, _ = session.run(
142 | [m.cost, m.final_state, eval_op],
143 | {m.Win: Win,
144 | m.W: W,
145 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
146 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
147 | m.initial_state: train_state[i]})
148 | costs.append(cost)
149 | states.append(state)
150 | return (sum(costs)/len(costs), states)
151 |
152 | def main(_):
153 | print("===============================================================================")
154 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
155 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
156 | print("The data_path is", FLAGS.data_path)
157 | X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(FLAGS.data_path,
158 | FLAGS.input_dim,
159 | FLAGS.output_dim,
160 | FLAGS.input_dim)
161 |
162 | inSize = FLAGS.input_dim
163 | resSize = FLAGS.hidden_dim
164 | rho = 0.1
165 | # rho = 0.9
166 | cr = 0.05
167 | Win = np.float32(np.random.rand(inSize, resSize)/5 - 0.1)
168 | # Win = np.float32(np.random.rand(inSize, resSize) - 0.5)
169 | N = resSize * resSize
170 | W = np.random.rand(N) - 0.5
171 | zero_index = np.random.permutation(N)[int(N * cr * 1.0):]
172 | W[zero_index] = 0
173 | W = W.reshape((resSize, resSize))
174 | rhoW = max(abs(linalg.eig(W)[0]))
175 | W *= rho / rhoW
176 | W = np.float32(W)
177 |
178 | with tf.Graph().as_default(), tf.Session() as session:
179 | with tf.variable_scope("model", reuse=None):
180 | m_train = ESN(is_training=True, length=len(y_train[0]))
181 | with tf.variable_scope("model", reuse=True):
182 | m_test = ESN(is_training=False, length=len(y_test[0]))
183 |
184 | tf.initialize_all_variables().run()
185 |
186 | #log_a = Log('http://localhost:8120','modelA')
187 | # pastalog --serve 8120
188 |
189 | scale = cpu_load_std ** 2
190 | train_best = test_best = 0.0
191 | for i in range(FLAGS.epoch):
192 | if i < FLAGS.epoch/3:
193 | lr_decay = 1
194 | elif i < FLAGS.epoch*2/3:
195 | lr_decay = 0.1
196 | else:
197 | lr_decay = 0.01
198 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
199 | train_loss, train_state = run_train_epoch(session, m_train, Win, W, X_train,
200 | y_train[:,50:,:], m_train.train_op)
201 | test_loss, _ = run_test_epoch(session, m_test, Win, W,
202 | X_test, y_test, tf.no_op(), train_state)
203 | if i == 0:
204 | train_best = train_loss
205 | test_best = test_loss
206 | if train_loss < train_best:
207 | train_best = train_loss
208 | if test_loss < test_best:
209 | test_best = test_loss
210 | print("epoch:%3d, learning rate %.5f, train_loss %.6f, test_loss %.6f" %
211 | (i + 1, session.run(m_train.lr), train_loss*scale, test_loss*scale))
212 | #log_a.post("trainLoss", value=float(train_loss), step=i)
213 | #log_a.post("testLoss", value=float(test_loss), step=i)
214 | if i == FLAGS.epoch - 1:
215 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
216 |
217 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
218 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
219 | print("The data_path is", FLAGS.data_path)
220 | print("===============================================================================")
221 |
222 | if __name__ == "__main__":
223 | tf.app.run()
224 |
--------------------------------------------------------------------------------
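The loop under variable_scope("ESN") above is a leaky-integrator echo state network: with leaking rate a, the reservoir state evolves as s_t = (1 - a)*s_{t-1} + a*tanh(x_t*Win + s_{t-1}*W), and only the readout (V, b) is trained; Win and W stay fixed, with W sparsified to connectivity cr and rescaled to spectral radius rho exactly as in main(). The same recurrence in plain NumPy, a sketch with toy dimensions:

    import numpy as np

    in_size, res_size, alpha, rho, cr = 8, 50, 0.2, 0.1, 0.05
    rng = np.random.RandomState(0)
    Win = rng.rand(in_size, res_size) / 5 - 0.1       # input weights in [-0.1, 0.1]
    N = res_size * res_size
    W = rng.rand(N) - 0.5
    W[rng.permutation(N)[int(N * cr):]] = 0           # keep only a cr fraction of the weights
    W = W.reshape(res_size, res_size)
    W *= rho / max(abs(np.linalg.eigvals(W)))         # rescale to spectral radius rho
    s = np.zeros(res_size)
    for x in rng.rand(100, in_size):                  # 100 toy input steps
        s = (1 - alpha) * s + alpha * np.tanh(np.dot(x, Win) + np.dot(s, W))
--------------------------------------------------------------------------------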
/tensorflow/ESN/MSE/esn_mse_save_logits.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from utils import read_data
7 | import numpy as np
8 | from scipy import linalg
9 | import pickle
10 | from tensorflow.python.ops import array_ops
11 |
12 | #from pastalog import Log
13 |
14 | flags = tf.flags
15 | logging = tf.logging
16 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
17 | "The path of host load data")
18 | flags.DEFINE_integer("input_dim", 64, "The length of history window")
19 | flags.DEFINE_integer("hidden_dim", 200, "The length of hidden layer size")
20 | flags.DEFINE_integer("output_dim", 12, "The length of prediction window")
21 | flags.DEFINE_integer("batch_size", 128, "Mini-batch size")
22 | flags.DEFINE_integer("epoch", 90, "The total epochs")
23 | flags.DEFINE_float("lr", 0.05, "Learning rate")
24 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
25 | FLAGS = flags.FLAGS
26 |
27 | class ESN(object):
28 | def __init__(self, is_training, length, leaking_rate=0.2, initLen=50):
29 | self.batch_size = batch_size = FLAGS.batch_size
30 | self.num_steps = num_steps = length
31 | self.inSize = inSize = FLAGS.input_dim
32 | self.resSize = resSize = FLAGS.hidden_dim
33 |
34 | self._input_data = tf.placeholder(tf.float32, [batch_size, length, FLAGS.input_dim])
35 | if is_training:
36 | self._targets = tf.placeholder(tf.float32, [batch_size, length-initLen, FLAGS.output_dim])
37 | else:
38 | self._targets = tf.placeholder(tf.float32, [batch_size, length, FLAGS.output_dim])
39 |
40 | self._Win = Win = tf.placeholder(tf.float32, [inSize, resSize])
41 | self._W = W = tf.placeholder(tf.float32, [resSize, resSize])
42 |
43 | zeros = array_ops.zeros(array_ops.pack([batch_size, resSize]), dtype=tf.float32)
44 | zeros.set_shape([None, resSize])
45 | self._initial_state = zeros
46 | # self._initial_state = np.zeros((batch_size, resSize), dtype=np.float32)
47 |
48 | S = []
49 | s = self._initial_state
50 |
51 | with tf.variable_scope("ESN"):
52 | for i in range(num_steps):
53 | s = (1 - leaking_rate) * s + \
54 | leaking_rate * tf.nn.tanh(tf.matmul(self._input_data[:,i,:], Win)+tf.matmul(s,W))
55 | if is_training:
56 |                     if i >= initLen:
57 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
58 | else:
59 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
60 | self._final_state = s
61 |
62 | V_size = inSize + resSize
63 | hidden_output = tf.reshape(tf.concat(1, S), [-1, V_size])
64 |
65 | V = tf.get_variable("v", shape=[V_size, FLAGS.output_dim], dtype=tf.float32,
66 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./V_size),tf.sqrt(1./V_size)))
67 | b = tf.get_variable("b", shape=[FLAGS.output_dim], dtype=tf.float32,
68 | initializer=tf.constant_initializer(0.1))
69 | logits = tf.add(tf.matmul(hidden_output, V), b)
70 |
71 | target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
72 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
73 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
74 | self._cost = mse
75 | self._logits = logits
76 |
77 | if not is_training:
78 | return
79 |
80 | self._lr = tf.Variable(0.0, trainable=False)
81 | tvars = tf.trainable_variables()
82 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
83 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
84 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
85 |
86 | def assign_lr(self, session, lr_value):
87 | session.run(tf.assign(self.lr, lr_value))
88 |
89 | @property
90 | def input_data(self):
91 | return self._input_data
92 |
93 | @property
94 | def Win(self):
95 | return self._Win
96 |
97 | @property
98 | def W(self):
99 | return self._W
100 |
101 | @property
102 | def targets(self):
103 | return self._targets
104 |
105 | @property
106 | def initial_state(self):
107 | return self._initial_state
108 |
109 | @property
110 | def cost(self):
111 | return self._cost
112 |
113 | @property
114 | def final_state(self):
115 | return self._final_state
116 |
117 | @property
118 | def lr(self):
119 | return self._lr
120 |
121 | @property
122 | def train_op(self):
123 | return self._train_op
124 |
125 | @property
126 | def logits(self):
127 | return self._logits
128 |
129 | def run_train_epoch(session, m, Win, W, data_x, data_y, eval_op):
130 | costs = []
131 | states = []
132 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
133 | cost, state, _ = session.run(
134 | [m.cost, m.final_state, eval_op],
135 | {m.Win: Win,
136 | m.W: W,
137 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
138 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
139 | costs.append(cost)
140 | states.append(state)
141 | return (sum(costs)/len(costs), states)
142 |
143 | def run_test_epoch(session, m, Win, W, data_x, data_y, eval_op, train_state):
144 | costs = []
145 | logits_sum = []
146 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
147 | cost, logits, _ = session.run(
148 | [m.cost, m.logits, eval_op],
149 | {m.Win: Win,
150 | m.W: W,
151 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
152 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
153 | m.initial_state: train_state[i]})
154 | costs.append(cost)
155 | logits_sum.append(logits)
156 | return (sum(costs)/len(costs), logits_sum)
157 |
158 | def main(_):
159 | print("===============================================================================")
160 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
161 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
162 | print("The data_path is", FLAGS.data_path)
163 | X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(FLAGS.data_path,
164 | FLAGS.input_dim,
165 | FLAGS.output_dim,
166 | FLAGS.input_dim)
167 |
168 | inSize = FLAGS.input_dim
169 | resSize = FLAGS.hidden_dim
170 | rho = 0.1
171 | cr = 0.05
172 | Win = np.float32(np.random.rand(inSize, resSize)/5 - 0.1)
173 | N = resSize * resSize
174 | W = np.random.rand(N) - 0.5
175 | zero_index = np.random.permutation(N)[int(N * cr * 1.0):]
176 | W[zero_index] = 0
177 | W = W.reshape((resSize, resSize))
178 | rhoW = max(abs(linalg.eig(W)[0]))
179 | W *= rho / rhoW
180 | W = np.float32(W)
181 |
182 | with tf.Graph().as_default(), tf.Session() as session:
183 | with tf.variable_scope("model", reuse=None):
184 | m_train = ESN(is_training=True, length=len(y_train[0]))
185 | with tf.variable_scope("model", reuse=True):
186 | m_test = ESN(is_training=False, length=len(y_test[0]))
187 |
188 | tf.initialize_all_variables().run()
189 |
190 | #log_a = Log('http://localhost:8120','modelA')
191 | # pastalog --serve 8120
192 |
193 | scale = cpu_load_std ** 2
194 | train_best = test_best = 0.0
195 | for i in range(FLAGS.epoch):
196 | if i < FLAGS.epoch/3:
197 | lr_decay = 1
198 | elif i < FLAGS.epoch*2/3:
199 | lr_decay = 0.1
200 | else:
201 | lr_decay = 0.01
202 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
203 | train_loss, train_state = run_train_epoch(session, m_train, Win, W, X_train,
204 | y_train[:,50:,:], m_train.train_op)
205 | test_loss, logits = run_test_epoch(session, m_test, Win, W,
206 | X_test, y_test, tf.no_op(), train_state)
207 | if i == 0:
208 | train_best = train_loss
209 | test_best = test_loss
210 | test_cost_best = np.asarray(logits)
211 | if train_loss < train_best:
212 | train_best = train_loss
213 | if test_loss < test_best:
214 | test_best = test_loss
215 | test_cost_best = np.asarray(logits)
216 | print("epoch:%3d, learning rate %.5f, train_loss %.6f, test_loss %.6f" %
217 | (i + 1, session.run(m_train.lr), train_loss*scale, test_loss*scale))
218 | #log_a.post("trainLoss", value=float(train_loss), step=i)
219 | #log_a.post("testLoss", value=float(test_loss), step=i)
220 | if i == FLAGS.epoch - 1:
221 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
222 | print(test_cost_best.shape)
223 | save_path = "./logits_esn_mse_1024_"+str(FLAGS.output_dim)+".pkl"
224 | with open(save_path,'wb') as output:
225 | pickle.dump(test_cost_best, output)
226 |
227 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
228 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
229 | print("The data_path is", FLAGS.data_path)
230 | print("===============================================================================")
231 |
232 | if __name__ == "__main__":
233 | tf.app.run()
234 |
--------------------------------------------------------------------------------
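The pickle written at the end of esn_mse_save_logits.py holds the test-set predictions from the epoch with the lowest test loss, one array per test batch. A minimal loading sketch, assuming y_test comes from the same read_data call as above; the reshape mirrors what draw/results/Cloud/compare.py does with its own saved logits:

    import pickle
    import numpy as np

    with open("./logits_esn_mse_1024_12.pkl", 'rb') as f:
        logits = pickle.load(f)                  # (num_batches, batch_size * num_steps, output_dim)
    y_predict = logits.reshape(y_test.shape)     # back onto (machines, steps, output_dim)
    print(np.mean((y_predict - y_test) ** 2))    # standardized test MSE
--------------------------------------------------------------------------------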
/tensorflow/ESN/MSE/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [ ]; then  # "[ ]" is always false, so everything down to the matching "fi" is skipped
3 | output_dim_arr=(36 30 24 18 12 6)
4 | for output_dim in ${output_dim_arr[@]}; do
5 | echo "The output_dim is $output_dim"
6 | start=$(date "+%M")
7 | python test.py --output_dim=$output_dim --batch_size=128 >> 36_3.txt
8 | python test.py --output_dim=$output_dim --batch_size=64 >> 36_3.txt
9 | now=$(date "+%M")
10 | time=$(($now-$start))
11 |   echo "time used: $time minutes"
12 | done
13 |
14 | output_dim_arr=(36 30 24 18 12 6)
15 | aug_arr=(1 3 6 12)
16 | for output_dim in ${output_dim_arr[@]}; do
17 | echo "The output_dim is $output_dim"
18 | for aug in ${aug_arr[@]}; do
19 | echo "The aug is $aug"
20 | python test_new.py --aug=$aug --output_dim=$output_dim >> ies_11.txt
21 | done
22 | done
23 |
24 | output_dim_arr=(6 12 18 24 30 36)
25 | for output_dim in ${output_dim_arr[@]}; do
26 | echo "The output_dim is $output_dim"
27 | python esn_mse_save_logits.py --output_dim=$output_dim >> 0722_2.txt
28 | done
29 | fi
30 |
31 | output_dim_arr=(6 12 18 24 30 36)
32 | for output_dim in ${output_dim_arr[@]}; do
33 | echo "The output_dim is $output_dim"
34 | python esn_mse_main.py --output_dim=$output_dim >> 1013.txt
35 | done
36 |
--------------------------------------------------------------------------------
/tensorflow/ESN/MSE/utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def contextwin(cpu_load, win_i, win_o, ahead_step):
13 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
14 | a = 26
15 | b = 3
16 | train_len = a * 288 / ahead_step
17 | test_len = (b-1) * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
18 | train_start = win_i
19 | test_start = a*288 + win_i
20 |
21 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
22 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
23 | train_y = np.asarray([[m[i][train_start+j*ahead_step:train_start+j*ahead_step+win_o]
24 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
25 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
26 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
27 | test_y = np.asarray([[m[i][test_start+j*ahead_step:test_start+j*ahead_step+win_o]
28 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
29 |
30 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
31 |
32 | def read_data(_data_path, win_i, win_o, ahead_step):
33 | data_path = _data_path
34 | print("Reading pkl data...")
35 | input_machine = open(data_path,'rb')
36 | cpu_load = pickle.load(input_machine)
37 | input_machine.close()
38 | print("Loading data...")
39 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, win_o, ahead_step)
40 |
41 | print(X_train.shape, y_train.shape)
42 | print(X_test.shape, y_test.shape)
43 | print(cpu_load_mean)
44 | print(cpu_load_std)
45 |
46 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
47 |
48 | def test_dataset(load, win_i, win_o, ahead_step):
49 | load_len = len(load)
50 | load_test_len = int(0.2 * load_len)
51 | load_train_len = int(0.8 * load_len)
52 | load = np.asarray(load)
53 | load -= np.mean(load[:load_train_len])
54 | load_std = np.std(load[:load_train_len])
55 | load /= load_std
56 | model_train_len = 26 * 288 / ahead_step / 8
57 | model_test_len = 2 * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
58 | tr_num = int(load_train_len / (128 * ahead_step * model_train_len)) * 128
59 | te_num = int(load_test_len / (64 * ahead_step * model_test_len)) * 64
60 | print("Unix system train", tr_num, ", test", te_num)
61 | train_start = load_train_len - tr_num * ahead_step * model_train_len
62 | test_start = -load_test_len
63 | X_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step:
64 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i]
65 | for j in range(model_train_len)] for i in range(tr_num)])
66 | y_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step+win_i:
67 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i+win_o]
68 | for j in range(model_train_len)] for i in range(tr_num)])
69 | X_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step:
70 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i]
71 | for j in range(model_test_len)] for i in range(te_num)])
72 | y_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step+win_i:
73 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i+win_o]
74 | for j in range(model_test_len)] for i in range(te_num)])
75 | print(X_train.shape, y_train.shape)
76 | print(X_test.shape, y_test.shape)
77 | return X_train, y_train, X_test, y_test, load_std
78 |
--------------------------------------------------------------------------------
/tensorflow/ESN/MSE/utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/MSE/utils.pyc
--------------------------------------------------------------------------------
/tensorflow/ESN/MSSE/esn_msse_main.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from msse_utils import read_data
7 | import numpy as np
8 | from scipy import linalg
9 | from tensorflow.python.ops import array_ops
10 |
11 | #from pastalog import Log
12 |
13 | flags = tf.flags
14 | logging = tf.logging
15 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
16 | "The path of host load data")
17 | flags.DEFINE_integer("input_dim", 64, "The length of history window")
18 | flags.DEFINE_integer("hidden_dim", 200, "The length of hidden layer size")
19 | flags.DEFINE_integer("interval", 8, "The number of output interval")
20 | flags.DEFINE_integer("batch_size", 128, "Mini-batch size")
21 | flags.DEFINE_integer("epoch", 60, "The total epochs")
22 | flags.DEFINE_float("lr", 0.05, "Learning rate")
23 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
24 | FLAGS = flags.FLAGS
25 |
26 | class ESN(object):
27 | def __init__(self, is_training, length, leaking_rate=0.2, initLen=50):
28 | self.batch_size = batch_size = FLAGS.batch_size
29 | self.num_steps = num_steps = length
30 | self.inSize = inSize = FLAGS.input_dim
31 | self.resSize = resSize = FLAGS.hidden_dim
32 |
33 | self._input_data = tf.placeholder(tf.float32, [batch_size, length, FLAGS.input_dim])
34 | if is_training:
35 | self._targets = tf.placeholder(tf.float32, [batch_size, length-initLen])
36 | else:
37 | self._targets = tf.placeholder(tf.float32, [batch_size, length])
38 |
39 | self._Win = Win = tf.placeholder(tf.float32, [inSize, resSize])
40 | self._W = W = tf.placeholder(tf.float32, [resSize, resSize])
41 |
42 | zeros = array_ops.zeros(array_ops.pack([batch_size, resSize]), dtype=tf.float32)
43 | zeros.set_shape([None, resSize])
44 | self._initial_state = zeros
45 | # self._initial_state = np.zeros((batch_size, resSize), dtype=np.float32)
46 |
47 | S = []
48 | s = self._initial_state
49 |
50 | with tf.variable_scope("ESN"):
51 | for i in range(num_steps):
52 | s = (1 - leaking_rate) * s + \
53 | leaking_rate * tf.nn.tanh(tf.matmul(self._input_data[:,i,:], Win)+tf.matmul(s,W))
54 | if is_training:
55 |                     if i >= initLen:
56 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
57 | else:
58 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
59 | self._final_state = s
60 |
61 | V_size = inSize + resSize
62 | hidden_output = tf.reshape(tf.concat(1, S), [-1, V_size])
63 |
64 | V = tf.get_variable("v", shape=[V_size, 1], dtype=tf.float32,
65 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./V_size),tf.sqrt(1./V_size)))
66 | b = tf.get_variable("b", shape=[1], dtype=tf.float32,
67 | initializer=tf.constant_initializer(0.1))
68 | logits = tf.add(tf.matmul(hidden_output, V), b)
69 |
70 | target = tf.reshape(self._targets, [-1, 1])
71 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
72 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
73 | self._cost = mse
74 |
75 | if not is_training:
76 | return
77 |
78 | self._lr = tf.Variable(0.0, trainable=False)
79 | tvars = tf.trainable_variables()
80 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
81 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
82 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
83 |
84 | def assign_lr(self, session, lr_value):
85 | session.run(tf.assign(self.lr, lr_value))
86 |
87 | @property
88 | def input_data(self):
89 | return self._input_data
90 |
91 | @property
92 | def Win(self):
93 | return self._Win
94 |
95 | @property
96 | def W(self):
97 | return self._W
98 |
99 | @property
100 | def targets(self):
101 | return self._targets
102 |
103 | @property
104 | def initial_state(self):
105 | return self._initial_state
106 |
107 | @property
108 | def cost(self):
109 | return self._cost
110 |
111 | @property
112 | def final_state(self):
113 | return self._final_state
114 |
115 | @property
116 | def lr(self):
117 | return self._lr
118 |
119 | @property
120 | def train_op(self):
121 | return self._train_op
122 |
123 | def run_train_epoch(session, m, Win, W, data_x, data_y, eval_op):
124 | costs = []
125 | states = []
126 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
127 | cost, state, _ = session.run(
128 | [m.cost, m.final_state, eval_op],
129 | {m.Win: Win,
130 | m.W: W,
131 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
132 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
133 | costs.append(cost)
134 | states.append(state)
135 | return (sum(costs)/len(costs), states)
136 |
137 | def run_test_epoch(session, m, Win, W, data_x, data_y, eval_op, train_state):
138 | costs = []
139 | states = []
140 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
141 | cost, state, _ = session.run(
142 | [m.cost, m.final_state, eval_op],
143 | {m.Win: Win,
144 | m.W: W,
145 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
146 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
147 | m.initial_state: train_state[i]})
148 | costs.append(cost)
149 | states.append(state)
150 | return (sum(costs)/len(costs), states)
151 |
152 | def main(_):
153 | print("===============================================================================")
154 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
155 | print("The interval is", FLAGS.interval, "The batch_size is", FLAGS.batch_size)
156 | print("The data_path is", FLAGS.data_path)
157 | X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(FLAGS.data_path,
158 | FLAGS.input_dim,
159 | 8,
160 | FLAGS.input_dim)
161 |
162 | inSize = FLAGS.input_dim
163 | resSize = FLAGS.hidden_dim
164 | rho = 0.1
165 | cr = 0.05
166 | Win = np.float32(np.random.rand(inSize, resSize)/5 - 0.1)
167 | N = resSize * resSize
168 | W = np.random.rand(N) - 0.5
169 | zero_index = np.random.permutation(N)[int(N * cr * 1.0):]
170 | W[zero_index] = 0
171 | W = W.reshape((resSize, resSize))
172 | rhoW = max(abs(linalg.eig(W)[0]))
173 | W *= rho / rhoW
174 | W = np.float32(W)
175 |
176 | with tf.Graph().as_default(), tf.Session() as session:
177 | with tf.variable_scope("model", reuse=None):
178 | m_train = ESN(is_training=True, length=len(y_train[0]))
179 | with tf.variable_scope("model", reuse=True):
180 | m_test = ESN(is_training=False, length=len(y_test[0]))
181 |
182 | tf.initialize_all_variables().run()
183 |
184 | #log_a = Log('http://localhost:8120','modelA')
185 | # pastalog --serve 8120
186 |
187 | scale = cpu_load_std ** 2
188 | train_best = test_best = 0.0
189 | for i in range(FLAGS.epoch):
190 | if i < FLAGS.epoch/3:
191 | lr_decay = 1
192 | elif i < FLAGS.epoch*2/3:
193 | lr_decay = 0.1
194 | else:
195 | lr_decay = 0.01
196 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
197 | train_loss, train_state = run_train_epoch(session, m_train, Win, W, X_train,
198 | y_train[:,50:,FLAGS.interval-1],
199 | m_train.train_op)
200 | test_loss, _ = run_test_epoch(session, m_test, Win, W,
201 | X_test, y_test[:,:,FLAGS.interval-1],
202 | tf.no_op(), train_state)
203 | if i == 0:
204 | train_best = train_loss
205 | test_best = test_loss
206 | if train_loss < train_best:
207 | train_best = train_loss
208 | if test_loss < test_best:
209 | test_best = test_loss
210 | print("epoch:%3d, learning rate %.5f, train_loss %.6f, test_loss %.6f" %
211 | (i + 1, session.run(m_train.lr), train_loss*scale, test_loss*scale))
212 | #log_a.post("trainLoss", value=float(train_loss), step=i)
213 | #log_a.post("testLoss", value=float(test_loss), step=i)
214 | if i == FLAGS.epoch - 1:
215 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
216 |
217 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
218 |     print("The interval is", FLAGS.interval, "The batch_size is", FLAGS.batch_size)
219 | print("The data_path is", FLAGS.data_path)
220 | print("===============================================================================")
221 |
222 | if __name__ == "__main__":
223 | tf.app.run()
224 |
--------------------------------------------------------------------------------
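read_data in msse_utils.py builds, at each prediction point, n = 8 nested segment means over the next 1, 2, 4, ..., 128 steps (calcu_mean with base_seg = 1), and --interval=k selects the k-th of them, i.e. the mean load over the next 2**(k-1) steps, as the scalar target that y_train[:,50:,FLAGS.interval-1] feeds the model above. A sketch of that target construction on toy data:

    import numpy as np

    future = np.arange(1.0, 129.0)                        # the next 128 load samples
    seg_means = [future[:2**i].mean() for i in range(8)]  # what calcu_mean(..., base_seg=1, n=8) returns
    interval = 3
    target = seg_means[interval - 1]                      # mean over the next 4 steps -> 2.5
--------------------------------------------------------------------------------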
/tensorflow/ESN/MSSE/esn_msse_save_logits.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from msse_utils import read_data
7 | import numpy as np
8 | from scipy import linalg
9 | import pickle
10 | from tensorflow.python.ops import array_ops
11 |
12 | #from pastalog import Log
13 |
14 | flags = tf.flags
15 | logging = tf.logging
16 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
17 | "The path of host load data")
18 | flags.DEFINE_integer("input_dim", 64, "The length of history window")
19 | flags.DEFINE_integer("hidden_dim", 200, "The length of hidden layer size")
20 | flags.DEFINE_integer("interval", 1, "The number of output interval")
21 | flags.DEFINE_integer("batch_size", 128, "Mini-batch size")
22 | flags.DEFINE_integer("epoch", 90, "The total epochs")
23 | flags.DEFINE_float("lr", 0.05, "Learning rate")
24 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
25 | FLAGS = flags.FLAGS
26 |
27 | class ESN(object):
28 | def __init__(self, is_training, length, leaking_rate=0.2, initLen=50):
29 | self.batch_size = batch_size = FLAGS.batch_size
30 | self.num_steps = num_steps = length
31 | self.inSize = inSize = FLAGS.input_dim
32 | self.resSize = resSize = FLAGS.hidden_dim
33 |
34 | self._input_data = tf.placeholder(tf.float32, [batch_size, length, FLAGS.input_dim])
35 | if is_training:
36 | self._targets = tf.placeholder(tf.float32, [batch_size, length-initLen])
37 | else:
38 | self._targets = tf.placeholder(tf.float32, [batch_size, length])
39 |
40 | self._Win = Win = tf.placeholder(tf.float32, [inSize, resSize])
41 | self._W = W = tf.placeholder(tf.float32, [resSize, resSize])
42 |
43 | zeros = array_ops.zeros(array_ops.pack([batch_size, resSize]), dtype=tf.float32)
44 | zeros.set_shape([None, resSize])
45 | self._initial_state = zeros
46 | # self._initial_state = np.zeros((batch_size, resSize), dtype=np.float32)
47 |
48 | S = []
49 | s = self._initial_state
50 |
51 | with tf.variable_scope("ESN"):
52 | for i in range(num_steps):
53 | s = (1 - leaking_rate) * s + \
54 | leaking_rate * tf.nn.tanh(tf.matmul(self._input_data[:,i,:], Win)+tf.matmul(s,W))
55 | if is_training:
56 |                     if i >= initLen:
57 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
58 | else:
59 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
60 | self._final_state = s
61 |
62 | V_size = inSize + resSize
63 | hidden_output = tf.reshape(tf.concat(1, S), [-1, V_size])
64 |
65 | V = tf.get_variable("v", shape=[V_size, 1], dtype=tf.float32,
66 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./V_size),tf.sqrt(1./V_size)))
67 | b = tf.get_variable("b", shape=[1], dtype=tf.float32,
68 | initializer=tf.constant_initializer(0.1))
69 | logits = tf.add(tf.matmul(hidden_output, V), b)
70 |
71 | target = tf.reshape(self._targets, [-1, 1])
72 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
73 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
74 | self._cost = mse
75 | self._logits = logits
76 |
77 | if not is_training:
78 | return
79 |
80 | self._lr = tf.Variable(0.0, trainable=False)
81 | tvars = tf.trainable_variables()
82 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
83 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
84 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
85 |
86 | def assign_lr(self, session, lr_value):
87 | session.run(tf.assign(self.lr, lr_value))
88 |
89 | @property
90 | def input_data(self):
91 | return self._input_data
92 |
93 | @property
94 | def Win(self):
95 | return self._Win
96 |
97 | @property
98 | def W(self):
99 | return self._W
100 |
101 | @property
102 | def targets(self):
103 | return self._targets
104 |
105 | @property
106 | def initial_state(self):
107 | return self._initial_state
108 |
109 | @property
110 | def cost(self):
111 | return self._cost
112 |
113 | @property
114 | def final_state(self):
115 | return self._final_state
116 |
117 | @property
118 | def lr(self):
119 | return self._lr
120 |
121 | @property
122 | def train_op(self):
123 | return self._train_op
124 |
125 | @property
126 | def logits(self):
127 | return self._logits
128 |
129 | def run_train_epoch(session, m, Win, W, data_x, data_y, eval_op):
130 | costs = []
131 | states = []
132 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
133 | cost, state, _ = session.run(
134 | [m.cost, m.final_state, eval_op],
135 | {m.Win: Win,
136 | m.W: W,
137 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
138 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
139 | costs.append(cost)
140 | states.append(state)
141 | return (sum(costs)/len(costs), states)
142 |
143 | def run_test_epoch(session, m, Win, W, data_x, data_y, eval_op, train_state):
144 | costs = []
145 | logits_sum = []
146 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
147 | cost, logits, _ = session.run(
148 | [m.cost, m.logits, eval_op],
149 | {m.Win: Win,
150 | m.W: W,
151 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
152 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
153 | m.initial_state: train_state[i]})
154 | costs.append(cost)
155 | logits_sum.append(logits)
156 | return (sum(costs)/len(costs), logits_sum)
157 |
158 | def main(_):
159 | print("===============================================================================")
160 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
161 | print("The interval is", FLAGS.interval, "The batch_size is", FLAGS.batch_size)
162 | print("The data_path is", FLAGS.data_path)
163 | X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(FLAGS.data_path,
164 | FLAGS.input_dim,
165 | 8,
166 | FLAGS.input_dim)
167 |
168 | inSize = FLAGS.input_dim
169 | resSize = FLAGS.hidden_dim
170 | rho = 0.1
171 | cr = 0.05
172 | Win = np.float32(np.random.rand(inSize, resSize)/5 - 0.1)
173 | N = resSize * resSize
174 | W = np.random.rand(N) - 0.5
175 | zero_index = np.random.permutation(N)[int(N * cr * 1.0):]
176 | W[zero_index] = 0
177 | W = W.reshape((resSize, resSize))
178 | rhoW = max(abs(linalg.eig(W)[0]))
179 | W *= rho / rhoW
180 | W = np.float32(W)
181 |
182 | with tf.Graph().as_default(), tf.Session() as session:
183 | with tf.variable_scope("model", reuse=None):
184 | m_train = ESN(is_training=True, length=len(y_train[0]))
185 | with tf.variable_scope("model", reuse=True):
186 | m_test = ESN(is_training=False, length=len(y_test[0]))
187 |
188 | tf.initialize_all_variables().run()
189 |
190 | #log_a = Log('http://localhost:8120','modelA')
191 | # pastalog --serve 8120
192 |
193 | scale = cpu_load_std ** 2
194 | train_best = test_best = 0.0
195 | for i in range(FLAGS.epoch):
196 | if i < FLAGS.epoch/3:
197 | lr_decay = 1
198 | elif i < FLAGS.epoch*2/3:
199 | lr_decay = 0.1
200 | else:
201 | lr_decay = 0.01
202 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
203 | train_loss, train_state = run_train_epoch(session, m_train, Win, W, X_train,
204 | y_train[:,50:,FLAGS.interval-1],
205 | m_train.train_op)
206 | test_loss, logits = run_test_epoch(session, m_test, Win, W,
207 | X_test, y_test[:,:,FLAGS.interval-1],
208 | tf.no_op(), train_state)
209 | if i == 0:
210 | train_best = train_loss
211 | test_best = test_loss
212 | test_cost_best = np.asarray(logits)
213 | if train_loss < train_best:
214 | train_best = train_loss
215 | if test_loss < test_best:
216 | test_best = test_loss
217 | test_cost_best = np.asarray(logits)
218 | print("epoch:%3d, learning rate %.5f, train_loss %.6f, test_loss %.6f" %
219 | (i + 1, session.run(m_train.lr), train_loss*scale, test_loss*scale))
220 | #log_a.post("trainLoss", value=float(train_loss), step=i)
221 | #log_a.post("testLoss", value=float(test_loss), step=i)
222 | if i == FLAGS.epoch - 1:
223 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
224 | print(test_cost_best.shape)
225 | save_path = "./logits_esn_msse_1024_"+str(FLAGS.interval)+".pkl"
226 | with open(save_path,'wb') as output:
227 | pickle.dump(test_cost_best, output)
228 |
229 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
230 | print("The interval is", FLAGS.interval, "The batch_size is", FLAGS.batch_size)
231 | print("The data_path is", FLAGS.data_path)
232 | print("===============================================================================")
233 |
234 | if __name__ == "__main__":
235 | tf.app.run()
236 |
--------------------------------------------------------------------------------
/tensorflow/ESN/MSSE/msse_utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def calcu_mean(actual_data, start, base_seg, n):
13 | seg_mean = []
14 | for i in range(n):
15 | seg_mean.append(np.mean(actual_data[start:start+base_seg*2**i]))
16 | return seg_mean
17 |
18 | def contextwin(cpu_load, win_i, n, ahead_step):
19 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
20 | a = 26
21 | b = 3
22 | train_len = a * 288 / ahead_step
23 | test_len = (b-1) * 288 / ahead_step + (288 - 2**(n-1)) / ahead_step
24 | train_start = win_i
25 | test_start = a*288 + win_i
26 |
27 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
28 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
29 | train_y = np.asarray([[calcu_mean(m[i], train_start+j*ahead_step, 1, n)
30 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
31 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
32 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
33 | test_y = np.asarray([[calcu_mean(m[i], test_start+j*ahead_step, 1, n)
34 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
35 |
36 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
37 |
38 | def read_data(_data_path, win_i, n, ahead_step):
39 | data_path = _data_path
40 | print("Reading pkl data...")
41 | input_machine = open(data_path,'rb')
42 | cpu_load = pickle.load(input_machine)
43 | input_machine.close()
44 | print("Loading data...")
45 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, n, ahead_step)
46 |
47 | print(X_train.shape, y_train.shape)
48 | print(X_test.shape, y_test.shape)
49 | print(cpu_load_mean)
50 | print(cpu_load_std)
51 |
52 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
53 |
54 | #data_path = "/home/tyrion/Documents/Cloud Computing/python/data/72/tyrion.pkl"
55 | #X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(data_path, 24, 6, 24)
--------------------------------------------------------------------------------
/tensorflow/ESN/MSSE/msse_utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/MSSE/msse_utils.pyc
--------------------------------------------------------------------------------
/tensorflow/ESN/MSSE/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | interval_arr=(1 2 3 4 5 6 7 8)
4 | for interval in ${interval_arr[@]}; do
5 | echo "The interval is $interval"
6 | python esn_msse_main.py --interval=$interval >> 1013.txt
7 | done
8 |
--------------------------------------------------------------------------------
/tensorflow/ESN/README.md:
--------------------------------------------------------------------------------
1 | # Echo State Networks
2 |
3 | > Using ESN to predict the host load
4 |
5 | - [MSE](./MSE) Predict host load several steps ahead, measured with the average MSE (Mean Squared Error) on the Google Cluster Data.
6 | - [MSSE](./MSSE) Predict host load over a long period, measured with the average MSSE (Mean Segment Squared Error) on the Google Cluster Data; both metrics are sketched after this file.
7 | - [autoencoder](./autoencoder) Add an autoencoder before the original ESN model.
8 | - [grid](./grid) Predict host load measured with MSE on the Grid dataset only.
9 |
--------------------------------------------------------------------------------
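Concretely, the MSE experiments score every point of a win_o-step prediction window, while MSSE scores the predicted mean load over exponentially growing segments, one model per segment length via --interval. A small sketch of both scores on toy arrays, under the reading of the MSSE code above that each segment contributes the squared difference of mean loads:

    import numpy as np

    actual = np.random.rand(128)
    pred = np.random.rand(128)

    mse = np.mean((pred - actual) ** 2)            # several-step-ahead MSE

    seg_lens = [2 ** i for i in range(8)]          # segment lengths 1, 2, 4, ..., 128
    msse = np.mean([(pred[:n].mean() - actual[:n].mean()) ** 2 for n in seg_lens])
--------------------------------------------------------------------------------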
/tensorflow/ESN/autoencoder/1024/high/1.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S',\x04\xb9=`\xe09=\xaa+\xdf=<\x0f\xee=@\x13\xe1=\xb2\x80\xc9=\x04%L=V\xd4\xa0=\\_\xa4=\xf2\x8c\xb5=R(\xd2=\xb5\xfc\x08>\x7f\xc0\t>\x1c\xab\x03><\xc4\xa4=\xe2\xa3\xa3=p\x0b\xa2=\xba\x04\xac=\xeb\x02\xea=F\xa1\xb1=\xcc\xd4\xb0=\xd4\xb1\xbd=\x06a\xa5=&\xc0\xbc=V\xfd\xaf=xV\xb0=\xbe\x02\xb6=r\xae\xc6=\xf6\x12\xc8=\xac\xa2\xb2=\xaar\xa6=\x1e#\xa5=Tc\xa7=&\x81\xab=\x1c\xd1\xc9=*\xf8\xb9=\xa0\xc9\xba=\xc6?\xbd=t\xdeS=\x1c|\xc8=\xcc\xb3K=N\x7f\xdd=\xee:\xc3=\x18w<=\xec\xd1?=\\[[=I(\xcf=\xf9\x9f\xce=\xf1\x8c\xcc=i\xdd\xd8=\xf6\x17\xd0=\x8a\x05\xbb=r\x89\xb5=\x1bj\xd0=\x08\xf8\xc7=\x17L\xce=\xd8F\xce=\r\xb9\xd2=\x91\xbd\xf3=\x90x\xdd=\xf0uv=\x00F\xd4=\xd0w[=\xe0y\xcd='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/high/1.png
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/2.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\x809\xa9=\x10K\x99=\x8ab\xc0=3\xaf\xe6=\x8b,\xd6=\x14F\xb4=\xe0\x16\x94=\xa6\xa3\xa0=\xfc\xe0\xa9=\x08\xe1\xbb=G\x86\xe1=\\!\x01>\xfb9\x06>+\x10\xee=\x1e\x9e\xbc=\x8eu\xa7=,\x17\xaa=\x92\xb3\xb5=F~\xc3=X\xad\xbe=4\n\xba=*$\xbd=0\t\xba=t\x07\xbc=\x18\xd8\xc1=,\xa2\xba=\x92\xdf\xbd=T"\xc3=\x8a\x7f\xc7=X\xf4\xba=\xc8\x97\xad=Th\xae=\x82\xe4\xae=\\\x01\xbd=lR\xc4=\xa0}\xc7=\xfek\xbb=d\x08\xa7=^\xc1\x9a=\xf8@\xa0=@\x08\xa9=\xccL\xbb=\xca\xd9\xb1=\xe6\x0b\x80=\x00+e=\xd6z\x89=8\xad\xaf=@X\xc7=\xc4\x0b\xca=N|\xce=R\x85\xc9=\xb8\xb9\xc1=\xc2\xc5\xb7=\xa2\x1b\xbd=V\xc2\xc7=\x10\xdc\xc4= \x1a\xc6=X\xff\xcf=Z\x07\xca=r\xa9\xbb=\x88\xb7\xa9=f\xee\x98=\xdaX\xa3=\xd2i\xa7='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/high/2.png
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/3.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\x8d\xd6\xcc=\x9d\xec\xfb= \x03\xef=\x82\x97\x04>H^\x1c>[\xf2C>\xdc,\x07>\xb71\x88>\xd0\xf7\x95>\x19\xaf\x8c>\xf4\'\xfb=\xe4e\r>\x00V"><\xe7\x1d>\x11\x01\xe0=\x1a^\xff=r\xd7\x07>\xc9\x91\xf9=\x88\xcb\xeb=\xbeT\xdf=\xe2\x19\x03>\x94\x9c\x01>\xb0\x7f\xf0=\xe5F\xf2=\xb05\'>\x9e\xc4\xe5=\x808\xeb=4\xe4\xfe=<6 >\x06\x16\x89>\xa3\xaa\x8a>\x8c\xddF>\x8c\xdb,>\xce\x9e1>\xbd\x85 >\x07)\xe3=:A\x0b>\xda|\n>\x17Q\xea=\xf3\xee\xe4=x\\\x15>\x1dEg>\xe896>\\\xa9\x06>\xf2<*>J\xfa#>\tJ\xd7=\xcc\x9a\xee=\x8a\x90\x11>]\xba?>\xf8\x96\x11>3>\xf8=Rj\x0c>\xa46\t>\xa46\xd5=\x88L\x05>9\xdc\xf6=\xad\xf8\xfd=i\xdb\xe3=\xec\x9a\x02>5u\xf0=\xfa\xe0\xe8=\xea\xa4\xd1=\xc9\xbe\xf4='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/4.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S"o\xae\xed=\xdfm\xf2=\x88.\xfd=\xa9\x16\x11>nw\x1e>\xae\x9f(>\t(/>rv\x7f>?\x01\x8f> \xba}>\xbf\xb2*>\xf5\xf8\x16>n~\x16>\x07\x96\x17>\xd8E\x05>\xc5T\x05>\x10\x06\x07>\xac\xd1\x04>\xb6\xe4\xf9=\xd4\x08\x01>\x8e\x82\x06>\xb8\\\t>UI\x05>6\xa9\t>\x88J\x13>\x16\xdb\n>\x7f0\xfd=IX\n>~\xc77>\x87\xf3}>\xc6\x1b\x80>\xbbdV>\x0c\xf04>\xf7\xf62>\xb5\xda\x1c>7+\x13>\xa6\xe1\x10>\xfaI\n>\x05\x80\x05>n\xb9\x06>\xe0O'>>\xd4T>\x13\xd2>>\x0b2\x1a>\xf4\x12\x1b>\x0c\x92#>tL\x06>\xf2Z\x05>.k\x1a>\xbe\\+>\xe2\xfd\x1a>}+\x0e>P\x9e\t>|\x87\x06>:\xb8\x02>\x98\x94\x07><\xed\x06>2\xd0\x05>\xd3`\xfc=\xbe\xf0\xf7=\xc4\xc4\xf9=S\xec\xf1=\x7fH\xea=b\x80\xfa="
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/Untitled Document:
--------------------------------------------------------------------------------
1 | 0.005
2 |
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/Untitled Document~:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/high/Untitled Document~
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/high/biases.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I200
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\x85&\x16\xc0\x12\x01\x16\xc0\\\xad\x16\xc0\x88\xef\x12\xc0\x16\xb4\x14\xc0\x08\xa4\x16\xc0X\xc8\x15\xc0\xfc}\x15\xc0\x9ey\x13\xc0\xe9\xea\x15\xc04R\x16\xc04r\x17\xc0`\xcc\x14\xc0\xfbx\x14\xc0B\xb7\x18\xc09k\x15\xc0!\x90\x13\xc0\xbc|\x18\xc0\xe3o\x14\xc0\xb4l\x15\xc0\x07\xf2\x12\xc0\x04\x1a\x13\xc05.\x14\xc0\xa7f\x17\xc0\xe5&\x16\xc0o?\x15\xc0\xe7\xfe\x16\xc0$U\x15\xc0Bj\x16\xc0\xf9\xde\x15\xc0\xe3\xf1\x15\xc0\x03\r\x16\xc0ip\x15\xc0.L\x13\xc0\x08\xe3\x17\xc0\xbd\x02\x16\xc0i\xf4\x14\xc0\x1e\x8b\x16\xc0\xa8:\x16\xc0#\xf1\x13\xc0U\xf9\x15\xc0\x0c\x82\x16\xc0[4\x15\xc0\xb4\xd0\x13\xc0\x87\n\x16\xc08\x8d\x18\xc0<\xce\x15\xc0\x0el\x15\xc0!p\x16\xc0O\xd5\x17\xc0t\x17\x19\xc0\x92\xf8\x19\xc0\xabT\x18\xc0\x04\x87\x12\xc0\xae\xb7\x19\xc0-k\x13\xc0o\xf9\x13\xc0\xab\x1c\x16\xc0\xc7c\x19\xc0\xdee\x13\xc0C5\x17\xc08s\x17\xc0\x16\x87\x17\xc0;H\x15\xc0\x1c\x19\x15\xc0-n\x18\xc0/;\x13\xc0\x98\xd0\x12\xc0\x97X\x16\xc0\xd5\x8a\x18\xc0\xf3?\x19\xc0"k\x14\xc0\xaen\x15\xc0S\xe2\x15\xc0\xc5P\x18\xc0Tc\x16\xc0\xca\xfd\x13\xc0\xc5\r\x16\xc0\xb5\xe7\x13\xc0}\x92\x14\xc0\\\xb4\x15\xc0\t\xb9\x16\xc0\xa5A\x17\xc0\x0e\x0e\x12\xc0\x06\xa3\x0f\xc0\xfc\x80\x15\xc0\xca\x11\x12\xc0\xe3\\\x17\xc0d\x80\x14\xc0\x04I\x15\xc0\x9c\x1c\x16\xc0\xbe\xa1\x12\xc0\x9d\x86\x13\xc0\x9f\xf3\x02\xc0\xa4 \x15\xc0\xa1<\x16\xc0\xce\xa5\x13\xc0Wn\x0e\xc0\xbc\x9c\x13\xc0\x91!\x18\xc0dx\x15\xc0\x11\xd1\x15\xc0\x03?\x13\xc0\xffB\x18\xc0\xd6\xaa\x16\xc0\xdcR\x17\xc0\xd3\x07\x16\xc0cX\x14\xc0\xe5t\x13\xc0\xe3\x82\x13\xc0O(\x19\xc0\x92D\x17\xc0\r]\x16\xc0\x8fW\x13\xc0pO\x13\xc0\xea\xe3\x16\xc0\x1a\x91\x18\xc0\x93~\x14\xc0 -\x12\xc0Yi\x17\xc0\x97\xc9\x12\xc0\xf0I\x18\xc0\xc3a\x16\xc0\x9d\x9f\x14\xc0_\x1d\x14\xc0\x14t\x14\xc0\xac\xa8\x14\xc0h\x93\x17\xc0.\xce\x13\xc0\xa9\x86\x15\xc0\xc7Y\x19\xc0\xf6\'\x16\xc0h\xdf\x12\xc0\x9c\xca\x0e\xc0K\xf4\x14\xc0\xcc[\x11\xc0(\x97\x14\xc0\xd2z\x17\xc0\x19<\x17\xc0_\xd8\x13\xc0YO\x14\xc0\x11\xaf\x13\xc0u%\x15\xc0\xff\xc9\x16\xc0u4\x16\xc0\x80\xc0\x17\xc0c\xde\x10\xc0\xef\r\x15\xc0s\x06\x11\xc0\xee}\x13\xc0-\x8d\x16\xc06!\x15\xc0U\x9a\x15\xc0\x7f\xc0\t>\x1c\xab\x03><\xc4\xa4=\xe2\xa3\xa3=p\x0b\xa2=\xba\x04\xac=\xeb\x02\xea=F\xa1\xb1=\xcc\xd4\xb0=\xd4\xb1\xbd=\x06a\xa5=&\xc0\xbc=V\xfd\xaf=xV\xb0=\xbe\x02\xb6=r\xae\xc6=\xf6\x12\xc8=\xac\xa2\xb2=\xaar\xa6=\x1e#\xa5=Tc\xa7=&\x81\xab=\x1c\xd1\xc9=*\xf8\xb9=\xa0\xc9\xba=\xc6?\xbd=t\xdeS=\x1c|\xc8=\xcc\xb3K=N\x7f\xdd=\xee:\xc3=\x18w<=\xec\xd1?=\\[[=I(\xcf=\xf9\x9f\xce=\xf1\x8c\xcc=i\xdd\xd8=\xf6\x17\xd0=\x8a\x05\xbb=r\x89\xb5=\x1bj\xd0=\x08\xf8\xc7=\x17L\xce=\xd8F\xce=\r\xb9\xd2=\x91\xbd\xf3=\x90x\xdd=\xf0uv=\x00F\xd4=\xd0w[=\xe0y\xcd='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/low/1.png
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/2.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\x16b\xbe=\x88\xbcw=CC\xea=\xf2\x00\xfd=\xd4\xe7\xe4=\xbd\x16\xd1=\xb8xo=r\xd9\xab=\x14"\xb8=\xfaz\xc2=}T\xe6=\x81y\x0c>\x07\xe1\r>\xdc\xa1\x05>HQ\xb1=\x18\x95\xaa=|G\xaa=\xbc\t\xb4=\x0cX\xe3=B9\xbf=\xf2\xc9\xb6=\xb6\xad\xc8=\x12\x11\xb8=D\xbb\xd0=\x04\xc9\xc4=\xaa\xb4\xc0=>V\xc0=dh\xcc="\xe0\xd2=\xec\x95\xc1=\xc0\xd9\xae=\xe0\xdc\xab=\xfa\xfe\xa8=\x8aW\xb2=\x1bM\xd3=\xc6\x87\xc1=\x86\xac\xc1=\xfc\xe6\xc6=d\xcd\x8e=\xa9{\xd0=&\\\x88=\xa9\xb3\xe8=<\x9b\xcf=\xc4\x1ai=\xa4&_=p\x8b\x80=\x83\xa7\xd8=n\xa8\xe1=/\xcb\xd7=\xed\xd4\xe4=\x12N\xe0=\xee\xfc\xc9=P\xf4\xb6=J\xd7\xdb=}\x1c\xd6=\xc0Q\xda=\xa0Z\xd8=\xf25\xdf=`\xad\xf3=t\x0c\xdf=\x885\x95=\xaa\x9e\xd8=x\x88\x93=\xa9\xfd\xd4='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/low/2.png
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/3.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\x8d\xd6\xcc=\x9d\xec\xfb= \x03\xef=\x82\x97\x04>H^\x1c>[\xf2C>\xdc,\x07>\xb71\x88>\xd0\xf7\x95>\x19\xaf\x8c>\xf4\'\xfb=\xe4e\r>\x00V"><\xe7\x1d>\x11\x01\xe0=\x1a^\xff=r\xd7\x07>\xc9\x91\xf9=\x88\xcb\xeb=\xbeT\xdf=\xe2\x19\x03>\x94\x9c\x01>\xb0\x7f\xf0=\xe5F\xf2=\xb05\'>\x9e\xc4\xe5=\x808\xeb=4\xe4\xfe=<6 >\x06\x16\x89>\xa3\xaa\x8a>\x8c\xddF>\x8c\xdb,>\xce\x9e1>\xbd\x85 >\x07)\xe3=:A\x0b>\xda|\n>\x17Q\xea=\xf3\xee\xe4=x\\\x15>\x1dEg>\xe896>\\\xa9\x06>\xf2<*>J\xfa#>\tJ\xd7=\xcc\x9a\xee=\x8a\x90\x11>]\xba?>\xf8\x96\x11>3>\xf8=Rj\x0c>\xa46\t>\xa46\xd5=\x88L\x05>9\xdc\xf6=\xad\xf8\xfd=i\xdb\xe3=\xec\x9a\x02>5u\xf0=\xfa\xe0\xe8=\xea\xa4\xd1=\xc9\xbe\xf4='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/4.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\xe4r\xe6=y\xe2\x03>p\x0b\x03>\x97V\x11>k\r$>V\xfdD>\x1eg\x15>}1\x89>\x88H\x97>A\xf8\x8b>\xb5\xac\x12>\xda\x06\x13>\x8e9&>|\x99 >nJ\xf5=\x9a\x99\x04>XI\x0b>\xf3\xdf\x03>\x17)\xfd=w1\xfb=^!\n>\xda`\t>\xa3\xb7\x04>\x11e\x08>*G+>\x16\xe7\x00>\xb74\xfe=\xaf\'\x06>\xd4\xa7(>\xa0)\x89>\xf3,\x89>\xe0\xa2N>7\xff1>K~2>\x8a\xfd#>\xe4\x07\xfc=>\xa6\x10>J\x8c\x13>\xb3\x9f\x04>\xdd?\xfd=\xe8\x81 >o\xccl>\x9exA>p\xa0\x15>\x82\xa7.>\x92\x01*>\x118\xf5=b]\x05>n\xe0\x1c>\xe7\xbe@>\xec}\x1d>\xeef\x08>\x86>\x12>lU\x13>\xca"\xef=\x1b\xc8\x0e>\xe8?\x07>*+\t>\x9a\xae\xfa=D\x82\t>\xc0\x8a\x01>\xac\xb1\x00>O\x04\xf0=\xc0}\x01>'
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/Untitled Document:
--------------------------------------------------------------------------------
1 | 0.001
2 |
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/Untitled Document~:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/low/Untitled Document~
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/low/biases.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I200
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'm"\x16\xc0\x87\xb8\x19\xc0\x12t\x16\xc0\x0f-\x13\xc09\x98\x15\xc0\xc8B\x15\xc0\x9f\xc7\x17\xc0\xba$\x16\xc0\x9du\x16\xc0\xdeA\x16\xc07\x1a\x14\xc04\xa8\x13\xc0\x92\x00\x14\xc0\xf9y\x14\xc0,\xc8\x14\xc0\xd5U\x13\xc0\x1b\xe8\x19\xc0\x7f\xf6\x15\xc0]D\x15\xc0\x9a\x14\x16\xc0J\xf7\x15\xc0i1\x17\xc0\xc6\xbd\xfb\xbfo\x86\x14\xc0\xdd\xe2\x15\xc0Y\xa4\x16\xc0\x08\xb0\x13\xc0c\x95\x16\xc0\xd9\x93\x15\xc0c\xd0\x15\xc0,f\x13\xc0\xdd\x9b\x15\xc0\xea9\x15\xc0L\xc0\x14\xc0H\x18\x15\xc0\xc8\x8e\x14\xc0\xe3\xb1\x14\xc0\x90|\x14\xc0\xd4\x04\x15\xc0\xc5\x1e\x16\xc0\x7f6\x16\xc0\x07\xdf\x14\xc0\xd2N\x14\xc0"\xd8\x16\xc0\x05;\x16\xc0M#\x16\xc0\xc3>\x15\xc0;W\x17\xc0\xf9\x0b\x17\xc0\xb4\xd8\x17\xc0\xc6\x93\x14\xc0\xf7\x15\x16\xc0\xb5\xc3\x14\xc0[\xce\x0c\xc0\xa5k\x14\xc0;;\x18\xc0\x0f\x97\x15\xc0\xb7\xa7\x0f\xc0\xa2\x17\x15\xc0\x9f\xd7\x17\xc0Q\x02\x15\xc0\xfd\xf2\x16\xc0\x8d8\x13\xc0>\xc9\x15\xc0I\x8f\x15\xc06\x18\x14\xc0\x02"\x15\xc0\x8f\xf0\x13\xc0\xee\x89\x15\xc0\xe2\xdd\x15\xc0\xa9m\x13\xc0\x11\x8e\x15\xc0\xbd\x88\x14\xc0E\xa6\x15\xc0\xecp\x14\xc0\xa5\x1c\x14\xc0v.\x15\xc0_\x0f\x14\xc0\x18\xcb\x18\xc0\x96\xbd\x14\xc0\x12\xfe\x14\xc0I\x8a\x16\xc0\xeb#\x15\xc0\xa0\xf9\x17\xc0\xe3\xba\x12\xc0\xae\xe2\x16\xc0\xe0q\x15\xc0S\xc1\x13\xc0\xf8(\x15\xc0rJ\x15\xc0\xea\xe8\x13\xc0@\xb2\x15\xc0S\x03\x15\xc0U\x8b\x1a\xc0\xdc\x0f\x15\xc0\xc8\xbd\x15\xc0\xba\x02\x17\xc0$i\x15\xc0\xe6\xad\x15\xc0b\xc0\x16\xc0\x9f9\x16\xc0f\x89\x14\xc0-\xfc\x15\xc0\xb3.\x16\xc0\xf4?\x17\xc0b\xe9\x17\xc0K\xc5\x14\xc0 #\x13\xc0\xeb\x94\x17\xc0\xd3\x9b\x13\xc0A{\x14\xc0<\x96\x15\xc0\xbe\xa9\x14\xc0\xbf\x8d\x14\xc0\x19\x8f\x18\xc0\xdc\xff\x14\xc0\x03\x1c\x16\xc0\xe6}\x13\xc0\x9e?\x15\xc0Y\xef\x18\xc0\x9b$\x17\xc060\x14\xc0B\x98\x14\xc0Cf\x16\xc0\xac\x89\x15\xc0\xd9\xdd\x14\xc0s\xeb\x13\xc06*\x12\xc0\xd3\r\x16\xc0\xad|\x15\xc0Vn\x17\xc0\x12@\x18\xc0(u\x15\xc0\xdch\x14\xc0\xe4\xec\x13\xc06\xe9\x14\xc0\x11l\x15\xc0\xc8Z\x14\xc0\x96\x05\x15\xc0}\xb3\x16\xc0\xdd\xe9\x12\xc0{\x19\x13\xc0\x8f\x07\x16\xc0.\x0c\x15\xc0%\xaf\x0f\xc0\xbd\xf6\x15\xc0\x83\x96\x13\xc0\xd3\x9a\x14\xc0WZ\x15\xc0\xe0\x84\x14\xc0p\xa8\x14\xc0\x9fV\x17\xc0\x93\x14\x14\xc02\x86\x12\xc0@*\x15\xc0\xfb!\x13\xc0\xdfE\x14\xc0\xbd\xe7\x16\xc0\xdbp\x16\xc0\xa1L\x16\xc0\xf4\n\x15\xc0\x93\x0f\x15\xc0.\xd5\x15\xc0\xeeN\x0c\xc0g\xb6\x17\xc0\xcev\x17\xc0\xff9\x14\xc0n\xf3\x14\xc0\xbe\xc4\x15\xc0\x06~\x16\xc0i\n\x16\xc0\xcd`\x15\xc0\xc1\x12\x15\xc0\x1d2\x15\xc0\x88q\x15\xc0\xf5P\x13\xc0\xcdz\x14\xc03%\x18\xc0\xb7\x19\x14\xc0\xa6?\x15\xc0t\x07\x15\xc0\x03\x87\x11\xc0\xd9\x7f\x15\xc0M\xc9\x16\xc0Cm\x14\xc08\x1d\x16\xc0\xd82\x16\xc02\xed\x13\xc0\xc2\x92\x13\xc0~"\x16\xc0\xef\xd4\x15\xc03A\x17\xc0\xf4\xab\x16\xc0\xb3\x91\x17\xc0_\x16\x17\xc0\x0e\xf7\x14\xc0\xe3\xb3\x17\xc0\xf7P\x16\xc0\x99\x96\x11\xc0\xbf\xb1\x16\xc0'
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/1.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S',\x04\xb9=`\xe09=\xaa+\xdf=<\x0f\xee=@\x13\xe1=\xb2\x80\xc9=\x04%L=V\xd4\xa0=\\_\xa4=\xf2\x8c\xb5=R(\xd2=\xb5\xfc\x08>\x7f\xc0\t>\x1c\xab\x03><\xc4\xa4=\xe2\xa3\xa3=p\x0b\xa2=\xba\x04\xac=\xeb\x02\xea=F\xa1\xb1=\xcc\xd4\xb0=\xd4\xb1\xbd=\x06a\xa5=&\xc0\xbc=V\xfd\xaf=xV\xb0=\xbe\x02\xb6=r\xae\xc6=\xf6\x12\xc8=\xac\xa2\xb2=\xaar\xa6=\x1e#\xa5=Tc\xa7=&\x81\xab=\x1c\xd1\xc9=*\xf8\xb9=\xa0\xc9\xba=\xc6?\xbd=t\xdeS=\x1c|\xc8=\xcc\xb3K=N\x7f\xdd=\xee:\xc3=\x18w<=\xec\xd1?=\\[[=I(\xcf=\xf9\x9f\xce=\xf1\x8c\xcc=i\xdd\xd8=\xf6\x17\xd0=\x8a\x05\xbb=r\x89\xb5=\x1bj\xd0=\x08\xf8\xc7=\x17L\xce=\xd8F\xce=\r\xb9\xd2=\x91\xbd\xf3=\x90x\xdd=\xf0uv=\x00F\xd4=\xd0w[=\xe0y\xcd='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/medium/1.png
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/2.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\xe6<\xb1=\x98V\x84=\x89\x92\xd0=\xff\xb5\xef=\x97\xdc\xdd=*\xdf\xbf=r\xdc\x82=\x14\x88\xa2=\xd4\xbd\xaa=\xac\x04\xbd=\xb1\xcc\xe2=\xbe7\x07>\x9f\xb1\x0b>/\x01\xfc=\xfa-\xb5=\x00\xe8\xa7=B7\xa6=j\xa2\xb2=\xbf\xb6\xd1=n\x14\xbe=\x08\xe4\xb3=6\xa9\xc0=\xd67\xb8=^\xbe\xc0=\xeao\xc6=d\xb0\xb9=.\xde\xbd=\xea\x8c\xc7=\xba\xea\xcc=B\xed\xbc=@\x89\xad=b\xd5\xb1=\x86)\xaf=\xa6\xa3\xbc=\xc5%\xcc=\xe7\x0e\xce=B7\xbd=4Q\xb2=V\xf7\x97=\xf8\x96\xb5=\x08\xa7\x98=\xf1B\xcd=|\xfc\xbf=\x10\xd0f=lTb=\xe0\x02|=\xa8\x11\xbd=\x12\x00\xd2=V6\xcc=#\x01\xd6=\xf4\xfc\xd0=\xd2\x9e\xc4=`z\xb3=z\xb8\xc4=^\xfc\xc9=\xc8\x9f\xc9=\x90\x98\xc9=w\xb0\xce=Q\x81\xd7=4-\xc4=j\xd4\x99=\xa2C\xad=\xf27\x95=\xe0\xae\xb1='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/medium/2.png
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/3.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'\x8d\xd6\xcc=\x9d\xec\xfb= \x03\xef=\x82\x97\x04>H^\x1c>[\xf2C>\xdc,\x07>\xb71\x88>\xd0\xf7\x95>\x19\xaf\x8c>\xf4\'\xfb=\xe4e\r>\x00V"><\xe7\x1d>\x11\x01\xe0=\x1a^\xff=r\xd7\x07>\xc9\x91\xf9=\x88\xcb\xeb=\xbeT\xdf=\xe2\x19\x03>\x94\x9c\x01>\xb0\x7f\xf0=\xe5F\xf2=\xb05\'>\x9e\xc4\xe5=\x808\xeb=4\xe4\xfe=<6 >\x06\x16\x89>\xa3\xaa\x8a>\x8c\xddF>\x8c\xdb,>\xce\x9e1>\xbd\x85 >\x07)\xe3=:A\x0b>\xda|\n>\x17Q\xea=\xf3\xee\xe4=x\\\x15>\x1dEg>\xe896>\\\xa9\x06>\xf2<*>J\xfa#>\tJ\xd7=\xcc\x9a\xee=\x8a\x90\x11>]\xba?>\xf8\x96\x11>3>\xf8=Rj\x0c>\xa46\t>\xa46\xd5=\x88L\x05>9\xdc\xf6=\xad\xf8\xfd=i\xdb\xe3=\xec\x9a\x02>5u\xf0=\xfa\xe0\xe8=\xea\xa4\xd1=\xc9\xbe\xf4='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/4.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I64
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'$R\xe9=\xb8\xc3\xfa=X\xe1\x00>i\xd1\x12><&">n\xd80>\xbb\x8c!>2\x9f\x84>]\xdf\x92>\xc7:\x86>\xf4\xf8\x19>\xa4\xcb\x16>p\x18\x1c><\xe5\x1b>\x01u\xfc=\xa1\xc5\x04>\x1cM\x0b>\xa8\xd7\x03>\xe6m\xfa=\xaa\xf1\xfe=R9\x07>\x8cn\n>F\x13\x05>\xf3Y\x0b>\x85\x03 >\\\x06\x06>\x14\xcf\xfd=\x00\x86\r>\xea\x832>-\xfe\x82>\x01#\x84>\x03\xf0U>B?6>\x16\xda7>\xa7\xc3">P\xe8\x10>V\x0c\x12>\xfc\xce\x0f>xY\x05>\xd3\x92\x05>\x0e\x1b!>h\xf2\\>\x1aE>>\x0eS\x16>\x9a\xc5&>\x16q\'>\x82\x10\xff=&\xa0\x04>4P\x19>"\xd44>\xb4C\x1a>\xc0\x9a\x0b>LW\x0c>\xbe\xce\n>\xaa\xa8\xfd=\x99\x9d\t>\xbat\x07>\xf2\xd9\x05>\xbe|\xfa=\xe4n\x00>\xb2\xae\xfb=\xbf.\xef=l.\xed=\xc3\x15\xf5='
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/Untitled Document:
--------------------------------------------------------------------------------
1 | 0.003
2 |
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/Untitled Document~:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/1024/medium/Untitled Document~
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/1024/medium/biases.pkl:
--------------------------------------------------------------------------------
1 | cnumpy.core.multiarray
2 | _reconstruct
3 | p0
4 | (cnumpy
5 | ndarray
6 | p1
7 | (I0
8 | tp2
9 | S'b'
10 | p3
11 | tp4
12 | Rp5
13 | (I1
14 | (I200
15 | tp6
16 | cnumpy
17 | dtype
18 | p7
19 | (S'f4'
20 | p8
21 | I0
22 | I1
23 | tp9
24 | Rp10
25 | (I3
26 | S'<'
27 | p11
28 | NNNI-1
29 | I-1
30 | I0
31 | tp12
32 | bI00
33 | S'|&\x16\xc0@\x7f\x19\xc0c\x10\x15\xc0\xc9\xaa\x15\xc0\'\x10\x18\xc0\x1f1\x18\xc0\xd3D\x17\xc0\x8a\xa8\x15\xc0$\xf9\x12\xc0\x1e\r\x16\xc0~?\x15\xc00\x9f\x15\xc0\x01\xe4\x14\xc0\xc3\xdc\x14\xc0?!\x18\xc0\xefk\x18\xc0\xe9\x1d\x15\xc0\x1dA\x17\xc0\xc5\xc9\x17\xc0\xda\x1f\x17\xc0\xaa\x1c\x15\xc0\xff8\x15\xc0p\x8b\x14\xc0\t\xaf\x18\xc0\x18\x8d\x13\xc0n*\x12\xc0,\xfa\x12\xc0\x96A\x18\xc0\x98T\x16\xc0\xbbW\x18\xc0\x8b:\x14\xc0\x10\x8e\x13\xc0\xa5\xca\x17\xc0\xd3\xf1\x1a\xc0\xb0G\x16\xc0\xb7\xbe\x15\xc0~\xe6\x10\xc0i\xfd\x17\xc0w\xaf\x13\xc0bo\x15\xc0J\xe3\x16\xc0z6\x13\xc0\xc1\xc3\x14\xc0\x18\xb9\x19\xc0\xff&\x16\xc0_\xa3\x15\xc0\xcf:\x17\xc0f\xf9\x16\xc0\t\x11\x16\xc05Z\x13\xc0\xf6\xbe\x15\xc0\nc\x16\xc0\x11\x9b\x15\xc0\xa8\'\x18\xc0\xd5.\x15\xc0\x13\xc0l\x85\x16\xc0\x10\xcc\x14\xc0o$\x14\xc0n=\x16\xc0\x90\xc1\x17\xc04\xa8\x14\xc0\xb4p\x14\xc0\xe6(\x15\xc0H\x80\x15\xc0\xbc\xc3\x14\xc0\x9d9\x17\xc0;k\x12\xc0\x10\x07\x17\xc0\xfa\xba\x15\xc0\xcb\xc1\x14\xc0\xea\xea\x13\xc0'
34 | p13
35 | tp14
36 | b.
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/ae.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from utils import read_data
7 | import numpy as np
8 | from scipy import linalg
9 | import pickle
10 |
11 | #from pastalog import Log
12 |
13 | flags = tf.flags
14 | logging = tf.logging
15 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
16 | "The path of host load data")
17 | flags.DEFINE_string("ae_path", "high", "The path of weight and bias")
18 | flags.DEFINE_integer("input_dim", 64, "The length of history window")
19 | flags.DEFINE_integer("ae_dim", 200, "The length of autoencoder hidden layer size")
20 | flags.DEFINE_integer("hidden_dim", 100, "The length of hidden layer size")
21 | flags.DEFINE_integer("output_dim", 6, "The length of prediction window")
22 | flags.DEFINE_integer("batch_size", 128, "Mini-batch size")
23 | flags.DEFINE_integer("epoch", 90, "The total epochs")
24 | flags.DEFINE_float("lr", 0.05, "Learning rate")
25 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
26 | FLAGS = flags.FLAGS
27 |
28 | def get_ae_weight():
29 | with open("./1024/"+FLAGS.ae_path+"/weights.pkl", 'rb') as input_1:
30 | ae_weight = pickle.load(input_1)
31 | with open("./1024/"+FLAGS.ae_path+"/biases.pkl", 'rb') as input_2:
32 | ae_bias = pickle.load(input_2)
33 |
34 | return (ae_weight, ae_bias)
35 |
36 | class ESN(object):
37 | def __init__(self, is_training, length, leaking_rate=0.2, initLen=100):
38 | self.batch_size = batch_size = FLAGS.batch_size
39 | self.num_steps = num_steps = length
40 | self.inSize = inSize = FLAGS.input_dim
41 | self.Win_inSize = Win_inSize = FLAGS.ae_dim
42 | self.resSize = resSize = FLAGS.hidden_dim
43 |
44 | self._input_data = tf.placeholder(tf.float32, [batch_size, length, FLAGS.input_dim])
45 | if is_training:
46 | self._targets = tf.placeholder(tf.float32, [batch_size, length-initLen, FLAGS.output_dim])
47 | else:
48 | self._targets = tf.placeholder(tf.float32, [batch_size, length, FLAGS.output_dim])
49 | self._ae_weights = tf.placeholder(tf.float32, [FLAGS.input_dim, FLAGS.ae_dim])
50 | self._ae_biases = tf.placeholder(tf.float32, [FLAGS.ae_dim])
51 |
52 | self._Win = Win = tf.placeholder(tf.float32, [Win_inSize, resSize])
53 | self._W = W = tf.placeholder(tf.float32, [resSize, resSize])
54 |
55 | self._initial_state = np.zeros((batch_size, resSize), dtype=np.float32)
56 |
57 | S = []
58 | s = self._initial_state
59 |
60 | with tf.variable_scope("ESN"):
61 | for i in range(num_steps):
62 | ae_out = tf.nn.sigmoid(tf.add(tf.matmul(self._input_data[:,i,:],
63 | self._ae_weights),
64 | self._ae_biases))
65 | s = (1 - leaking_rate) * s + \
66 | leaking_rate * tf.nn.tanh(tf.matmul(ae_out, Win)+tf.matmul(s,W))  # leaky-integrator reservoir update
67 | if is_training:
68 | if i >= initLen:
69 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
70 | else:
71 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
72 | self._final_state = s
73 |
74 | V_size = inSize + resSize
75 | hidden_output = tf.reshape(tf.concat(1, S), [-1, V_size])
76 |
77 | V = tf.get_variable("v", shape=[V_size, FLAGS.output_dim], dtype=tf.float32,
78 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./V_size),tf.sqrt(1./V_size)))
79 | b = tf.get_variable("b", shape=[FLAGS.output_dim], dtype=tf.float32,
80 | initializer=tf.constant_initializer(0.1))
81 | logits = tf.add(tf.matmul(hidden_output, V), b)
82 |
83 | target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
84 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
85 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
86 | self._cost = mse
87 |
88 | if not is_training:
89 | return
90 |
91 | self._lr = tf.Variable(0.0, trainable=False)
92 | tvars = tf.trainable_variables()
93 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
94 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
95 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
96 |
97 | def assign_lr(self, session, lr_value):
98 | session.run(tf.assign(self.lr, lr_value))
99 |
100 | @property
101 | def input_data(self):
102 | return self._input_data
103 |
104 | @property
105 | def Win(self):
106 | return self._Win
107 |
108 | @property
109 | def W(self):
110 | return self._W
111 |
112 | @property
113 | def targets(self):
114 | return self._targets
115 |
116 | @property
117 | def ae_weights(self):
118 | return self._ae_weights
119 |
120 | @property
121 | def ae_biases(self):
122 | return self._ae_biases
123 |
124 | @property
125 | def initial_state(self):
126 | return self._initial_state
127 |
128 | @property
129 | def cost(self):
130 | return self._cost
131 |
132 | @property
133 | def final_state(self):
134 | return self._final_state
135 |
136 | @property
137 | def lr(self):
138 | return self._lr
139 |
140 | @property
141 | def train_op(self):
142 | return self._train_op
143 |
144 | def run_train_epoch(session, m, Win, W, data_x, data_y, ae_weight, ae_bias, eval_op):
145 | costs = []
146 | states = []
147 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
148 | cost, state, _ = session.run(
149 | [m.cost, m.final_state, eval_op],
150 | {m.Win: Win,
151 | m.W: W,
152 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
153 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
154 | m.ae_weights: ae_weight,
155 | m.ae_biases: ae_bias})
156 | costs.append(cost)
157 | states.append(state)
158 | return (sum(costs)/len(costs), states)
159 |
160 | def run_test_epoch(session, m, Win, W, data_x, data_y, ae_weight, ae_bias, eval_op, train_state):  # train_state is accepted for symmetry with main() but not used here
161 | costs = []
162 | states = []
163 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
164 | cost, state, _ = session.run(
165 | [m.cost, m.final_state, eval_op],
166 | {m.Win: Win,
167 | m.W: W,
168 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
169 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
170 | m.ae_weights: ae_weight,
171 | m.ae_biases: ae_bias})
172 | costs.append(cost)
173 | states.append(state)
174 | return (sum(costs)/len(costs), states)
175 |
176 | def main(_):
177 | print("===============================================================================")
178 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
179 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
180 | print("The ae_path is", FLAGS.ae_path)
181 | print("The data_path is", FLAGS.data_path)
182 | X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(FLAGS.data_path,
183 | FLAGS.input_dim,
184 | FLAGS.output_dim,
185 | 32)
186 |
187 | Win_inSize = FLAGS.ae_dim
188 | resSize = FLAGS.hidden_dim
189 | rho = 0.1
190 | # rho = 0.9
191 | cr = 0.05
192 | Win = np.float32(np.random.rand(Win_inSize, resSize)/5 - 0.1)
193 | # Win = np.float32(np.random.rand(Win_inSize, resSize) - 0.5)
194 | N = resSize * resSize
195 | W = np.random.rand(N) - 0.5
196 | zero_index = np.random.permutation(N)[int(N * cr * 1.0):]
197 | W[zero_index] = 0
198 | W = W.reshape((resSize, resSize))
199 | rhoW = max(abs(linalg.eig(W)[0]))  # spectral radius of the random reservoir matrix
200 | W *= rho / rhoW  # rescale W so its spectral radius equals rho
201 | W = np.float32(W)
202 |
203 | ae_weight, ae_bias = get_ae_weight()
204 |
205 | with tf.Graph().as_default(), tf.Session() as session:
206 | with tf.variable_scope("model", reuse=None):
207 | m_train = ESN(is_training=True, length=len(y_train[0]))
208 | with tf.variable_scope("model", reuse=True):
209 | m_test = ESN(is_training=False, length=len(y_test[0]))
210 |
211 | tf.initialize_all_variables().run()
212 |
213 | #log_a = Log('http://localhost:8120','modelA')
214 | # pastalog --serve 8120
215 |
216 | scale = cpu_load_std ** 2
217 | train_best = test_best = 0.0
218 | for i in range(FLAGS.epoch):
219 | if i < FLAGS.epoch/3:
220 | lr_decay = 1
221 | elif i < FLAGS.epoch*2/3:
222 | lr_decay = 0.1
223 | else:
224 | lr_decay = 0.01
225 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
226 | train_loss, train_state = run_train_epoch(session, m_train, Win, W, X_train,
227 | y_train[:,100:,:],
228 | ae_weight, ae_bias, m_train.train_op)
229 | test_loss, _ = run_test_epoch(session, m_test, Win, W,
230 | X_test, y_test,
231 | ae_weight, ae_bias, tf.no_op(), train_state)
232 | if i == 0:
233 | train_best = train_loss
234 | test_best = test_loss
235 | if train_loss < train_best:
236 | train_best = train_loss
237 | if test_loss < test_best:
238 | test_best = test_loss
239 | print("epoch:%3d, learning rate %.5f, train_loss %.6f, test_loss %.6f" %
240 | (i + 1, session.run(m_train.lr), train_loss*scale, test_loss*scale))
241 | #log_a.post("trainLoss", value=float(train_loss), step=i)
242 | #log_a.post("testLoss", value=float(test_loss), step=i)
243 | if i == FLAGS.epoch - 1:
244 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
245 |
246 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
247 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
248 | print("The ae_path is", FLAGS.ae_path)
249 | print("The data_path is", FLAGS.data_path)
250 | print("===============================================================================")
251 |
252 | if __name__ == "__main__":
253 | tf.app.run()
254 |
--------------------------------------------------------------------------------
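
The unrolled loop in ae.py above implements a leaky-integrator echo state network whose 64-point input windows are first compressed by a fixed, pre-trained autoencoder layer. A minimal NumPy sketch of one time step of that recurrence (the helper names `esn_step` and `sigmoid` are illustrative; shapes follow the ae.py defaults):

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def esn_step(x, s, ae_W, ae_b, Win, W, leaking_rate=0.2):
        # One unrolled step of the graph built in ae.py: a fixed autoencoder
        # layer compresses the history window, then a leaky reservoir update.
        ae_out = sigmoid(x.dot(ae_W) + ae_b)
        return (1 - leaking_rate) * s + leaking_rate * np.tanh(ae_out.dot(Win) + s.dot(W))

    # Illustrative shapes: batch 128, input_dim 64, ae_dim 200, hidden_dim 100.
    x, s = np.zeros((128, 64)), np.zeros((128, 100))
    ae_W, ae_b = np.zeros((64, 200)), np.zeros(200)
    Win, W = np.zeros((200, 100)), np.zeros((100, 100))
    s = esn_step(x, s, ae_W, ae_b, Win, W)

Only the readout V and b are trained; Win, W, and the autoencoder parameters stay fixed, which is why ae.py feeds them as placeholders rather than variables.
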
/tensorflow/ESN/autoencoder/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | output_dim_arr=(6 12 18 24 30 36)
3 | ae_path_arr=("high" "medium" "low")
4 | for output_dim in ${output_dim_arr[@]}; do
5 | echo "The output_dim is $output_dim"
6 | for ae_path in ${ae_path_arr[@]}; do
7 | echo "The ae_path is $ae_path"
8 | python ae.py --output_dim=$output_dim --ae_path=$ae_path >> 0723.txt
9 | done
10 | done
11 |
--------------------------------------------------------------------------------
/tensorflow/ESN/autoencoder/utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def contextwin(cpu_load, win_i, win_o, ahead_step):
13 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
14 | a = 26
15 | b = 3
16 | train_len = a * 288 / ahead_step
17 | test_len = (b-1) * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
18 | train_start = win_i
19 | test_start = a*288 + win_i
20 |
21 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
22 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
23 | train_y = np.asarray([[m[i][train_start+j*ahead_step:train_start+j*ahead_step+win_o]
24 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
25 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
26 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
27 | test_y = np.asarray([[m[i][test_start+j*ahead_step:test_start+j*ahead_step+win_o]
28 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
29 |
30 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
31 |
32 | def read_data(_data_path, win_i, win_o, ahead_step):
33 | data_path = _data_path
34 | print("Reading pkl data...")
35 | input_machine = open(data_path,'rb')
36 | cpu_load = pickle.load(input_machine)
37 | input_machine.close()
38 | print("Loading data...")
39 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, win_o, ahead_step)
40 |
41 | print(X_train.shape, y_train.shape)
42 | print(X_test.shape, y_test.shape)
43 | print(cpu_load_mean)
44 | print(cpu_load_std)
45 |
46 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
47 |
--------------------------------------------------------------------------------
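
contextwin above builds sliding history/target windows over a fixed 26-day training and roughly 3-day test split (288 five-minute samples per day). A self-contained sketch of the same slicing for a single machine, with illustrative window settings:

    import numpy as np

    # Synthetic trace: 29 days of 5-minute CPU-load samples.
    trace = np.random.rand(29 * 288).astype(np.float32)

    win_i, win_o, ahead_step = 64, 6, 64   # history window, prediction window, stride
    train_len = 26 * 288 // ahead_step     # 117 training windows
    train_start = win_i

    X = np.stack([trace[train_start + j*ahead_step - win_i : train_start + j*ahead_step]
                  for j in range(train_len)])
    y = np.stack([trace[train_start + j*ahead_step : train_start + j*ahead_step + win_o]
                  for j in range(train_len)])
    print(X.shape, y.shape)   # (117, 64) (117, 6)

contextwin performs this slicing for all 1024 machines at once, producing the (machines, windows, win_i) arrays whose shapes read_data prints.
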
/tensorflow/ESN/autoencoder/utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/autoencoder/utils.pyc
--------------------------------------------------------------------------------
/tensorflow/ESN/grid/esn_mse_main.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from utils_grid import test_dataset
7 | import numpy as np
8 | import pickle
9 | from scipy import linalg
10 | from tensorflow.python.ops import array_ops
11 |
12 | #from pastalog import Log
13 |
14 | flags = tf.flags
15 | logging = tf.logging
16 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
17 | "The path of host load data")
18 | flags.DEFINE_integer("input_dim", 64, "The length of history window")
19 | flags.DEFINE_integer("hidden_dim", 200, "The length of hidden layer size")
20 | flags.DEFINE_integer("output_dim", 36, "The length of prediction window")
21 | flags.DEFINE_integer("batch_size", 32, "Mini-batch size")
22 | flags.DEFINE_integer("test_batch_size", 32, "Mini-batch size of testing data")
23 | flags.DEFINE_string("grid", "sahara", "The machine of Grid")
24 | flags.DEFINE_integer("epoch", 60, "The total epochs")
25 | flags.DEFINE_float("lr", 0.05, "Learning rate")
26 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
27 | FLAGS = flags.FLAGS
28 |
29 | class ESN(object):
30 | def __init__(self, is_training, length, batch_size, leaking_rate=0.2, initLen=50):
31 | self.batch_size = batch_size
32 | self.num_steps = num_steps = length
33 | self.inSize = inSize = FLAGS.input_dim
34 | self.resSize = resSize = FLAGS.hidden_dim
35 |
36 | self._input_data = tf.placeholder(tf.float32, [batch_size, length, FLAGS.input_dim])
37 | if is_training:
38 | self._targets = tf.placeholder(tf.float32, [batch_size, length-initLen, FLAGS.output_dim])
39 | else:
40 | self._targets = tf.placeholder(tf.float32, [batch_size, length, FLAGS.output_dim])
41 |
42 | self._Win = Win = tf.placeholder(tf.float32, [inSize, resSize])
43 | self._W = W = tf.placeholder(tf.float32, [resSize, resSize])
44 |
45 | zeros = array_ops.zeros(array_ops.pack([batch_size, resSize]), dtype=tf.float32)
46 | zeros.set_shape([None, resSize])
47 | self._initial_state = zeros
48 | # self._initial_state = np.zeros((batch_size, resSize), dtype=np.float32)
49 |
50 | S = []
51 | s = self._initial_state
52 |
53 | with tf.variable_scope("ESN"):
54 | for i in range(num_steps):
55 | s = (1 - leaking_rate) * s + \
56 | leaking_rate * tf.nn.tanh(tf.matmul(self._input_data[:,i,:], Win)+tf.matmul(s,W))
57 | if is_training:
58 | if i >= initLen:
59 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
60 | else:
61 | S.append(tf.concat(1, [self._input_data[:,i,:], s]))
62 | self._final_state = s
63 |
64 | V_size = inSize + resSize
65 | hidden_output = tf.reshape(tf.concat(1, S), [-1, V_size])
66 |
67 | V = tf.get_variable("v", shape=[V_size, FLAGS.output_dim], dtype=tf.float32,
68 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./V_size),tf.sqrt(1./V_size)))
69 | b = tf.get_variable("b", shape=[FLAGS.output_dim], dtype=tf.float32,
70 | initializer=tf.constant_initializer(0.1))
71 | logits = tf.add(tf.matmul(hidden_output, V), b)
72 |
73 | target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
74 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
75 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
76 | self._cost = mse
77 |
78 | if not is_training:
79 | return
80 |
81 | self._lr = tf.Variable(0.0, trainable=False)
82 | tvars = tf.trainable_variables()
83 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
84 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
85 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
86 |
87 | def assign_lr(self, session, lr_value):
88 | session.run(tf.assign(self.lr, lr_value))
89 |
90 | @property
91 | def input_data(self):
92 | return self._input_data
93 |
94 | @property
95 | def Win(self):
96 | return self._Win
97 |
98 | @property
99 | def W(self):
100 | return self._W
101 |
102 | @property
103 | def targets(self):
104 | return self._targets
105 |
106 | @property
107 | def initial_state(self):
108 | return self._initial_state
109 |
110 | @property
111 | def cost(self):
112 | return self._cost
113 |
114 | @property
115 | def final_state(self):
116 | return self._final_state
117 |
118 | @property
119 | def lr(self):
120 | return self._lr
121 |
122 | @property
123 | def train_op(self):
124 | return self._train_op
125 |
126 | def run_train_epoch(session, m, Win, W, data_x, data_y, eval_op):
127 | costs = []
128 | states = []
129 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
130 | cost, state, _ = session.run(
131 | [m.cost, m.final_state, eval_op],
132 | {m.Win: Win,
133 | m.W: W,
134 | m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
135 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
136 | costs.append(cost)
137 | states.append(state)
138 | return (np.mean(costs), states)
139 |
140 | def run_test_epoch(session, m, Win, W, data_x, data_y, eval_op):
141 | costs = []
142 | states = []
143 | for i in xrange(int(len(data_y) / FLAGS.test_batch_size)):
144 | cost, state, _ = session.run(
145 | [m.cost, m.final_state, eval_op],
146 | {m.Win: Win,
147 | m.W: W,
148 | m.input_data: data_x[i*FLAGS.test_batch_size:(i+1)*FLAGS.test_batch_size],
149 | m.targets: data_y[i*FLAGS.test_batch_size:(i+1)*FLAGS.test_batch_size]})
150 | costs.append(cost)
151 | states.append(state)
152 | return (np.mean(costs), states)
153 |
154 | def main(_):
155 | print("===============================================================================")
156 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
157 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
158 | print("The data_path is", FLAGS.data_path, "The machine is", FLAGS.grid)
159 |
160 | with open("./data/"+FLAGS.grid+".pkl", 'rb') as f:
161 | grid = pickle.load(f)
162 | X_train, y_train, X_test, y_test, std_grid = test_dataset(grid,
163 | FLAGS.input_dim, FLAGS.output_dim, FLAGS.input_dim)
164 |
165 | inSize = FLAGS.input_dim
166 | resSize = FLAGS.hidden_dim
167 | rho = 0.1
168 | # rho = 0.9
169 | cr = 0.05
170 | Win = np.float32(np.random.rand(inSize, resSize)/5 - 0.1)
171 | # Win = np.float32(np.random.rand(inSize, resSize) - 0.5)
172 | N = resSize * resSize
173 | W = np.random.rand(N) - 0.5
174 | zero_index = np.random.permutation(N)[int(N * cr * 1.0):]
175 | W[zero_index] = 0
176 | W = W.reshape((resSize, resSize))
177 | rhoW = max(abs(linalg.eig(W)[0]))
178 | W *= rho / rhoW
179 | W = np.float32(W)
180 |
181 | with tf.Graph().as_default(), tf.Session() as session:
182 | with tf.variable_scope("model", reuse=None):
183 | m_train = ESN(is_training=True, length=X_train.shape[1],
184 | batch_size=FLAGS.batch_size)
185 | with tf.variable_scope("model", reuse=True):
186 | m_test = ESN(is_training=False, length=X_test.shape[1],
187 | batch_size=FLAGS.test_batch_size)
188 |
189 | tf.initialize_all_variables().run()
190 |
191 | #log_a = Log('http://localhost:8120','modelA')
192 | # pastalog --serve 8120
193 |
194 | scale = std_grid ** 2
195 | train_best = test_best = 0.0
196 | for i in range(FLAGS.epoch):
197 | if i < FLAGS.epoch/3:
198 | lr_decay = 1
199 | elif i < FLAGS.epoch*2/3:
200 | lr_decay = 0.1
201 | else:
202 | lr_decay = 0.01
203 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
204 | train_loss, _ = run_train_epoch(session, m_train, Win, W, X_train,
205 | y_train[:,50:,:], m_train.train_op)
206 | test_loss, _ = run_test_epoch(session, m_test, Win, W,
207 | X_test, y_test, tf.no_op())
208 | if i == 0:
209 | train_best = train_loss
210 | test_best = test_loss
211 | if train_loss < train_best:
212 | train_best = train_loss
213 | if test_loss < test_best:
214 | test_best = test_loss
215 | print("epoch:%3d, learning rate %.5f, train_loss %.6f, test_loss %.6f" %
216 | (i + 1, session.run(m_train.lr), train_loss*scale, test_loss*scale))
217 | #log_a.post("trainLoss", value=float(train_loss), step=i)
218 | #log_a.post("testLoss", value=float(test_loss), step=i)
219 | if i == FLAGS.epoch - 1:
220 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
221 |
222 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
223 | print("The output_dim is", FLAGS.output_dim, "The batch_size is", FLAGS.batch_size)
224 | print("The data_path is", FLAGS.data_path, "The machine is", FLAGS.grid)
225 | print("===============================================================================")
226 |
227 | if __name__ == "__main__":
228 | tf.app.run()
229 |
--------------------------------------------------------------------------------
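
The reservoir in main() above is generated once in NumPy and then fed into the graph through the Win and W placeholders. A standalone sketch of that recipe (`make_reservoir` is an illustrative name; rho and cr match the defaults in the script):

    import numpy as np
    from scipy import linalg

    def make_reservoir(in_size, res_size, rho=0.1, cr=0.05, seed=0):
        # Dense input weights plus a sparse recurrent matrix rescaled so the
        # spectral radius of W equals rho (the usual echo-state condition).
        rng = np.random.RandomState(seed)
        Win = (rng.rand(in_size, res_size) / 5.0 - 0.1).astype(np.float32)
        n = res_size * res_size
        W = rng.rand(n) - 0.5
        W[rng.permutation(n)[int(n * cr):]] = 0.0   # keep only a cr fraction nonzero
        W = W.reshape(res_size, res_size)
        W *= rho / max(abs(linalg.eig(W)[0]))
        return Win, W.astype(np.float32)

    Win, W = make_reservoir(64, 200)
    print(Win.shape, W.shape)   # (64, 200) (200, 200)
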
/tensorflow/ESN/grid/test.sh:
--------------------------------------------------------------------------------
1 | output_dim_arr=(6 12 18 24 30 36)
2 | grid_arr=("axp0" "axp7" "sahara" "themis")
3 | for output_dim in ${output_dim_arr[@]}; do
4 | echo "The output_dim is $output_dim"
5 | for grid in ${grid_arr[@]}; do
6 | echo "The grid is $grid"
7 | python esn_mse_main.py --output_dim=$output_dim --grid=$grid >> 0902.txt
8 | done
9 | done
10 |
--------------------------------------------------------------------------------
/tensorflow/ESN/grid/utils_grid.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def contextwin(cpu_load, win_i, win_o, ahead_step):
13 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
14 | a = 26
15 | b = 3
16 | train_len = a * 288 / ahead_step
17 | test_len = (b-1) * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
18 | train_start = win_i
19 | test_start = a*288 + win_i
20 |
21 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
22 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
23 | train_y = np.asarray([[m[i][train_start+j*ahead_step:train_start+j*ahead_step+win_o]
24 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
25 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
26 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
27 | test_y = np.asarray([[m[i][test_start+j*ahead_step:test_start+j*ahead_step+win_o]
28 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
29 |
30 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
31 |
32 | def read_data(_data_path, win_i, win_o, ahead_step):
33 | data_path = _data_path
34 | print("Reading pkl data...")
35 | input_machine = open(data_path,'rb')
36 | cpu_load = pickle.load(input_machine)
37 | input_machine.close()
38 | print("Loading data...")
39 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, win_o, ahead_step)
40 |
41 | print(X_train.shape, y_train.shape)
42 | print(X_test.shape, y_test.shape)
43 | print(cpu_load_mean)
44 | print(cpu_load_std)
45 |
46 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
47 |
48 | def truncated_index(long_period, num, reverse):
49 | index = []
50 | single_period_len = int(long_period/num)
51 | for i in range(num):
52 | if reverse:
53 | j = num-1 - i
54 | else:
55 | j = i
56 | index.append(range(j*single_period_len, (j+1)*single_period_len))
57 | return single_period_len, index
58 |
59 | def test_dataset(load, win_i, win_o, ahead_step):
60 | load_len = len(load)
61 | load_test_len = int(0.2 * load_len)
62 | load_train_len = int(0.8 * load_len)
63 | load = np.asarray(load)
64 | load -= np.mean(load[:load_train_len])
65 | load_std = np.std(load[:load_train_len])
66 | load /= load_std
67 | model_train_len = 26 * 288 / ahead_step
68 | model_test_len = 2 * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
69 | tr_num = int(load_train_len / (16 * ahead_step * model_train_len)) * 16
70 | te_num = int(load_test_len / (16 * ahead_step * model_test_len)) * 16
71 | print("Unix system train", tr_num, ", test", te_num)
72 | train_start = load_train_len - tr_num * ahead_step * model_train_len
73 | test_start = -load_test_len
74 | X_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step:
75 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i]
76 | for j in range(model_train_len)] for i in range(tr_num)])
77 | y_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step+win_i:
78 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i+win_o]
79 | for j in range(model_train_len)] for i in range(tr_num)])
80 | X_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step:
81 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i]
82 | for j in range(model_test_len)] for i in range(te_num)])
83 | y_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step+win_i:
84 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i+win_o]
85 | for j in range(model_test_len)] for i in range(te_num)])
86 | print(X_train.shape, y_train.shape)
87 | print(X_test.shape, y_test.shape)
88 | return X_train, y_train, X_test, y_test, load_std
89 |
--------------------------------------------------------------------------------
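
test_dataset above standardizes the whole trace with statistics from the training portion only (the first 80%) and returns the standard deviation, so losses computed on scaled data can be mapped back to the original units. The core of that convention in isolation, on a hypothetical trace:

    import numpy as np

    load = np.random.rand(10000).astype(np.float32)   # hypothetical Grid CPU-load trace
    train_part = int(0.8 * len(load))

    load -= np.mean(load[:train_part])   # center and scale with training statistics only
    load_std = np.std(load[:train_part])
    load /= load_std

    mse_scaled = 0.01                          # illustrative model MSE on scaled data
    mse_original = mse_scaled * load_std ** 2  # the scale = std ** 2 rescaling in main()

Using training-set statistics only avoids leaking information from the test period into the normalization.
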
/tensorflow/ESN/grid/utils_grid.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/ESN/grid/utils_grid.pyc
--------------------------------------------------------------------------------
/tensorflow/LSTM/MSE/1222.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/LSTM/MSE/1222.txt
--------------------------------------------------------------------------------
/tensorflow/LSTM/MSE/main.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from utils import read_data, truncated_index
7 | import rnn_cell
8 |
9 | import numpy as np
10 | import time
11 |
12 | #from pastalog import Log
13 |
14 | flags = tf.flags
15 | logging = tf.logging
16 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
17 | "The path of host load data")
18 | flags.DEFINE_integer("input_dim", 24, "The length of history window")
19 | flags.DEFINE_integer("hidden_dim", 128, "The length of hidden layer size")
20 | flags.DEFINE_integer("output_dim", 6, "The length of prediction window")
21 | flags.DEFINE_integer("batch_size", 128, "Mini-batch size")
22 | flags.DEFINE_integer("trun_num", 8, "The number of small truncated period")
23 | flags.DEFINE_bool("reverse", False, "Whether to reverse the training sequence")
24 | flags.DEFINE_integer("epoch", 60, "The total epochs")
25 | flags.DEFINE_integer("layer", 1, "The number of recurrent layers")
26 | flags.DEFINE_float("lr", 0.05, "Learning rate")
27 | flags.DEFINE_string("model", "lstm", "The RNN type")
28 | flags.DEFINE_float("keep_prob", 1.0, "keep prob")
29 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
30 | FLAGS = flags.FLAGS
31 |
32 | class RNNModel(object):
33 | def __init__(self, is_training, length):
34 | self.batch_size = batch_size = FLAGS.batch_size
35 | self.num_steps = num_steps = length
36 | hidden_size = FLAGS.hidden_dim
37 |
38 | self._input_data = tf.placeholder(tf.float32, [batch_size, None, FLAGS.input_dim])
39 | self._targets = tf.placeholder(tf.float32, [batch_size, None, FLAGS.output_dim])
40 |
41 | if FLAGS.model == "rnn":
42 | vanilla_rnn_cell = rnn_cell.BasicRNNCell(num_units=FLAGS.hidden_dim)
43 | if is_training and FLAGS.keep_prob < 1:
44 | vanilla_rnn_cell = rnn_cell.DropoutWrapper(vanilla_rnn_cell,
45 | output_keep_prob=FLAGS.keep_prob)
46 | if FLAGS.layer == 1:
47 | cell = vanilla_rnn_cell
48 | elif FLAGS.layer == 2:
49 | cell = rnn_cell.MultiRNNCell([vanilla_rnn_cell] * 2)
50 | elif FLAGS.model == "lstm":
51 | lstm_cell = rnn_cell.BasicLSTMCell(num_units=FLAGS.hidden_dim,
52 | forget_bias=1.0)
53 | if is_training and FLAGS.keep_prob < 1:
54 | lstm_cell = rnn_cell.DropoutWrapper(lstm_cell,
55 | output_keep_prob=FLAGS.keep_prob)
56 | if FLAGS.layer == 1:
57 | cell = lstm_cell
58 | elif FLAGS.layer == 2:
59 | cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
60 | elif FLAGS.model == "gru":
61 | gru_cell = rnn_cell.GRUCell(num_units=FLAGS.hidden_dim)
62 | if is_training and FLAGS.keep_prob < 1:
63 | gru_cell = rnn_cell.DropoutWrapper(gru_cell,
64 | output_keep_prob=FLAGS.keep_prob)
65 | cell = gru_cell
66 | else:
67 | raise ValueError("Invalid model: %s" % FLAGS.model)
68 |
69 | self._initial_state = cell.zero_state(batch_size, tf.float32)
70 |
71 | outputs = []
72 | state = self._initial_state
73 | with tf.variable_scope("RNN"):
74 | for time_step in range(num_steps):
75 | if time_step > 0:
76 | tf.get_variable_scope().reuse_variables()
77 | (cell_output, state) = cell(self._input_data[:, time_step, :], state)
78 | outputs.append(cell_output)
79 | self._final_state = state
80 |
81 | hidden_output = tf.reshape(tf.concat(1, outputs), [-1, hidden_size])
82 |
83 | V_1 = tf.get_variable("v_1", shape=[hidden_size, FLAGS.output_dim],
84 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./hidden_size),tf.sqrt(1./hidden_size)))
85 | b_1 = tf.get_variable("b_1", shape=[FLAGS.output_dim], initializer=tf.constant_initializer(0.1))
86 | logits = tf.add(tf.matmul(hidden_output, V_1), b_1)
87 |
88 | target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
89 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
90 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
91 | self._cost = mse
92 |
93 | if not is_training:
94 | return
95 |
96 | self._lr = tf.Variable(0.0, trainable=False)
97 | tvars = tf.trainable_variables()
98 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
99 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
100 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
101 |
102 | def assign_lr(self, session, lr_value):
103 | session.run(tf.assign(self.lr, lr_value))
104 |
105 | @property
106 | def input_data(self):
107 | return self._input_data
108 |
109 | @property
110 | def targets(self):
111 | return self._targets
112 |
113 | @property
114 | def initial_state(self):
115 | return self._initial_state
116 |
117 | @property
118 | def cost(self):
119 | return self._cost
120 |
121 | @property
122 | def final_state(self):
123 | return self._final_state
124 |
125 | @property
126 | def lr(self):
127 | return self._lr
128 |
129 | @property
130 | def train_op(self):
131 | return self._train_op
132 |
133 | def run_train_epoch(session, m, data_x, data_y, eval_op):
134 | costs = []
135 | states = []
136 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
137 | cost, state, _ = session.run(
138 | [m.cost, m.final_state, eval_op],
139 | {m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
140 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
141 | costs.append(cost)
142 | states.append(state)
143 | return (sum(costs)/len(costs), states)
144 |
145 | def run_test_epoch(session, m, data_x, data_y, eval_op, train_state):
146 | costs = []
147 | states = []
148 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
149 | cost, state, _ = session.run(
150 | [m.cost, m.final_state, eval_op],
151 | {m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
152 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
153 | m.initial_state: train_state[i]})
154 | # m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
155 | costs.append(cost)
156 | states.append(state)
157 | return (sum(costs)/len(costs), states)
158 |
159 | def main(_):
160 | print("===============================================================================")
161 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
162 | print("The output_dim is", FLAGS.output_dim)
163 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
164 | print("The model is", FLAGS.model, "The number of layer is", FLAGS.layer)
165 | print("The truncated number is", FLAGS.trun_num, "The reverse is", FLAGS.reverse)
166 | X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(FLAGS.data_path,
167 | FLAGS.input_dim,
168 | FLAGS.output_dim,
169 | FLAGS.input_dim)
170 | train_data_len = X_train.shape[1]
171 | train_len, train_index = truncated_index(train_data_len, FLAGS.trun_num, FLAGS.reverse)
172 | print("train length", train_len)
173 | print(train_index)
174 | with tf.Graph().as_default(), tf.Session() as session:
175 | with tf.variable_scope("model", reuse=None):
176 | m_train = RNNModel(is_training=True, length=train_len)
177 | with tf.variable_scope("model", reuse=True):
178 | m_test = RNNModel(is_training=False, length=len(y_test[0]))
179 |
180 | tf.initialize_all_variables().run()
181 |
182 | #log_a = Log('http://localhost:8120','modelA')
183 | # pastalog --serve 8120
184 |
185 | scale = cpu_load_std ** 2
186 | test_best = 0.0
187 | training_time = []
188 | for i in range(FLAGS.epoch):
189 | if i < FLAGS.epoch/3:
190 | lr_decay = 1
191 | elif i < FLAGS.epoch*2/3:
192 | lr_decay = 0.1
193 | else:
194 | lr_decay = 0.01
195 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
196 | train_loss_list = []
197 | train_state_list = []
198 | start = time.time()
199 | for j in range(FLAGS.trun_num):
200 | train_loss, train_state = run_train_epoch(session, m_train, X_train[:,train_index[j],:],
201 | y_train[:,train_index[j],:], m_train.train_op)
202 | train_loss_list.append(train_loss)
203 | if FLAGS.reverse:
204 | if j == 0:
205 | train_state_list.append(train_state)
206 | else:
207 | if j == FLAGS.trun_num-1:
208 | train_state_list.append(train_state)
209 | finish = time.time()
210 | training_time.append(finish - start)
211 | test_loss, _ = run_test_epoch(session, m_test, X_test, y_test, tf.no_op(), train_state_list[0])
212 | if i == 0:
213 | test_best = test_loss
214 | if test_loss < test_best:
215 | test_best = test_loss
216 | # print("epoch:%3d, lr %.5f, train_loss_1 %.6f, train_loss_2 %.6f, test_loss %.6f" %
217 | # (i + 1, session.run(m_train.lr), train_loss_1*scale,
218 | # train_loss_2*scale, test_loss*scale))
219 | #print(np.asarray(train_loss_list)*scale)
220 | print("epoch:%3d, lr %.5f, train_loss %.6f, test_loss %.6f, speed %.2f seconds/epoch"
221 | % (i + 1, session.run(m_train.lr), np.mean(train_loss_list)*scale,
222 | test_loss*scale, training_time[i]))
223 | #log_a.post("trainLoss", value=float(train_loss), step=i)
224 | #log_a.post("testLoss", value=float(test_loss), step=i)
225 | if i == FLAGS.epoch - 1:
226 | print("Best test loss %.6f" % (test_best*scale))
227 | print("Average %.4f seconds for one epoch" % (np.mean(training_time)))
228 |
229 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
230 | print("The output_dim is", FLAGS.output_dim)
231 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
232 | print("The model is", FLAGS.model, "The number of layer is", FLAGS.layer)
233 | print("The truncated number is", FLAGS.trun_num, "The reverse is", FLAGS.reverse)
234 | print("===============================================================================")
235 |
236 | if __name__ == "__main__":
237 | tf.app.run()
238 |
--------------------------------------------------------------------------------
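
main() above trains in a truncated fashion: truncated_index splits the step axis into trun_num consecutive chunks, run_train_epoch is called once per chunk, and the state saved after the chronologically last chunk warm-starts the test pass. A small sketch of the chunking, assuming utils.py above is importable and using illustrative shapes:

    import numpy as np
    from utils import truncated_index

    trun_num = 8
    X_train = np.zeros((1024, 312, 24), dtype=np.float32)  # (machines, steps, input_dim)
    train_len, train_index = truncated_index(X_train.shape[1], trun_num, reverse=False)
    print(train_len)   # 39 steps per truncated chunk

    for j in range(trun_num):
        chunk = X_train[:, train_index[j], :]   # consecutive slice, shape (1024, 39, 24)
        # each chunk corresponds to one run_train_epoch call in main() above
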
/tensorflow/LSTM/MSE/rnn_cell.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/LSTM/MSE/rnn_cell.pyc
--------------------------------------------------------------------------------
/tensorflow/LSTM/MSE/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [ ]; then  # "[ ]" is always false, so the sweeps in this block are skipped (kept for reference)
3 | output_dim_arr=(6 12 18 24 30 36)
4 | for output_dim in ${output_dim_arr[@]}; do
5 | echo "The output_dim is $output_dim"
6 | python main.py --output_dim=$output_dim >> 0726_2.txt
7 | done
8 |
9 | output_dim_arr=(6 12 18 24 30 36)
10 | trun_num_arr=(1 2 3 4 6 8 12 13 24 26)
11 | model1="rnn"
12 | for output_dim in ${output_dim_arr[@]}; do
13 | echo "The output_dim is $output_dim"
14 | for trun_num in ${trun_num_arr[@]}; do
15 | echo "The trun_num is $trun_num"
16 | python main.py --output_dim=$output_dim --trun_num=$trun_num --model=$model1 >> 0726_rnn.txt
17 | done
18 | done
19 |
20 | output_dim_arr=(6 12 18 24 30 36)
21 | trun_num_arr=(1 2 3 4 6 8 12 13 24 26)
22 | model2="lstm"
23 | for output_dim in ${output_dim_arr[@]}; do
24 | echo "The output_dim is $output_dim"
25 | for trun_num in ${trun_num_arr[@]}; do
26 | echo "The trun_num is $trun_num"
27 | python main.py --output_dim=$output_dim --trun_num=$trun_num --model=$model2 >> 0726_lstm.txt
28 | done
29 | done
30 | fi
31 |
32 | output_dim_arr=(6 12 18 24 30 36)
33 | for output_dim in ${output_dim_arr[@]}; do
34 | echo "The output_dim is $output_dim"
35 | python main.py --output_dim=$output_dim >> 1222.txt
36 | done
37 |
--------------------------------------------------------------------------------
/tensorflow/LSTM/MSE/utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def contextwin(cpu_load, win_i, win_o, ahead_step):
13 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
14 | a = 26
15 | b = 3
16 | train_len = a * 288 / ahead_step
17 | test_len = (b-1) * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
18 | train_start = win_i
19 | test_start = a*288 + win_i
20 |
21 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
22 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
23 | train_y = np.asarray([[m[i][train_start+j*ahead_step:train_start+j*ahead_step+win_o]
24 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
25 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
26 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
27 | test_y = np.asarray([[m[i][test_start+j*ahead_step:test_start+j*ahead_step+win_o]
28 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
29 |
30 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
31 |
32 | def read_data(_data_path, win_i, win_o, ahead_step):
33 | data_path = _data_path
34 | print("Reading pkl data...")
35 | input_machine = open(data_path,'rb')
36 | cpu_load = pickle.load(input_machine)
37 | input_machine.close()
38 | print("Loading data...")
39 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, win_o, ahead_step)
40 |
41 | print(X_train.shape, y_train.shape)
42 | print(X_test.shape, y_test.shape)
43 | print(cpu_load_mean)
44 | print(cpu_load_std)
45 |
46 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
47 |
48 | def truncated_index(long_period, num, reverse):
49 | index = []
50 | single_period_len = int(long_period/num)
51 | for i in range(num):
52 | if reverse:
53 | j = num-1 - i
54 | else:
55 | j = i
56 | index.append(range(j*single_period_len, (j+1)*single_period_len))
57 | return single_period_len, index
58 |
59 | def test_dataset(load, win_i, win_o, ahead_step):  # 80/20 train/test split of a single Grid/Unix load trace
60 | load_len = len(load)
61 | load_test_len = int(0.2 * load_len)
62 | load_train_len = int(0.8 * load_len)
63 | load = np.asarray(load)
64 | load -= np.mean(load[:load_train_len])
65 | load_std = np.std(load[:load_train_len])
66 | load /= load_std
67 | model_train_len = 26 * 288 / ahead_step / 8
68 | model_test_len = 2 * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
69 | tr_num = int(load_train_len / (128 * ahead_step * model_train_len)) * 128
70 | te_num = int(load_test_len / (64 * ahead_step * model_test_len)) * 64
71 | print("Unix system train", tr_num, ", test", te_num)
72 | train_start = load_train_len - tr_num * ahead_step * model_train_len
73 | test_start = -load_test_len
74 | X_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step:
75 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i]
76 | for j in range(model_train_len)] for i in range(tr_num)])
77 | y_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step+win_i:
78 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i+win_o]
79 | for j in range(model_train_len)] for i in range(tr_num)])
80 | X_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step:
81 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i]
82 | for j in range(model_test_len)] for i in range(te_num)])
83 | y_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step+win_i:
84 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i+win_o]
85 | for j in range(model_test_len)] for i in range(te_num)])
86 | print(X_train.shape, y_train.shape)
87 | print(X_test.shape, y_test.shape)
88 | return X_train, y_train, X_test, y_test, load_std
89 |
--------------------------------------------------------------------------------
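A minimal NumPy sketch of the sliding-window slicing that `contextwin` above performs, shown on a hypothetical toy series (the real code standardizes the load first and builds one window set per machine):

```python
import numpy as np

# Hypothetical standardized load series for a single machine.
series = np.arange(20, dtype=np.float32)

win_i, win_o, ahead_step = 4, 2, 2  # history window, prediction window, stride
n_windows = (len(series) - win_i - win_o) // ahead_step + 1

# Each x holds win_i points of history; each y holds the next win_o points.
X = np.stack([series[j*ahead_step : j*ahead_step + win_i] for j in range(n_windows)])
Y = np.stack([series[j*ahead_step + win_i : j*ahead_step + win_i + win_o] for j in range(n_windows)])
print(X.shape, Y.shape)  # (8, 4) (8, 2)
```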
/tensorflow/LSTM/MSE/utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/LSTM/MSE/utils.pyc
--------------------------------------------------------------------------------
/tensorflow/LSTM/MSSE/lstm_msse_main.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | import numpy as np
7 | import time
8 | from utils import read_data, truncated_index
9 | import rnn_cell
10 |
11 | #from pastalog import Log
12 |
13 | flags = tf.flags
14 | logging = tf.logging
15 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
16 | "The path of host load data")
17 | flags.DEFINE_integer("input_dim", 24, "The length of history window")
18 | flags.DEFINE_integer("hidden_dim", 128, "The length of hidden layer size")
19 | flags.DEFINE_integer("interval", 8, "The number of output interval")
20 | flags.DEFINE_integer("batch_size", 128, "Mini-batch size")
21 | flags.DEFINE_integer("trun_num", 8, "The number of small truncated period")
22 | flags.DEFINE_bool("reverse", False, "Whether to reverse the training sequence")
23 | flags.DEFINE_integer("epoch", 60, "The total epochs")
24 | flags.DEFINE_integer("layer", 1, "The number of recurrent layers")
25 | flags.DEFINE_float("lr", 0.05, "Learning rate")
26 | flags.DEFINE_string("model", "lstm", "The RNN type")
27 | flags.DEFINE_float("keep_prob", 1.0, "keep prob")
28 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
29 | FLAGS = flags.FLAGS
30 |
31 | class RNNModel(object):
32 | def __init__(self, is_training, length):
33 | self.batch_size = batch_size = FLAGS.batch_size
34 | self.num_steps = num_steps = length
35 | hidden_size = FLAGS.hidden_dim
36 |
37 | self._input_data = tf.placeholder(tf.float32, [batch_size, length, FLAGS.input_dim])
38 | self._targets = tf.placeholder(tf.float32, [batch_size, length])
39 |
40 | if FLAGS.model == "rnn":
41 | vanilla_rnn_cell = rnn_cell.BasicRNNCell(num_units=FLAGS.hidden_dim)
42 | if is_training and FLAGS.keep_prob < 1:
43 | vanilla_rnn_cell = rnn_cell.DropoutWrapper(vanilla_rnn_cell,
44 | output_keep_prob=FLAGS.keep_prob)
45 | if FLAGS.layer == 1:
46 | cell = vanilla_rnn_cell
47 | elif FLAGS.layer == 2:
48 | cell = rnn_cell.MultiRNNCell([vanilla_rnn_cell] * 2)
49 | elif FLAGS.model == "lstm":
50 | lstm_cell = rnn_cell.BasicLSTMCell(num_units=FLAGS.hidden_dim,
51 | forget_bias=1.0)
52 | if is_training and FLAGS.keep_prob < 1:
53 | lstm_cell = rnn_cell.DropoutWrapper(lstm_cell,
54 | output_keep_prob=FLAGS.keep_prob)
55 | if FLAGS.layer == 1:
56 | cell = lstm_cell
57 | elif FLAGS.layer == 2:
58 | cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
59 | elif FLAGS.model == "gru":
60 | gru_cell = rnn_cell.GRUCell(num_units=FLAGS.hidden_dim)
61 | if is_training and FLAGS.keep_prob < 1:
62 | gru_cell = rnn_cell.DropoutWrapper(gru_cell,
63 | output_keep_prob=FLAGS.keep_prob)
64 | cell = gru_cell
65 | else:
66 | raise ValueError("Invalid model: %s" % FLAGS.model)
67 |
68 | self._initial_state = cell.zero_state(batch_size, tf.float32)
69 |
70 | outputs = []
71 | state = self._initial_state
72 | with tf.variable_scope("RNN"):
73 | for time_step in range(num_steps):
74 | if time_step > 0:
75 | tf.get_variable_scope().reuse_variables()
76 | (cell_output, state) = cell(self._input_data[:, time_step, :], state)
77 | outputs.append(cell_output)
78 | self._final_state = state
79 |
80 | hidden_output = tf.reshape(tf.concat(1, outputs), [-1, hidden_size])
81 |
82 | V_1 = tf.get_variable("v_1", shape=[hidden_size, 1],
83 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./hidden_size),
84 | tf.sqrt(1./hidden_size)))
85 | b_1 = tf.get_variable("b_1", shape=[1], initializer=tf.constant_initializer(0.1))
86 | logits = tf.add(tf.matmul(hidden_output, V_1), b_1)
87 |
88 | target = tf.reshape(self._targets, [-1, 1])
89 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
90 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
91 | self._cost = mse
92 |
93 | if not is_training:
94 | return
95 |
96 | self._lr = tf.Variable(0.0, trainable=False)
97 | tvars = tf.trainable_variables()
98 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars),
99 | FLAGS.max_grad_norm)
100 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
101 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
102 |
103 | def assign_lr(self, session, lr_value):
104 | session.run(tf.assign(self.lr, lr_value))
105 |
106 | @property
107 | def input_data(self):
108 | return self._input_data
109 |
110 | @property
111 | def targets(self):
112 | return self._targets
113 |
114 | @property
115 | def initial_state(self):
116 | return self._initial_state
117 |
118 | @property
119 | def cost(self):
120 | return self._cost
121 |
122 | @property
123 | def final_state(self):
124 | return self._final_state
125 |
126 | @property
127 | def lr(self):
128 | return self._lr
129 |
130 | @property
131 | def train_op(self):
132 | return self._train_op
133 |
134 | def run_train_epoch(session, m, data_x, data_y, eval_op):  # one pass over all training mini-batches; returns mean cost and per-batch final states
135 | costs = []
136 | states = []
137 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
138 | cost, state, _ = session.run(
139 | [m.cost, m.final_state, eval_op],
140 | {m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
141 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
142 | costs.append(cost)
143 | states.append(state)
144 | return (sum(costs)/len(costs), states)
145 |
146 | def run_test_epoch(session, m, data_x, data_y, eval_op, train_state):  # evaluate, seeding each batch with the matching training final state
147 | costs = []
148 | states = []
149 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
150 | cost, state, _ = session.run(
151 | [m.cost, m.final_state, eval_op],
152 | {m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
153 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
154 | m.initial_state: train_state[i]})
155 | costs.append(cost)
156 | states.append(state)
157 | return (sum(costs)/len(costs), states)
158 |
159 | def main(_):
160 | print("===============================================================================")
161 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
162 | print("The interval is", FLAGS.interval)
163 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
164 | print("The model is", FLAGS.model, "The number of layer is", FLAGS.layer)
165 | print("The data path is ", FLAGS.data_path)
166 | print("The truncated number is", FLAGS.trun_num, "The reverse is", FLAGS.reverse)
167 | X_train, y_train, X_test, y_test, _, cpu_load_std = read_data(FLAGS.data_path,
168 | FLAGS.input_dim,
169 | 8, FLAGS.input_dim)
170 | train_data_len = X_train.shape[1]
171 | train_len, train_index = truncated_index(train_data_len, FLAGS.trun_num, FLAGS.reverse)
172 | print("train length", train_len)
173 | print(train_index)
174 | with tf.Graph().as_default(), tf.Session() as session:
175 | with tf.variable_scope("model", reuse=None):
176 | m_train = RNNModel(is_training=True, length=train_len)
177 | with tf.variable_scope("model", reuse=True):
178 | m_test = RNNModel(is_training=False, length=len(y_test[0]))
179 |
180 | tf.initialize_all_variables().run()
181 |
182 | #log_a = Log('http://localhost:8120','modelA')
183 | # pastalog --serve 8120
184 |
185 | scale = cpu_load_std ** 2
186 | train_best = test_best = 0.0
187 | training_time = []
188 | for i in range(FLAGS.epoch):
189 | if i < FLAGS.epoch/3:
190 | lr_decay = 1
191 | elif i < FLAGS.epoch*2/3:
192 | lr_decay = 0.1
193 | else:
194 | lr_decay = 0.01
195 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
196 | train_loss_list = []
197 | train_state_list = []
198 | start = time.time()
199 | for j in range(FLAGS.trun_num):
200 | train_loss, train_state = run_train_epoch(session, m_train, X_train[:,train_index[j],:],
201 | y_train[:,train_index[j],FLAGS.interval-1],
202 | m_train.train_op)
203 | train_loss_list.append(train_loss)
204 | if FLAGS.reverse:
205 | if j == 0:
206 | train_state_list.append(train_state)
207 | else:
208 | if j == FLAGS.trun_num-1:
209 | train_state_list.append(train_state)
210 | finish = time.time()
211 | training_time.append(finish - start)
212 | test_loss, _ = run_test_epoch(session, m_test, X_test,
213 | y_test[:,:,FLAGS.interval-1],
214 | tf.no_op(), train_state)
215 | if i == 0:
216 | train_best = train_loss
217 | test_best = test_loss
218 | if train_loss < train_best:
219 | train_best = train_loss
220 | if test_loss < test_best:
221 | test_best = test_loss
222 | print(np.asarray(train_loss_list)*scale)
223 | print("epoch:%3d, lr %.5f, train_loss %.6f, test_loss %.6f, speed %.2f seconds/epoch"
224 | % (i + 1, session.run(m_train.lr), np.mean(train_loss_list)*scale,
225 | test_loss*scale, training_time[i]))
226 | #log_a.post("trainLoss", value=float(train_loss), step=i)
227 | #log_a.post("testLoss", value=float(test_loss), step=i)
228 | if i == FLAGS.epoch - 1:
229 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
230 | print("Average %.4f seconds for one epoch" % (np.mean(training_time)))
231 |
232 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
233 | print("The interval is", FLAGS.interval)
234 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
235 | print("The model is", FLAGS.model, "The number of layer is", FLAGS.layer)
236 | print("The data path is ", FLAGS.data_path)
237 | print("The truncated number is", FLAGS.trun_num, "The reverse is", FLAGS.reverse)
238 | print("===============================================================================")
239 |
240 | if __name__ == "__main__":
241 | tf.app.run()
242 |
--------------------------------------------------------------------------------
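The training loop in `main` above applies a three-stage step decay: the full learning rate for the first third of the epochs, one tenth for the second third, and one hundredth for the rest. A standalone sketch of that schedule (the epoch count and base rate are just the flag defaults above):

```python
def lr_schedule(epoch, total_epochs=60, base_lr=0.05):
    # Step decay used by main(): 1x, then 0.1x, then 0.01x of the base rate.
    if epoch < total_epochs / 3:
        return base_lr
    elif epoch < total_epochs * 2 / 3:
        return base_lr * 0.1
    return base_lr * 0.01

print([lr_schedule(e) for e in (0, 20, 40)])  # [0.05, 0.005, 0.0005]
```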
/tensorflow/LSTM/MSSE/rnn_cell.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/LSTM/MSSE/rnn_cell.pyc
--------------------------------------------------------------------------------
/tensorflow/LSTM/MSSE/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | interval_arr=(8 7 6 5 4 3 2 1)
4 | for interval in ${interval_arr[@]}; do
5 | echo "The interval is $interval"
6 | python lstm_msse_main.py --interval=$interval >> 1012.txt
7 | done
8 |
--------------------------------------------------------------------------------
/tensorflow/LSTM/MSSE/utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import numpy as np
3 |
4 | def zero_center(cpu_load):  # standardize by the mean/std of the first 26 days (24*12*26 samples)
5 | cpu_load = np.asarray(cpu_load)
6 | cpu_load_mean = np.mean(cpu_load[:,:24*12*26])
7 | cpu_load_std = np.std(cpu_load[:,:24*12*26])
8 | cpu_load -= cpu_load_mean
9 | cpu_load /= cpu_load_std
10 | return (cpu_load, cpu_load_mean, cpu_load_std)
11 |
12 | def calcu_mean(actual_data, start, base_seg, n):  # mean load over segments of length base_seg * 2**i, i = 0..n-1
13 | seg_mean = []
14 | for i in range(n):
15 | seg_mean.append(np.mean(actual_data[start:start+base_seg*2**i]))
16 | return seg_mean
17 |
18 | def contextwin(cpu_load, win_i, n, ahead_step):  # build history windows and segment-mean targets per machine
19 | m, cpu_load_mean, cpu_load_std = zero_center(cpu_load)
20 | a = 26  # days of load used for training (288 samples per day)
21 | b = 3   # days after training; the test windows cover the next b-1 days
22 | train_len = a * 288 / ahead_step
23 | test_len = (b-1) * 288 / ahead_step + (288 - 2**(n-1)) / ahead_step
24 | train_start = win_i
25 | test_start = a*288 + win_i
26 |
27 | train_x = np.asarray([[m[i][train_start+j*ahead_step-win_i:train_start+j*ahead_step]
28 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
29 | train_y = np.asarray([[calcu_mean(m[i], train_start+j*ahead_step, 1, n)
30 | for j in range(train_len)] for i in range(len(m))],dtype=np.float32)
31 | test_x = np.asarray([[m[i][test_start+j*ahead_step-win_i:test_start+j*ahead_step]
32 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
33 | test_y = np.asarray([[calcu_mean(m[i], test_start+j*ahead_step, 1, n)
34 | for j in range(test_len)] for i in range(len(m))],dtype=np.float32)
35 |
36 | return (train_x, train_y, test_x, test_y, cpu_load_mean, cpu_load_std)
37 |
38 | def read_data(_data_path, win_i, n, ahead_step):
39 | data_path = _data_path
40 | print("Reading pkl data...")
41 | input_machine = open(data_path,'rb')
42 | cpu_load = pickle.load(input_machine)
43 | input_machine.close()
44 | print("Loading data...")
45 | X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std = contextwin(cpu_load, win_i, n, ahead_step)
46 |
47 | print(X_train.shape, y_train.shape)
48 | print(X_test.shape, y_test.shape)
49 | print(cpu_load_mean)
50 | print(cpu_load_std)
51 |
52 | return (X_train, y_train, X_test, y_test, cpu_load_mean, cpu_load_std)
53 |
54 | def truncated_index(long_period, num, reverse):  # split [0, long_period) into num equal chunks for truncated training
55 | index = []
56 | single_period_len = int(long_period/num)
57 | for i in range(num):
58 | if reverse:
59 | j = num-1 - i
60 | else:
61 | j = i
62 | index.append(range(j*single_period_len, (j+1)*single_period_len))
63 | return single_period_len, index
--------------------------------------------------------------------------------
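`calcu_mean` above builds the MSSE targets: the mean load over segments of exponentially growing length (base_seg * 2**i points for i = 0..n-1), all anchored at the same start position. A toy NumPy sketch of the same logic:

```python
import numpy as np

def calcu_mean(actual_data, start, base_seg, n):
    # Mean over segments of length base_seg * 2**i, all starting at `start`.
    return [np.mean(actual_data[start:start + base_seg * 2**i]) for i in range(n)]

series = np.arange(10.0)  # hypothetical load trace
print(calcu_mean(series, 0, 1, 4))  # means over lengths 1, 2, 4, 8: 0.0, 0.5, 1.5, 3.5
```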
/tensorflow/LSTM/MSSE/utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/LSTM/MSSE/utils.pyc
--------------------------------------------------------------------------------
/tensorflow/LSTM/README.md:
--------------------------------------------------------------------------------
1 | # Long Short-Term Memory
2 |
3 | > Using LSTM to predict host load
4 |
5 | - [MSE](./MSE) Predict host load several steps ahead, measured with average MSE (Mean Squared Error), on the Google Cluster Data.
6 | - [MSSE](./MSSE) Predict host load over a long period, measured with average MSSE (Mean Segment Squared Error), on the Google Cluster Data.
7 | - [grid](./grid) Predict host load measured with MSE on the Grid dataset only.
8 |
--------------------------------------------------------------------------------
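To make the difference between the two metrics concrete: MSE averages the squared error over individual prediction points, while MSSE averages the squared error between predicted and actual mean loads over exponentially growing segments. A minimal NumPy sketch with toy arrays (not the project's data):

```python
import numpy as np

pred = np.array([0.30, 0.32, 0.35, 0.31])
true = np.array([0.28, 0.35, 0.33, 0.30])

# MSE: squared error averaged over individual points.
mse = np.mean((pred - true) ** 2)

# MSSE: squared error between segment means, segments of length 1, 2, 4, ...
segs = [1, 2, 4]
msse = np.mean([(pred[:s].mean() - true[:s].mean()) ** 2 for s in segs])
print(mse, msse)
```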
/tensorflow/LSTM/grid/main.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | from utils_grid import test_dataset
7 | import rnn_cell
8 |
9 | import numpy as np
10 | import time
11 | import pickle
12 |
13 | #from pastalog import Log
14 |
15 | flags = tf.flags
16 | logging = tf.logging
17 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
18 | "The path of host load data")
19 | flags.DEFINE_integer("input_dim", 24, "The length of history window")
20 | flags.DEFINE_integer("hidden_dim", 128, "The length of hidden layer size")
21 | flags.DEFINE_integer("output_dim", 6, "The length of prediction window")
22 | flags.DEFINE_integer("batch_size", 32, "Mini-batch size")
23 | flags.DEFINE_integer("test_batch_size", 32, "Mini-batch size of testing data")
24 | flags.DEFINE_integer("epoch", 60, "The total epochs")
25 | flags.DEFINE_float("lr", 0.05, "Learning rate")
26 | flags.DEFINE_string("model", "lstm", "The RNN type")
27 | flags.DEFINE_string("grid", "axp7", "The machine of Grid")
28 | flags.DEFINE_integer("layer", 1, "The layers of model")
29 | flags.DEFINE_float("keep_prob", 1.0, "keep prob")
30 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
31 | FLAGS = flags.FLAGS
32 |
33 | class RNNModel(object):
34 | def __init__(self, is_training, length, batch_size):
35 | self.batch_size = batch_size
36 | self.num_steps = num_steps = length
37 | hidden_size = FLAGS.hidden_dim
38 |
39 | self._input_data = tf.placeholder(tf.float32, [batch_size, None, FLAGS.input_dim])
40 | self._targets = tf.placeholder(tf.float32, [batch_size, None, FLAGS.output_dim])
41 |
42 | if FLAGS.model == "rnn":
43 | vanilla_rnn_cell = rnn_cell.BasicRNNCell(num_units=FLAGS.hidden_dim)
44 | if is_training and FLAGS.keep_prob < 1:
45 | vanilla_rnn_cell = rnn_cell.DropoutWrapper(vanilla_rnn_cell,
46 | output_keep_prob=FLAGS.keep_prob)
47 | if FLAGS.layer == 1:
48 | cell = vanilla_rnn_cell
49 | elif FLAGS.layer == 2:
50 | cell = rnn_cell.MultiRNNCell([vanilla_rnn_cell] * 2)
51 | elif FLAGS.model == "lstm":
52 | lstm_cell = rnn_cell.BasicLSTMCell(num_units=FLAGS.hidden_dim,
53 | forget_bias=1.0)
54 | if is_training and FLAGS.keep_prob < 1:
55 | lstm_cell = rnn_cell.DropoutWrapper(lstm_cell,
56 | output_keep_prob=FLAGS.keep_prob)
57 | if FLAGS.layer == 1:
58 | cell = lstm_cell
59 | elif FLAGS.layer == 2:
60 | cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
61 | elif FLAGS.model == "gru":
62 | gru_cell = rnn_cell.GRUCell(num_units=FLAGS.hidden_dim)
63 | if is_training and FLAGS.keep_prob < 1:
64 | gru_cell = rnn_cell.DropoutWrapper(gru_cell,
65 | output_keep_prob=FLAGS.keep_prob)
66 | cell = gru_cell
67 | else:
68 | raise ValueError("Invalid model: %s" % FLAGS.model)
69 |
70 | self._initial_state = cell.zero_state(batch_size, tf.float32)
71 |
72 | outputs = []
73 | state = self._initial_state
74 | with tf.variable_scope("RNN"):
75 | for time_step in range(num_steps):
76 | if time_step > 0:
77 | tf.get_variable_scope().reuse_variables()
78 | (cell_output, state) = cell(self._input_data[:, time_step, :], state)
79 | outputs.append(cell_output)
80 | self._final_state = state
81 |
82 | hidden_output = tf.reshape(tf.concat(1, outputs), [-1, hidden_size])
83 |
84 | V_1 = tf.get_variable("v_1", shape=[hidden_size, FLAGS.output_dim],
85 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./hidden_size),tf.sqrt(1./hidden_size)))
86 | b_1 = tf.get_variable("b_1", shape=[FLAGS.output_dim], initializer=tf.constant_initializer(0.1))
87 | logits = tf.add(tf.matmul(hidden_output, V_1), b_1)
88 |
89 | target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
90 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
91 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
92 | self._cost = mse
93 |
94 | if not is_training:
95 | return
96 |
97 | self._lr = tf.Variable(0.0, trainable=False)
98 | tvars = tf.trainable_variables()
99 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
100 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
101 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
102 |
103 | def assign_lr(self, session, lr_value):
104 | session.run(tf.assign(self.lr, lr_value))
105 |
106 | @property
107 | def input_data(self):
108 | return self._input_data
109 |
110 | @property
111 | def targets(self):
112 | return self._targets
113 |
114 | @property
115 | def initial_state(self):
116 | return self._initial_state
117 |
118 | @property
119 | def cost(self):
120 | return self._cost
121 |
122 | @property
123 | def final_state(self):
124 | return self._final_state
125 |
126 | @property
127 | def lr(self):
128 | return self._lr
129 |
130 | @property
131 | def train_op(self):
132 | return self._train_op
133 |
134 | def run_train_epoch(session, m, data_x, data_y, eval_op):
135 | costs = []
136 | states = []
137 | for i in xrange(int(len(data_y) / FLAGS.batch_size)):
138 | cost, state, _ = session.run(
139 | [m.cost, m.final_state, eval_op],
140 | {m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
141 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
142 | costs.append(cost)
143 | states.append(state)
144 | return (np.mean(costs), states)
145 |
146 | def run_test_epoch(session, m, data_x, data_y, eval_op):
147 | costs = []
148 | states = []
149 | for i in xrange(int(len(data_y) / FLAGS.test_batch_size)):
150 | cost, state, _ = session.run(
151 | [m.cost, m.final_state, eval_op],
152 | {m.input_data: data_x[i*FLAGS.test_batch_size:(i+1)*FLAGS.test_batch_size],
153 | m.targets: data_y[i*FLAGS.test_batch_size:(i+1)*FLAGS.test_batch_size]})
155 | costs.append(cost)
156 | states.append(state)
157 | return (np.mean(costs), states)
158 |
159 | def main(_):
160 | print("===============================================================================")
161 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
162 | print("The output_dim is", FLAGS.output_dim)
163 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
164 | print("The model is", FLAGS.model, "The machine is", FLAGS.grid)
185 | with open("./data/"+FLAGS.grid+".pkl", 'rb') as f:  # one of axp0, axp7, sahara, themis
186 | grid = pickle.load(f)
187 | X_train, y_train, X_test, y_test, std_grid = test_dataset(grid,
188 | FLAGS.input_dim, FLAGS.output_dim, FLAGS.input_dim)
189 |
190 | train_len = X_train.shape[1]
191 | print("train length", train_len)
192 | with tf.Graph().as_default(), tf.Session() as session:
193 | with tf.variable_scope("model", reuse=None):
194 | m_train = RNNModel(is_training=True, length=X_train.shape[1],
195 | batch_size=FLAGS.batch_size)
196 | with tf.variable_scope("model", reuse=True):
197 | m_test = RNNModel(is_training=False, length=X_test.shape[1],
198 | batch_size=FLAGS.test_batch_size)
199 |
200 | tf.initialize_all_variables().run()
201 |
202 | #log_a = Log('http://localhost:8120','modelA')
203 | # pastalog --serve 8120
204 |
205 | scale = std_grid ** 2
206 | test_best = 0.0
207 | training_time = []
208 | for i in range(FLAGS.epoch):
209 | if i < FLAGS.epoch/3:
210 | lr_decay = 1
211 | elif i < FLAGS.epoch*2/3:
212 | lr_decay = 0.1
213 | else:
214 | lr_decay = 0.01
215 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
216 | start = time.time()
217 | train_loss, _ = run_train_epoch(session, m_train, X_train,
218 | y_train, m_train.train_op)
219 | finish = time.time()
220 | training_time.append(finish - start)
221 | test_loss, _ = run_test_epoch(session, m_test, X_test, y_test,
222 | tf.no_op())
223 | if i == 0:
224 | test_best = test_loss
225 | if test_loss < test_best:
226 | test_best = test_loss
227 | # print("epoch:%3d, lr %.5f, train_loss_1 %.6f, train_loss_2 %.6f, test_loss %.6f" %
228 | # (i + 1, session.run(m_train.lr), train_loss_1*scale,
229 | # train_loss_2*scale, test_loss*scale))
230 | #print(np.asarray(train_loss_list)*scale)
231 | print("epoch:%3d, lr %.5f, train_loss %.6f, test_loss %.6f, speed %.2f seconds/epoch"
232 | % (i + 1, session.run(m_train.lr), train_loss*scale,
233 | test_loss*scale, training_time[i]))
234 | #log_a.post("trainLoss", value=float(train_loss), step=i)
235 | #log_a.post("testLoss", value=float(test_loss), step=i)
236 | if i == FLAGS.epoch - 1:
237 | print("Best test loss %.6f" % (test_best*scale))
238 | print("Average %.4f seconds for one epoch" % (np.mean(training_time)))
239 |
240 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
241 | print("The output_dim is", FLAGS.output_dim)
242 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
243 | print("The model is", FLAGS.model, "The machine is", FLAGS.grid)
244 | print("===============================================================================")
245 |
246 | if __name__ == "__main__":
247 | tf.app.run()
248 |
--------------------------------------------------------------------------------
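`run_train_epoch` and `run_test_epoch` above both walk the data in fixed-size contiguous slices and silently drop whatever remainder does not fill a whole batch. A standalone sketch of that iteration (shapes are hypothetical):

```python
import numpy as np

def iter_batches(X, Y, batch_size):
    # Contiguous mini-batches; the ragged tail is dropped,
    # mirroring the slicing in run_train_epoch.
    for i in range(len(Y) // batch_size):
        yield X[i*batch_size:(i+1)*batch_size], Y[i*batch_size:(i+1)*batch_size]

X, Y = np.zeros((100, 24)), np.zeros((100, 6))
print(sum(1 for _ in iter_batches(X, Y, 32)))  # 3 batches; the last 4 samples are unused
```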
/tensorflow/LSTM/grid/rnn_cell.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/LSTM/grid/rnn_cell.pyc
--------------------------------------------------------------------------------
/tensorflow/LSTM/grid/save_logits.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 | import numpy as np
7 | import pickle
8 | import time
9 |
10 | from utils_grid import test_dataset
11 | import rnn_cell
12 |
13 | #from pastalog import Log
14 |
15 | flags = tf.flags
16 | logging = tf.logging
17 | flags.DEFINE_string("data_path", "/home/tyrion/lannister/1024/tyrion.pkl",
18 | "The path of host load data")
19 | flags.DEFINE_integer("input_dim", 24, "The length of history window")
20 | flags.DEFINE_integer("hidden_dim", 128, "The length of hidden layer size")
21 | flags.DEFINE_integer("output_dim", 6, "The length of prediction window")
22 | flags.DEFINE_integer("batch_size", 32, "Mini-batch size")
23 | flags.DEFINE_integer("test_batch_size", 32, "Mini-batch size of testing data")
24 | flags.DEFINE_integer("epoch", 90, "The total epochs")
25 | flags.DEFINE_float("lr", 0.05, "Learning rate")
26 | flags.DEFINE_string("model", "lstm", "The RNN type")
27 | flags.DEFINE_string("grid", "axp7", "The machine of Grid")
28 | flags.DEFINE_integer("layer", 1, "The layers of model")
29 | flags.DEFINE_float("keep_prob", 1.0, "keep prob")
30 | flags.DEFINE_integer("max_grad_norm", 5, "max grad norm")
31 | FLAGS = flags.FLAGS
32 |
33 | class RNNModel(object):
34 | def __init__(self, is_training, length, batch_size):
35 | self.batch_size = batch_size
36 | self.num_steps = num_steps = length
37 | hidden_size = FLAGS.hidden_dim
38 |
39 | self._input_data = tf.placeholder(tf.float32, [batch_size, None, FLAGS.input_dim])
40 | self._targets = tf.placeholder(tf.float32, [batch_size, None, FLAGS.output_dim])
41 |
42 | if FLAGS.model == "rnn":
43 | vanilla_rnn_cell = rnn_cell.BasicRNNCell(num_units=FLAGS.hidden_dim)
44 | if is_training and FLAGS.keep_prob < 1:
45 | vanilla_rnn_cell = rnn_cell.DropoutWrapper(vanilla_rnn_cell,
46 | output_keep_prob=FLAGS.keep_prob)
47 | if FLAGS.layer == 1:
48 | cell = vanilla_rnn_cell
49 | elif FLAGS.layer == 2:
50 | cell = rnn_cell.MultiRNNCell([vanilla_rnn_cell] * 2)
51 | elif FLAGS.model == "lstm":
52 | lstm_cell = rnn_cell.BasicLSTMCell(num_units=FLAGS.hidden_dim,
53 | forget_bias=1.0)
54 | if is_training and FLAGS.keep_prob < 1:
55 | lstm_cell = rnn_cell.DropoutWrapper(lstm_cell,
56 | output_keep_prob=FLAGS.keep_prob)
57 | cell = lstm_cell
58 | #cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
59 | elif FLAGS.model == "gru":
60 | gru_cell = rnn_cell.GRUCell(num_units=FLAGS.hidden_dim)
61 | if is_training and FLAGS.keep_prob < 1:
62 | gru_cell = rnn_cell.DropoutWrapper(gru_cell,
63 | output_keep_prob=FLAGS.keep_prob)
64 | cell = gru_cell
65 | else:
66 | raise ValueError("Invalid model: %s" % FLAGS.model)
67 |
68 | self._initial_state = cell.zero_state(batch_size, tf.float32)
69 |
70 | outputs = []
71 | state = self._initial_state
72 | with tf.variable_scope("RNN"):
73 | for time_step in range(num_steps):
74 | if time_step > 0:
75 | tf.get_variable_scope().reuse_variables()
76 | (cell_output, state) = cell(self._input_data[:, time_step, :], state)
77 | outputs.append(cell_output)
78 | self._final_state = state
79 |
80 | hidden_output = tf.reshape(tf.concat(1, outputs), [-1, hidden_size])
81 | V_1 = tf.get_variable("v_1", shape=[hidden_size, FLAGS.output_dim],
82 | initializer=tf.random_uniform_initializer(-tf.sqrt(1./hidden_size),tf.sqrt(1./hidden_size)))
83 | b_1 = tf.get_variable("b_1", shape=[FLAGS.output_dim], initializer=tf.constant_initializer(0.1))
84 | logits = tf.add(tf.matmul(hidden_output, V_1), b_1)
85 |
86 | target = tf.reshape(self._targets, [-1, FLAGS.output_dim])
87 | training_loss = tf.reduce_sum(tf.pow(logits-target, 2)) / 2
88 | mse = tf.reduce_mean(tf.pow(logits-target, 2))
89 | self._cost = mse
90 |
91 | # expose the logits so the best epoch's predictions can be pickled
92 | self._logits = logits
93 |
94 | if not is_training:
95 | return
96 |
97 | self._lr = tf.Variable(0.0, trainable=False)
98 | tvars = tf.trainable_variables()
99 | grads, _ = tf.clip_by_global_norm(tf.gradients(training_loss, tvars), FLAGS.max_grad_norm)
100 | optimizer = tf.train.GradientDescentOptimizer(self.lr)
101 | self._train_op = optimizer.apply_gradients(zip(grads, tvars))
102 |
103 | def assign_lr(self, session, lr_value):
104 | session.run(tf.assign(self.lr, lr_value))
105 |
106 | @property
107 | def input_data(self):
108 | return self._input_data
109 |
110 | @property
111 | def targets(self):
112 | return self._targets
113 |
114 | @property
115 | def initial_state(self):
116 | return self._initial_state
117 |
118 | @property
119 | def cost(self):
120 | return self._cost
121 |
122 | @property
123 | def final_state(self):
124 | return self._final_state
125 |
126 | @property
127 | def lr(self):
128 | return self._lr
129 |
130 | @property
131 | def train_op(self):
132 | return self._train_op
133 |
134 | @property
135 | def logits(self):
136 | return self._logits
137 |
138 | def run_train_epoch(session, m, data_x, data_y, eval_op):
139 | cost_sum = []
140 | state_sum = []
141 | for i in range(int(len(data_y) / FLAGS.batch_size)):
142 | cost, state, _ = session.run(
143 | [m.cost, m.final_state, eval_op],
144 | {m.input_data: data_x[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size],
145 | m.targets: data_y[i*FLAGS.batch_size:(i+1)*FLAGS.batch_size]})
146 | cost_sum.append(cost)
147 | state_sum.append(state)
148 | return (np.mean(cost_sum), state_sum)
149 |
150 | def run_test_epoch(session, m, data_x, data_y, eval_op):
151 | cost_sum = []
152 | logits_sum = []
153 | for i in range(int(len(data_y) / FLAGS.test_batch_size)):
154 | cost, logits, _ = session.run(
155 | [m.cost, m.logits, eval_op],
156 | {m.input_data: data_x[i*FLAGS.test_batch_size:(i+1)*FLAGS.test_batch_size],
157 | m.targets: data_y[i*FLAGS.test_batch_size:(i+1)*FLAGS.test_batch_size]})
158 | cost_sum.append(cost)
159 | logits_sum.append(logits)
160 | return (np.mean(cost_sum), logits_sum)
161 |
162 | def main(_):
163 | print("===============================================================================")
164 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
165 | print("The output_dim is", FLAGS.output_dim)
166 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
167 | print("The model is", FLAGS.model, "The machine is", FLAGS.grid)
168 |
169 | with open("./data/"+FLAGS.grid+".pkl", 'rb') as f:
170 | grid = pickle.load(f)
171 | X_train, y_train, X_test, y_test, std_grid = test_dataset(grid,
172 | FLAGS.input_dim, FLAGS.output_dim, FLAGS.input_dim)
173 |
174 | train_len = X_train.shape[1]
175 | print("train length", train_len)
176 | with tf.Graph().as_default(), tf.Session() as session:
177 | with tf.variable_scope("model", reuse=None):
178 | m_train = RNNModel(is_training=True, length=X_train.shape[1],
179 | batch_size=FLAGS.batch_size)
180 | with tf.variable_scope("model", reuse=True):
181 | m_test = RNNModel(is_training=False, length=X_test.shape[1],
182 | batch_size=FLAGS.test_batch_size)
183 |
184 | tf.initialize_all_variables().run()
185 |
186 | #log_a = Log('http://localhost:8120','modelA')
187 | # pastalog --serve 8120
188 |
189 | scale = std_grid ** 2
190 | train_best = test_best = 0.0
191 | training_time = []
192 | for i in range(FLAGS.epoch):
193 | if i < FLAGS.epoch/3:
194 | lr_decay = 1
195 | elif i < FLAGS.epoch*2/3:
196 | lr_decay = 0.1
197 | else:
198 | lr_decay = 0.01
199 | m_train.assign_lr(session, FLAGS.lr * lr_decay)
200 | start = time.time()
201 | train_loss, _ = run_train_epoch(session, m_train, X_train,
202 | y_train, m_train.train_op)
203 | finish = time.time()
204 | training_time.append(finish - start)
205 | test_loss, logits = run_test_epoch(session, m_test, X_test, y_test, tf.no_op())
206 | if i == 0:
207 | train_best = train_loss
208 | test_best = test_loss
209 | test_cost_best = np.asarray(logits)
210 | if train_loss < train_best:
211 | train_best = train_loss
212 | if test_loss < test_best:
213 | test_best = test_loss
214 | test_cost_best = np.asarray(logits)
215 |
216 | print("epoch:%3d, lr %.5f, train_loss %.6f, test_loss %.6f, speed %.2f seconds/epoch"
217 | % (i + 1, session.run(m_train.lr), train_loss*scale,
218 | test_loss*scale, training_time[i]))
219 | #log_a.post("trainLoss", value=float(train_loss), step=i)
220 | #log_a.post("testLoss", value=float(test_loss), step=i)
221 | if i == FLAGS.epoch - 1:
222 | print("Average %.4f seconds for one epoch" % (np.mean(training_time)))
223 | print("Best train, test loss %.6f %.6f" % (train_best*scale, test_best*scale))
224 | print(test_cost_best.shape)
225 | save_path = "./logits/"+str(FLAGS.model)+"_"+str(FLAGS.output_dim)+".pkl"
226 | with open(save_path,'wb') as output:
227 | pickle.dump(test_cost_best, output)
228 |
229 | print("===============================================================================")
230 | print("The input_dim is", FLAGS.input_dim, "The hidden_dim is", FLAGS.hidden_dim)
231 | print("The output_dim is", FLAGS.output_dim)
232 | print("The keep_prob is", FLAGS.keep_prob, "The batch_size is", FLAGS.batch_size)
233 | print("The model is", FLAGS.model, "The machine is", FLAGS.grid)
234 | print("===============================================================================")
235 |
236 | if __name__ == "__main__":
237 | tf.app.run()
238 |
239 |
--------------------------------------------------------------------------------
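The pickle written above holds the best epoch's test logits in standardized units; to compare them with raw load you would undo the standardization applied in `test_dataset`. A hedged sketch (the mean and std values here are placeholders; the path is the default `lstm`/`output_dim=6` one):

```python
import pickle
import numpy as np

with open("./logits/lstm_6.pkl", "rb") as f:  # file written by save_logits.py
    logits = np.asarray(pickle.load(f))

# test_dataset standardized the trace with the training mean/std; reuse the
# same two numbers (hypothetical here) to map predictions back to load scale.
load_mean, load_std = 0.25, 0.10
pred = logits * load_std + load_mean
print(logits.shape, float(pred.min()), float(pred.max()))
```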
/tensorflow/LSTM/grid/test.sh:
--------------------------------------------------------------------------------
1 | output_dim_arr=(6 12 18 24 30 36)
2 | grid_arr=("axp0" "axp7" "sahara" "themis")
3 | for output_dim in ${output_dim_arr[@]}; do
4 | echo "The output_dim is $output_dim"
5 | for grid in ${grid_arr[@]}; do
6 | echo "The grid is $grid"
7 | python main.py --output_dim=$output_dim --grid=$grid >> 0901_2.txt
8 | done
9 | done
10 |
--------------------------------------------------------------------------------
/tensorflow/LSTM/grid/utils_grid.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def test_dataset(load, win_i, win_o, ahead_step):  # 80/20 train/test split of a single Grid/Unix load trace
4 | load_len = len(load)
5 | load_test_len = int(0.2 * load_len)
6 | load_train_len = int(0.8 * load_len)
7 | load = np.asarray(load)
8 | load -= np.mean(load[:load_train_len])
9 | load_std = np.std(load[:load_train_len])
10 | load /= load_std
11 | model_train_len = 26 * 288 / ahead_step / 8
12 | model_test_len = 2 * 288 / ahead_step + (288 - win_o - win_i) / ahead_step + 1
13 | tr_num = int(load_train_len / (128 * ahead_step * model_train_len)) * 128
14 | te_num = int(load_test_len / (64 * ahead_step * model_test_len)) * 64
15 | print("Unix system train", tr_num, ", test", te_num)
16 | train_start = load_train_len - tr_num * ahead_step * model_train_len
17 | test_start = -load_test_len
18 | X_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step:
19 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i]
20 | for j in range(model_train_len)] for i in range(tr_num)])
21 | y_train = np.asarray([[load[train_start+i*model_train_len*ahead_step+j*ahead_step+win_i:
22 | train_start+i*model_train_len*ahead_step+j*ahead_step+win_i+win_o]
23 | for j in range(model_train_len)] for i in range(tr_num)])
24 | X_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step:
25 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i]
26 | for j in range(model_test_len)] for i in range(te_num)])
27 | y_test = np.asarray([[load[test_start+i*model_test_len*ahead_step+j*ahead_step+win_i:
28 | test_start+i*model_test_len*ahead_step+j*ahead_step+win_i+win_o]
29 | for j in range(model_test_len)] for i in range(te_num)])
30 | print(X_train.shape, y_train.shape)
31 | print(X_test.shape, y_test.shape)
32 | return X_train, y_train, X_test, y_test, load_std
33 |
--------------------------------------------------------------------------------
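A condensed sketch of the split `test_dataset` performs: the first 80% of the trace supplies training windows and the last 20% test windows, with standardization statistics taken from the training portion only so no test information leaks into the normalization (toy length; the real window counts depend on win_i, win_o, and ahead_step):

```python
import numpy as np

load = np.random.rand(1000)  # hypothetical Unix load trace
train_len = int(0.8 * len(load))

# Standardize with statistics from the training portion only.
mean, std = load[:train_len].mean(), load[:train_len].std()
load = (load - mean) / std

train, test = load[:train_len], load[train_len:]
print(train.shape, test.shape)  # (800,) (200,)
```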
/tensorflow/LSTM/grid/utils_grid.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniqueAndys/Host-Load-Prediction-with-LSTM/cb4997a68123caad649175d987dffe0627691db0/tensorflow/LSTM/grid/utils_grid.pyc
--------------------------------------------------------------------------------
/tensorflow/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlow implementation
2 |
3 | > Predicting host load with ESN and LSTM models
4 |
5 | - [ESN](./ESN) Echo State Networks
6 | - [LSTM](./LSTM) Long Short-Term Memory
7 |
--------------------------------------------------------------------------------