├── berkshire_err.png
├── berkshire_value.png
├── rnn_tensorboard.png
├── README.md
├── fix.py
├── kospi.py
├── fb.py
├── apple.py
├── ms.py
├── samsung.py
├── amazon.py
├── berkshire.py
├── hyundai_motor.py
├── posco.py
├── sk_hynix.py
├── Alphabet.py
├── korea_electric_power_corporation.py
├── samsung_yahoo.py
└── LICENSE
/berkshire_err.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JKKorea/rnn_stock_predictions/HEAD/berkshire_err.png
--------------------------------------------------------------------------------
/berkshire_value.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JKKorea/rnn_stock_predictions/HEAD/berkshire_value.png
--------------------------------------------------------------------------------
/rnn_tensorboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JKKorea/rnn_stock_predictions/HEAD/rnn_tensorboard.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # rnn_stock_predictions
2 | Data crawling, preprocessing, training, and model visualization, automated end to end
3 |
4 | # Requirements
5 | - Python 3.5.3
6 | - tensorflow 1.1.0
7 | - pandas_datareader
8 | - numpy
9 | - matplotlib
10 | - datetime (standard library)
11 |
12 | # Run
13 | - python apple.py
14 | - python kospi.py
15 | - tensorboard --logdir=./tensorflowlog
16 | - tensorboard UI:
17 |   http://localhost:6006/
18 |   (Graphs tab)
19 |
20 | # Model
21 | - RNN (LSTM) + fully connected output layer
22 | - Train : Test = 70 : 30
23 | - Train period = 2010.1.2~2017.5.27
24 | - Prediction input window = 2017.5.28~2017.6.7
25 | - Predicted (real) date = 2017.6.8
26 |
27 | # Results
28 | - Alphabet 0.050 (RMSE) 1004.28 (Real) 1001.59 (Predicted)
29 | - apple 0.020 (RMSE) 154.99 (Real) 155.37 (Predicted)
30 | - berkshire 0.016 (RMSE) 250305 (Real) 249621 (Predicted)
31 | - hyundai_motor 0.020 (RMSE) 160000 (Real) 159000 (Predicted)
32 | - kospi 0.022 (RMSE) 2363.57 (Real) 2360.14 (Predicted)
33 | - samsung_electronics 0.022 (RMSE) 2258000 (Real) 2265000 (Predicted)
34 | - sk_hynix 0.024 (RMSE) 56700 (Real) 56500 (Predicted)
35 | - berkshire (plots below)
36 |
37 | 
38 | 
39 | 
40 |
41 |
42 |
43 | # Reference
44 | - https://github.com/hunkim/DeepLearningZeroToAll
--------------------------------------------------------------------------------
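
The Model section trains on 7-day windows of OHLCV rows and predicts the next day's close. As a reference for the scripts below, here is a minimal sketch of that windowing; make_windows is an illustrative name, not a function in this repo:

import numpy as np

def make_windows(xy, seq_length=7, close_col=3):
    # each sample: seq_length consecutive rows; label: the next day's Close
    dataX, dataY = [], []
    for i in range(len(xy) - seq_length):
        dataX.append(xy[i:i + seq_length])
        dataY.append(xy[i + seq_length, close_col])
    return np.array(dataX), np.array(dataY).reshape(-1, 1)

# random stand-in for scaled Open/High/Low/Close/Volume rows
fake = np.random.rand(100, 5)
X, y = make_windows(fake)
print(X.shape, y.shape)  # (93, 7, 5) (93, 1)
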
/fix.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "KRX:KOSPI"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # build a dataset
53 | dataX = []
54 | dataY = []
55 | for i in range(0, len(y) - seq_length):
56 | _x = x[i:i + seq_length]
57 | _y = y[i + seq_length] # Next close price
58 | # print(_x, "->", _y)
59 | dataX.append(_x)
60 | dataY.append(_y)
61 |
62 | # train/test split
63 | train_size = int(len(dataY) * 0.7)
64 | test_size = len(dataY) - train_size
65 | trainX, testX = np.array(dataX[0:train_size]), np.array(
66 | dataX[train_size:len(dataX)])
67 | trainY, testY = np.array(dataY[0:train_size]), np.array(
68 | dataY[train_size:len(dataY)])
69 |
70 | # input place holders
71 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
72 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
73 |
74 | # build a LSTM network
75 | cell = tf.contrib.rnn.BasicLSTMCell(
76 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
77 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
78 |
79 | Y_pred = tf.contrib.layers.fully_connected(
80 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
81 |
82 | # cost/loss
83 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
84 |
85 | # optimizer
86 | optimizer = tf.train.AdamOptimizer(learning_rate)
87 | train = optimizer.minimize(loss, name='train')
88 |
89 | # RMSE
90 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
91 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
92 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
93 |
94 | with tf.Session() as sess:
95 | init = tf.global_variables_initializer()
96 | sess.run(init)
97 |
98 | # Tensorboard
99 | merged = tf.summary.merge_all()
100 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
101 |
102 | losslist = []
103 | # Training step
104 | for i in range(iterations):
105 | _, step_loss = sess.run([train, loss], feed_dict={
106 | X: trainX, Y: trainY})
107 | print("[step: {}] loss: {}".format(i, step_loss))
108 | losslist = np.append(losslist, step_loss)
109 |
110 | # Test step
111 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
112 | rmse_val = sess.run(rmse, feed_dict={
113 | targets: testY, predictions: test_predict})
114 | print("RMSE: {}".format(rmse_val))
115 |
116 | # Print train_size, test_size
117 | print("train_size : {}".format(train_size))
118 | print("test_size : {}".format(test_size))
119 |
120 | # end time setting, print time
121 | elapsedTime = time.time() - startTime
122 | print("it took " + "%.3f"%(elapsedTime) + " s.")
123 |
124 | # Plot loss
125 | plt.figure(1)
126 | plt.plot(losslist, color="green", label="Error")
127 | plt.xlabel("Iteration Number")
128 | plt.ylabel("Sum of the Squared Error")
129 | plt.legend(loc='upper right', frameon=False)
130 |
131 | # Plot predictions
132 | plt.figure(2)
133 | plt.plot(testY, color ="red", label ="Real")
134 | plt.plot(test_predict, color ="blue", label ="Prediction")
135 | plt.xlabel("Time Period")
136 | plt.ylabel("Stock Price")
137 | plt.legend(loc='upper left', frameon=False)
138 | plt.xticks([])
139 | plt.yticks([])
140 | plt.show()
--------------------------------------------------------------------------------
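
fix.py computes test_min, test_max and denom before scaling but never maps predictions back into price units (it plots scaled values only). A minimal sketch of undoing MinMaxScaler, assuming the test_predict, test_min and denom names from fix.py; inverse_minmax is an illustrative helper, not part of the repo:

import numpy as np

def inverse_minmax(scaled, col_min, col_denom):
    # MinMaxScaler computes x' = (x - min) / (denom + 1e-7); invert it
    return scaled * (col_denom + 1e-7) + col_min

# Close is column index 3 of Open/High/Low/Close/Volume, so for fix.py:
# prices = inverse_minmax(test_predict, test_min[3], denom[3])
print(inverse_minmax(np.array([0.0, 0.5, 1.0]), 100.0, 50.0))  # ~[100. 125. 150.]
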
/kospi.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 |
8 | def MinMaxScaler(data):
9 | numerator = data - np.min(data, 0)
10 | denominator = np.max(data, 0) - np.min(data, 0)
11 | # noise term prevents the zero division
12 | return numerator / (denominator + 1e-7)
13 |
14 | # train Parameters
15 | timesteps = seq_length = 7
16 | data_dim = 5
17 | hidden_dim = 10
18 | output_dim = 1
19 | learning_rate = 0.01
20 | iterations = 500
21 |
22 | # Choose stock
23 | stock = "KRX:KOSPI"
24 |
25 | # start time setting
26 | startTime = time.time()
27 |
28 | # data crawling part
29 | from pandas_datareader import data
30 | import datetime
31 |
32 | start = datetime.datetime(2010, 1, 2)
33 | end = datetime.datetime(2017, 5, 27)
34 | df = data.DataReader(
35 | stock, # name
36 | "google", # data source
37 | start, # start
38 | end # end
39 | )
40 |
41 | # Convert pandas dataframe to numpy array
42 | xy = df.as_matrix()
43 |
44 | # Columns: Open, High, Low, Close, Volume
45 | test_min = np.min(xy,0)
46 | test_max = np.max(xy,0)
47 | denom = test_max - test_min
48 |
49 | xy = MinMaxScaler(xy)
50 | x = xy
51 | y = xy[:, [-2]] # Close as label
52 |
53 | # prediction input: last 7-day window
54 | start = datetime.datetime(2017, 5, 28)
55 | end = datetime.datetime(2017, 6, 7)
56 | df = data.DataReader(
57 | stock, # name
58 | "google", # data source
59 | start, # start
60 | end # end
61 | )
62 |
63 | last_window = df.as_matrix()  # raw 7-day window
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the predicted date
69 | start = datetime.datetime(2017, 6, 8)
70 | end = datetime.datetime(2017, 6, 8)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real = df.as_matrix()
79 |
80 |
81 | # build a dataset
82 | dataX = []
83 | dataY = []
84 | for i in range(0, len(y) - seq_length):
85 | _x = x[i:i + seq_length]
86 | _y = y[i + seq_length] # Next close price
87 | # print(_x, "->", _y)
88 | dataX.append(_x)
89 | dataY.append(_y)
90 |
91 | # train/test split 70 / 30
92 | train_size = int(len(dataY) * 0.7)
93 | test_size = len(dataY) - train_size
94 | trainX, testX = np.array(dataX[0:train_size]), np.array(
95 | dataX[train_size:len(dataX)])
96 | trainY, testY = np.array(dataY[0:train_size]), np.array(
97 | dataY[train_size:len(dataY)])
98 |
99 | # input place holders
100 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
101 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
102 |
103 | # build a LSTM network
104 | cell = tf.contrib.rnn.BasicLSTMCell(
105 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
106 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
107 |
108 | Y_pred = tf.contrib.layers.fully_connected(
109 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
110 |
111 | # cost/loss
112 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
113 |
114 | # optimizer
115 | optimizer = tf.train.AdamOptimizer(learning_rate)
116 | train = optimizer.minimize(loss, name='train')
117 |
118 | # RMSE
119 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
120 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
121 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
122 |
123 | with tf.Session() as sess:
124 | init = tf.global_variables_initializer()
125 | sess.run(init)
126 |
127 | # Tensorboard
128 | merged = tf.summary.merge_all()
129 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
130 |
131 | losslist = []
132 | # Training step
133 | for i in range(iterations):
134 | _, step_loss = sess.run([train, loss], feed_dict={
135 | X: trainX, Y: trainY})
136 | print("[step: {}] loss: {}".format(i, step_loss))
137 | losslist = np.append(losslist, step_loss)
138 |
139 | # Test step
140 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
141 | rmse_val = sess.run(rmse, feed_dict={
142 | targets: testY, predictions: test_predict})
143 | print("RMSE: {}".format(rmse_val))
144 |
145 | # Predictions test
146 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
147 | print("real ", end='')
148 | print(real[0][-2])
149 |
150 | print("predictions ", end='')
151 | print((prediction_test*test_last_denom + test_last_min)[-1][-2])
152 |
153 | # end time setting, print time
154 | elapsedTime = time.time() - startTime
155 | print("it took " + "%.3f"%(elapsedTime) + " s.")
156 |
157 | # Plot loss
158 | plt.figure(1)
159 | plt.plot(losslist, color="green", label="Error")
160 | plt.xlabel("Iteration Number")
161 | plt.ylabel("Sum of the Squared Error")
162 | plt.legend(loc='upper right', frameon=False)
163 |
164 | # Plot predictions
165 | plt.figure(2)
166 | plt.plot(testY, color ="red", label ="Real")
167 | plt.plot(test_predict, color ="blue", label ="Prediction")
168 | plt.xlabel("Time Period")
169 | plt.ylabel("Stock Price")
170 | plt.legend(loc='upper left', frameon=False)
171 | plt.xticks([])
172 | plt.yticks([])
173 | plt.show()
--------------------------------------------------------------------------------
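
How the last print in kospi.py recovers an index value: the model output has shape (1, 1), while test_last_min and test_last_denom hold per-column statistics of shape (5,), so prediction * denom + min broadcasts to (1, 5) and [-1][-2] selects the Close column. A toy illustration with made-up numbers:

import numpy as np

pred = np.array([[0.5]])                          # scaled model output, shape (1, 1)
col_min = np.array([10.0, 11.0, 9.0, 10.5, 1e6])  # stand-in per-column minima
col_denom = np.array([2.0, 2.0, 2.0, 2.0, 1e5])   # stand-in per-column ranges
rescaled = pred * col_denom + col_min             # broadcasts to shape (1, 5)
print(rescaled[-1][-2])                           # Close column: 0.5*2 + 10.5 = 11.5
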
/fb.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "NASDAQ:FB"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2012, 5, 18)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 | losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 | rmse_val = sess.run(rmse, feed_dict={
141 | targets: testY, predictions: test_predict})
142 | print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/real_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 | print("it took " + "%.3f"%(elapsedTime) + " s.")
164 |
165 | # Plot loss
166 | plt.figure(1)
167 | plt.plot(losslist, color="green", label="Error")
168 | plt.xlabel("Iteration Number")
169 | plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
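
A practical note on the data source: the "google" reader in pandas_datareader stopped working after Google Finance retired its API, so these downloads no longer run as-is; samsung_yahoo.py in this repo points at the obvious fallback. A hedged sketch of the same download against Yahoo (the column set and order differ from Google's five columns, so check df.columns and adjust data_dim before reusing the code above):

import datetime
from pandas_datareader import data

start = datetime.datetime(2012, 5, 18)
end = datetime.datetime(2017, 7, 14)
df = data.DataReader("FB", "yahoo", start, end)  # plain ticker, no exchange prefix
print(df.columns)  # typically Open, High, Low, Close, Adj Close, Volume
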
/apple.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "NASDAQ:AAPL"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 | losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 | rmse_val = sess.run(rmse, feed_dict={
141 | targets: testY, predictions: test_predict})
142 | print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/real_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 | print("it took " + "%.3f"%(elapsedTime) + " s.")
164 |
165 | # Plot loss
166 | plt.figure(1)
167 | plt.plot(losslist, color="green", label="Error")
168 | plt.xlabel("Iteration Number")
169 | plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
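
Each script calls tf.summary.merge_all() without defining any summaries, so merged is None and TensorBoard receives only the graph. A minimal sketch of actually logging the training loss with the TF 1.x summary API, meant to slot into the scripts above (only summary, merged and writer are new names):

import tensorflow as tf

# after `loss` is defined in the graph:
tf.summary.scalar('loss', loss)

# inside the session, in place of the existing merge_all/FileWriter lines:
merged = tf.summary.merge_all()  # now a real op instead of None
writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
for i in range(iterations):
    _, step_loss, summary = sess.run([train, loss, merged],
                                     feed_dict={X: trainX, Y: trainY})
    writer.add_summary(summary, i)  # one scalar point per training step
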
/ms.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "NASDAQ:MSFT"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 | losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 | rmse_val = sess.run(rmse, feed_dict={
141 | targets: testY, predictions: test_predict})
142 | print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/real_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 | print("it took " + "%.3f"%(elapsedTime) + " s.")
164 |
165 | # Plot loss
166 | plt.figure(1)
167 | plt.plot(losslist, color="green", label="Error")
168 | plt.xlabel("Iteration Number")
169 | plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
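
None of the scripts persist trained weights, so every run retrains from scratch. If reuse is wanted, TF 1.x checkpointing is the standard route; a minimal sketch assuming the graph from the script above has been built (the checkpoint path is illustrative):

import os
import tensorflow as tf

saver = tf.train.Saver()  # captures the trainable variables in the graph

os.makedirs("./checkpoints", exist_ok=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ... run the training loop from the script ...
    saver.save(sess, "./checkpoints/model.ckpt")

# a later run can restore instead of retraining:
# saver.restore(sess, "./checkpoints/model.ckpt")
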
/samsung.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "KRX:005930"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 | losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 | rmse_val = sess.run(rmse, feed_dict={
141 | targets: testY, predictions: test_predict})
142 | print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/real_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 | print("it took " + "%.3f"%(elapsedTime) + " s.")
164 |
165 | # Plot loss
166 | plt.figure(1)
167 | plt.plot(losslist, color="green", label="Error")
168 | plt.xlabel("Iteration Number")
169 | plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
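
samsung.py, amazon.py, berkshire.py, hyundai_motor.py and posco.py are essentially identical except for the `stock` string (fb.py also shifts the start date to the IPO). A sketch of collapsing them into one parameterized entry point; run_experiment is an illustrative name, and its body would be the shared download/scale/window/train/evaluate pipeline from the scripts:

import datetime

def run_experiment(stock,
                   start=datetime.datetime(2010, 1, 2),
                   end=datetime.datetime(2017, 7, 14)):
    # body: DataReader download -> MinMaxScaler -> 7-day windows
    #       -> LSTM training -> RMSE and next-day prediction,
    #       exactly as in the scripts above
    pass

if __name__ == "__main__":
    for ticker in ["KRX:005930", "NASDAQ:AMZN", "NYSE:BRK.A"]:
        run_experiment(ticker)
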
/amazon.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "NASDAQ:AMZN"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 | losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 | rmse_val = sess.run(rmse, feed_dict={
141 | targets: testY, predictions: test_predict})
142 | print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/real_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 | print("it took " + "%.3f"%(elapsedTime) + " s.")
164 |
165 | # Plot loss
166 | plt.figure(1)
167 | plt.plot(losslist, color="green", label="Error")
168 | plt.xlabel("Iteration Number")
169 | plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
/berkshire.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "NYSE:BRK.A"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 | losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 | rmse_val = sess.run(rmse, feed_dict={
141 | targets: testY, predictions: test_predict})
142 | print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/real_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 | print("it took " + "%.3f"%(elapsedTime) + " s.")
164 |
165 | # Plot loss
166 | plt.figure(1)
167 | plt.plot(losslist, color="green", label="Error")
168 | plt.xlabel("Iteration Number")
169 | plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
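
The RMSE figures in the README (0.016 for Berkshire against prices around 250,000 USD) are computed on MinMax-scaled closes, not on prices. To express the error in price units, multiply by the Close column's range; a short fragment that would slot in after the test step above, using the script's denom and rmse_val with Close at column index 3:

close_range = denom[3]               # max(Close) - min(Close) over the data
rmse_price = rmse_val * close_range  # scaled RMSE -> approximate price units
print("RMSE in price units:", rmse_price)
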
/hyundai_motor.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "KRX:005380"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 | losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 | rmse_val = sess.run(rmse, feed_dict={
141 | targets: testY, predictions: test_predict})
142 | print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/real_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 | print("it took " + "%.3f"%(elapsedTime) + " s.")
164 |
165 | # Plot loss
166 | plt.figure(1)
167 | plt.plot(losslist, color="green", label="Error")
168 | plt.xlabel("Iteration Number")
169 | plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
/posco.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "KRX:005490"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling part
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Columns: Open, High, Low, Close, Volume
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | last_window = df.as_matrix()  # raw 7-day window
63 |
64 | test_last_min = np.min(last_window, 0)
65 | test_last_denom = np.max(last_window, 0) - test_last_min
66 | test_last_X = MinMaxScaler(last_window).reshape(1,7,5)  # scale to [0,1] as the network expects
67 |
68 | # actual value on the prediction date
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 |     losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 |     rmse_val = sess.run(rmse, feed_dict={
141 |         targets: testY, predictions: test_predict})
142 |     print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/prediction_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 |     print("it took %.3f s." % elapsedTime)
164 |
165 |     # Plot loss
166 |     plt.figure(1)
167 |     plt.plot(losslist, color="green", label="Error")
168 |     plt.xlabel("Iteration Number")
169 |     plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
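
The scripts in this repo pull daily bars through pandas-datareader's "google" source, which stopped working once Google Finance retired its API. A minimal sketch of the same download against the Stooq source instead (assuming a recent pandas-datareader; the ".US" ticker suffix and the newest-first row order are Stooq conventions):

import datetime
from pandas_datareader import data

start = datetime.datetime(2010, 1, 2)
end = datetime.datetime(2017, 7, 14)

# Stooq serves daily OHLCV bars; ticker suffixes vary by exchange (e.g. "AAPL.US")
df = data.DataReader("AAPL.US", "stooq", start, end)
df = df.sort_index()  # Stooq returns rows newest-first; the windowing code expects oldest-first
xy = df[["Open", "High", "Low", "Close", "Volume"]].values  # .values replaces the deprecated .as_matrix()
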
/sk_hynix.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "KRX:000660"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling parts
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Open, High, Low, Close, Volume (Google Finance column order; Close is index -2)
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | test_last_X = df.as_matrix().reshape(1, 7, 5)  # one window: 7 trading days x 5 features
63 |
64 | test_last_min = np.min(test_last_X, 1)  # per-feature min over the 7 days (axis 0 has length 1)
65 | test_last_max = np.max(test_last_X, 1)
66 | test_last_denom = test_last_max - test_last_min
67 | test_last_X = (test_last_X - test_last_min) / (test_last_denom + 1e-7)  # scale the window as in training; inverted after prediction
68 | # real Prediction data
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 |     losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 |     rmse_val = sess.run(rmse, feed_dict={
141 |         targets: testY, predictions: test_predict})
142 |     print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/prediction_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 |     print("it took %.3f s." % elapsedTime)
164 |
165 |     # Plot loss
166 |     plt.figure(1)
167 |     plt.plot(losslist, color="green", label="Error")
168 |     plt.xlabel("Iteration Number")
169 |     plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
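
MinMaxScaler above discards the statistics it scales with, so each script recomputes min/max by hand before undoing the transform. A small sketch of a fit/inverse pair that keeps the two directions consistent (the helper names are illustrative, not part of the repo):

import numpy as np

def minmax_fit_transform(data):
    """Scale each column to [0, 1] and return the stats needed to invert it."""
    lo = np.min(data, 0)
    hi = np.max(data, 0)
    return (data - lo) / (hi - lo + 1e-7), lo, hi  # epsilon avoids division by zero

def minmax_inverse(scaled, lo, hi):
    """Map scaled values back to the original price range."""
    return scaled * (hi - lo + 1e-7) + lo

# usage: fit once on the training window, reuse the same stats for predictions
# xy_scaled, lo, hi = minmax_fit_transform(xy)
# close_pred = minmax_inverse(y_pred, lo[-2], hi[-2])  # -2 is the Close column here
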
/Alphabet.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "NASDAQ:GOOGL"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling parts
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Open, High, Low, Close, Volume (Google Finance column order; Close is index -2)
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | test_last_X = df.as_matrix().reshape(1, 7, 5)  # one window: 7 trading days x 5 features
63 |
64 | test_last_min = np.min(test_last_X, 1)  # per-feature min over the 7 days (axis 0 has length 1)
65 | test_last_max = np.max(test_last_X, 1)
66 | test_last_denom = test_last_max - test_last_min
67 | test_last_X = (test_last_X - test_last_min) / (test_last_denom + 1e-7)  # scale the window as in training; inverted after prediction
68 | # real Prediction data
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 |     losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 |     rmse_val = sess.run(rmse, feed_dict={
141 |         targets: testY, predictions: test_predict})
142 |     print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/prediction_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 |     print("it took %.3f s." % elapsedTime)
164 |
165 |     # Plot loss
166 |     plt.figure(1)
167 |     plt.plot(losslist, color="green", label="Error")
168 |     plt.xlabel("Iteration Number")
169 |     plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
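
Every script opens a tf.summary.FileWriter but registers no summaries, so tf.summary.merge_all() returns None and TensorBoard only shows the graph. A sketch of logging the loss curve under TF 1.x, reusing the loss, train, X, and Y tensors defined above:

tf.summary.scalar('loss', loss)  # register the training loss as a scalar summary
merged = tf.summary.merge_all()  # non-None now that a summary exists

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
    for i in range(iterations):
        _, summary = sess.run([train, merged], feed_dict={X: trainX, Y: trainY})
        writer.add_summary(summary, i)  # one loss point per step under the Scalars tab
    writer.close()
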
/korea_electric_power_corporation.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 |
6 | tf.set_random_seed(777) # reproducibility
7 | def MinMaxScaler(data):
8 | numerator = data - np.min(data, 0)
9 | denominator = np.max(data, 0) - np.min(data, 0)
10 | # noise term prevents the zero division
11 | return numerator / (denominator + 1e-7)
12 |
13 | # train Parameters
14 | timesteps = seq_length = 7
15 | data_dim = 5
16 | hidden_dim = 10
17 | output_dim = 1
18 | learning_rate = 0.01
19 | iterations = 500
20 |
21 | # Choose stock
22 | stock = "KRX:015760"
23 |
24 | # start time setting
25 | startTime = time.time()
26 |
27 | # data crawling parts
28 | from pandas_datareader import data
29 | import datetime
30 |
31 | start = datetime.datetime(2010, 1, 2)
32 | end = datetime.datetime(2017, 7, 14)
33 | df = data.DataReader(
34 | stock, # name
35 | "google", # data source
36 | start, # start
37 | end # end
38 | )
39 |
40 | # Convert pandas dataframe to numpy array
41 | xy = df.as_matrix()
42 |
43 | # Open, High, Low, Close, Volume (Google Finance column order; Close is index -2)
44 | test_min = np.min(xy,0)
45 | test_max = np.max(xy,0)
46 | denom = test_max - test_min
47 |
48 | xy = MinMaxScaler(xy)
49 | x = xy
50 | y = xy[:, [-2]] # Close as label
51 |
52 | # data for Prediction
53 | start = datetime.datetime(2017, 7, 18)
54 | end = datetime.datetime(2017, 7, 26)
55 | df = data.DataReader(
56 | stock, # name
57 | "google", # data source
58 | start, # start
59 | end # end
60 | )
61 |
62 | test_last_X = df.as_matrix().reshape(1, 7, 5)  # one window: 7 trading days x 5 features
63 |
64 | test_last_min = np.min(test_last_X, 1)  # per-feature min over the 7 days (axis 0 has length 1)
65 | test_last_max = np.max(test_last_X, 1)
66 | test_last_denom = test_last_max - test_last_min
67 | test_last_X = (test_last_X - test_last_min) / (test_last_denom + 1e-7)  # scale the window as in training; inverted after prediction
68 | # real Prediction data
69 | start = datetime.datetime(2017, 7, 27)
70 | end = datetime.datetime(2017, 7, 27)
71 | df = data.DataReader(
72 | stock, # name
73 | "google", # data source
74 | start, # start
75 | end # end
76 | )
77 |
78 | real_stock = df.as_matrix()
79 |
80 | # build a dataset
81 | dataX = []
82 | dataY = []
83 | for i in range(0, len(y) - seq_length):
84 | _x = x[i:i + seq_length]
85 | _y = y[i + seq_length] # Next close price
86 | # print(_x, "->", _y)
87 | dataX.append(_x)
88 | dataY.append(_y)
89 |
90 | # train/test split 70 / 30
91 | train_size = int(len(dataY) * 0.7)
92 | test_size = len(dataY) - train_size
93 | trainX, testX = np.array(dataX[0:train_size]), np.array(
94 | dataX[train_size:len(dataX)])
95 | trainY, testY = np.array(dataY[0:train_size]), np.array(
96 | dataY[train_size:len(dataY)])
97 |
98 | # input place holders
99 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
100 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
101 |
102 | # build a LSTM network
103 | cell = tf.contrib.rnn.BasicLSTMCell(
104 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
105 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
106 |
107 | Y_pred = tf.contrib.layers.fully_connected(
108 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
109 |
110 | # cost/loss
111 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
112 |
113 | # optimizer
114 | optimizer = tf.train.AdamOptimizer(learning_rate)
115 | train = optimizer.minimize(loss, name='train')
116 |
117 | # RMSE
118 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
119 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
120 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
121 |
122 | with tf.Session() as sess:
123 | init = tf.global_variables_initializer()
124 | sess.run(init)
125 |
126 | # Tensorboard
127 | merged = tf.summary.merge_all()
128 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
129 |
130 |     losslist = []
131 | # Training step
132 | for i in range(iterations):
133 | _, step_loss = sess.run([train, loss], feed_dict={
134 | X: trainX, Y: trainY})
135 | print("[step: {}] loss: {}".format(i, step_loss))
136 | losslist = np.append(losslist, step_loss)
137 |
138 | # Test step
139 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
140 |     rmse_val = sess.run(rmse, feed_dict={
141 |         targets: testY, predictions: test_predict})
142 |     print("RMSE: {}".format(rmse_val))
143 |
144 | # Print train_size, test_size
145 | print("train_size : {}".format(train_size))
146 | print("test_size : {}".format(test_size))
147 |
148 | # Predictions test
149 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
150 | print("real stock price : ", end='')
151 | real_value = real_stock[0][-2]
152 | print(real_value)
153 |
154 | print("prediction stock price : ", end='')
155 | prediction_value = (prediction_test*test_last_denom + test_last_min)[-1][-2]
156 | print(prediction_value)
157 |
158 | print("Error rate : ", end='')
159 | print(abs(prediction_value - real_value)/prediction_value * 100)
160 |
161 | # end time setting, print time
162 | elapsedTime = time.time() - startTime
163 |     print("it took %.3f s." % elapsedTime)
164 |
165 |     # Plot loss
166 |     plt.figure(1)
167 |     plt.plot(losslist, color="green", label="Error")
168 |     plt.xlabel("Iteration Number")
169 |     plt.ylabel("Sum of the Squared Error")
170 | plt.legend(loc='upper right', frameon=False)
171 |
172 | # Plot predictions
173 | plt.figure(2)
174 | plt.plot(testY, color ="red", label ="Real")
175 | plt.plot(test_predict, color ="blue", label ="Prediction")
176 | plt.xlabel("Time Period")
177 | plt.ylabel("Stock Price")
178 | plt.legend(loc='upper left', frameon=False)
179 | plt.xticks([])
180 | plt.yticks([])
181 | plt.show()
--------------------------------------------------------------------------------
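
The Google-sourced scripts read the Close price at position -2 while the Yahoo script needs -3, because the two sources return five and six columns respectively. Looking the column up by name removes the per-source offset; a standalone sketch with a toy frame in place of a pandas-datareader result:

import numpy as np
import pandas as pd

# toy frame standing in for a pandas-datareader download
df = pd.DataFrame(np.arange(10.0).reshape(2, 5),
                  columns=["Open", "High", "Low", "Close", "Volume"])
xy = df.values

close_idx = df.columns.get_loc("Close")  # works for 5- and 6-column layouts alike
y = xy[:, [close_idx]]                   # label column selected by name, not by -2/-3
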
/samsung_yahoo.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib
4 | import time
5 | import os
6 |
7 | tf.set_random_seed(777) # reproducibility
8 |
9 | if "DISPLAY" not in os.environ:
10 |     # use a non-interactive backend on headless machines (e.g. Travis CI)
11 | matplotlib.use('Agg')
12 |
13 | import matplotlib.pyplot as plt
14 |
15 | def MinMaxScaler(data):
16 | numerator = data - np.min(data, 0)
17 | denominator = np.max(data, 0) - np.min(data, 0)
18 | # noise term prevents the zero division
19 | return numerator / (denominator + 1e-7)
20 |
21 | # train Parameters
22 | timesteps = seq_length = 8
23 | data_dim = 6
24 | hidden_dim = 10
25 | output_dim = 1
26 | learning_rate = 0.01
27 | iterations = 500
28 |
29 | # Choose stock
30 | stock = "SSNLF"
31 |
32 | # start time setting
33 | startTime = time.time()
34 |
35 | # data crawling parts
36 | import pandas_datareader.data as web
37 | import datetime
38 |
39 | start = datetime.datetime(2010, 1, 2)
40 | end = datetime.datetime(2017, 7, 14)
41 | df = web.DataReader(
42 | stock, # name
43 | 'yahoo', # data source
44 | start, # start
45 | end # end
46 | )
47 |
48 | # Convert pandas dataframe to numpy array
49 | xy = df.as_matrix()
50 |
51 | # Yahoo returns six columns; the Close used as the label sits at index -3
52 | test_min = np.min(xy,0)
53 | test_max = np.max(xy,0)
54 | denom = test_max - test_min
55 |
56 | xy = MinMaxScaler(xy)
57 | x = xy
58 | y = xy[:, [-3]] # Close as label
59 |
60 | # data for Prediction
61 | start = datetime.datetime(2017, 7, 18)
62 | end = datetime.datetime(2017, 7, 26)
63 | df = web.DataReader(
64 | stock, # name
65 | 'yahoo', # data source
66 | start, # start
67 | end # end
68 | )
69 |
70 |
71 | test_last_X = df.as_matrix().reshape(1, 8, 6)  # one window: 8 trading days x 6 features
72 |
73 | test_last_min = np.min(test_last_X, 1)  # per-feature min over the 8 days (axis 0 has length 1)
74 | test_last_max = np.max(test_last_X, 1)
75 | test_last_denom = test_last_max - test_last_min
76 | test_last_X = (test_last_X - test_last_min) / (test_last_denom + 1e-7)  # scale the window as in training; inverted after prediction
77 |
78 |
79 | # real Prediction data
80 | start = datetime.datetime(2017, 7, 27)
81 | end = datetime.datetime(2017, 7, 27)
82 | df = web.DataReader(
83 | stock, # name
84 | 'yahoo', # data source
85 | start, # start
86 | end # end
87 | )
88 |
89 | real_stock = df.as_matrix()
90 |
91 | # build a dataset
92 | dataX = []
93 | dataY = []
94 | for i in range(0, len(y) - seq_length):
95 | _x = x[i:i + seq_length]
96 | _y = y[i + seq_length] # Next close price
97 | # print(_x, "->", _y)
98 | dataX.append(_x)
99 | dataY.append(_y)
100 |
101 | # train/test split 70 / 30
102 | train_size = int(len(dataY) * 0.7)
103 | test_size = len(dataY) - train_size
104 | trainX, testX = np.array(dataX[0:train_size]), np.array(
105 | dataX[train_size:len(dataX)])
106 | trainY, testY = np.array(dataY[0:train_size]), np.array(
107 | dataY[train_size:len(dataY)])
108 |
109 | # input place holders
110 | X = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='input_X')
111 | Y = tf.placeholder(tf.float32, [None, 1], name='input_Y')
112 |
113 | # build a LSTM network
114 | cell = tf.contrib.rnn.BasicLSTMCell(
115 | num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
116 | outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
117 |
118 | Y_pred = tf.contrib.layers.fully_connected(
119 | outputs[:, -1], output_dim, activation_fn=None) # We use the last cell's output
120 |
121 | # cost/loss
122 | loss = tf.reduce_sum(tf.square(Y_pred - Y), name='losses_sum') # sum of the squares
123 |
124 | # optimizer
125 | optimizer = tf.train.AdamOptimizer(learning_rate)
126 | train = optimizer.minimize(loss, name='train')
127 |
128 | # RMSE
129 | targets = tf.placeholder(tf.float32, [None, 1], name='targets')
130 | predictions = tf.placeholder(tf.float32, [None, 1], name='predictions')
131 | rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)), name='rmse')
132 |
133 | with tf.Session() as sess:
134 | init = tf.global_variables_initializer()
135 | sess.run(init)
136 |
137 | # Tensorboard
138 | merged = tf.summary.merge_all()
139 | writer = tf.summary.FileWriter("./tensorflowlog", sess.graph)
140 |
141 |     losslist = []
142 | # Training step
143 | for i in range(iterations):
144 | _, step_loss = sess.run([train, loss], feed_dict={
145 | X: trainX, Y: trainY})
146 | print("[step: {}] loss: {}".format(i, step_loss))
147 | losslist = np.append(losslist, step_loss)
148 |
149 | # Test step
150 | test_predict = sess.run(Y_pred, feed_dict={X: testX})
151 |     rmse_val = sess.run(rmse, feed_dict={
152 |         targets: testY, predictions: test_predict})
153 |     print("RMSE: {}".format(rmse_val))
154 |
155 | # Print train_size, test_size
156 | print("train_size : {}".format(train_size))
157 | print("test_size : {}".format(test_size))
158 |
159 | # Predictions test
160 | prediction_test = sess.run(Y_pred, feed_dict={X: test_last_X})
161 | print("real stock price : ", end='')
162 |     real_value = real_stock[0][-3]  # Close, the same column used as the label
163 | print(real_value)
164 |
165 | print("prediction stock price : ", end='')
166 |     prediction_value = (prediction_test * test_last_denom + test_last_min)[-1][-3]  # invert the window scaling for the Close column
167 | print(prediction_value)
168 |
169 | print("Error rate : ", end='')
170 | print(abs(prediction_value - real_value)/prediction_value * 100)
171 |
172 | # end time setting, print time
173 | elapsedTime = time.time() - startTime
174 |     print("it took %.3f s." % elapsedTime)
175 |
176 |     # Plot loss
177 |     plt.figure(1)
178 |     plt.plot(losslist, color="green", label="Error")
179 |     plt.xlabel("Iteration Number")
180 |     plt.ylabel("Sum of the Squared Error")
181 | plt.legend(loc='upper right', frameon=False)
182 |
183 | # Plot predictions
184 | plt.figure(2)
185 | plt.plot(testY, color ="red", label ="Real")
186 | plt.plot(test_predict, color ="blue", label ="Prediction")
187 | plt.xlabel("Time Period")
188 | plt.ylabel("Stock Price")
189 | plt.legend(loc='upper left', frameon=False)
190 | plt.xticks([])
191 | plt.yticks([])
192 | plt.show()
--------------------------------------------------------------------------------
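
Apart from samsung_yahoo.py, the scripts differ only in the ticker, window length, and feature count; the roughly 180 shared lines could live in one parameterized module. A sketch of such an entry point (run_experiment and its defaults are hypothetical names, not part of this repo):

import datetime
from pandas_datareader import data

def run_experiment(stock, source="google", seq_length=7, data_dim=5,
                   start=datetime.datetime(2010, 1, 2),
                   end=datetime.datetime(2017, 7, 14)):
    """Fetch one ticker, then run the shared scale/window/train/predict pipeline."""
    df = data.DataReader(stock, source, start, end)
    # ... the shared body of the scripts above would go here:
    #     MinMaxScaler, windowing, LSTM training, RMSE, one-step prediction ...
    return df

if __name__ == "__main__":
    for ticker in ["KRX:000660", "NASDAQ:GOOGL", "KRX:015760"]:
        run_experiment(ticker)
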
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------