├── README.md
├── cross_vali_cnn.py
├── cross_vali_data_convert_merge.py
├── cross_vali_gru.py
├── cross_vali_input_data.py
└── cross_vali_lstm.py

/README.md:
--------------------------------------------------------------------------------
# Fall detection for CSI data

Implements the deep-learning methods CNN (Convolutional Neural Network), GRU (Gated Recurrent Unit) and LSTM (Long Short-Term Memory) for fall detection in Wi-Fi Channel State Information (CSI) analysis.

My implementation is based on the project https://github.com/ermongroup/Wifi_Activity_Recognition.

## Usage

### Prerequisites
1. Python 2.7
2. Python packages: numpy, pandas, matplotlib, scikit-learn, tensorflow >= 1.0
3. Dataset: download [here](https://drive.google.com/open?id=1AvafhK9raj4CslHtGKGexHIOTJgXMCG9)

### Running

1. Run **cross_vali_data_convert_merge.py**, which generates the training data in the "input_files" folder.
2. Run **cross_vali_lstm.py**, **cross_vali_gru.py** or **cross_vali_cnn.py**.
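A minimal sanity check of the generated files before training (a sketch; the file name follows the converter's defaults of window_size = 1000 and threshold = 60):

```python
import pandas as pd

# Each row of input_files/xx_1000_60_<label>.csv is one flattened sliding
# window: 1000 timesteps x 90 CSI amplitude values per timestep.
xx = pd.read_csv("input_files/xx_1000_60_fall.csv", header=None).values
print(xx.reshape(len(xx), 1000, 90).shape)  # (n_windows, 1000, 90)
```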
## References

* S. Yousefi, H. Narui, S. Dayal, et al., "A Survey on Behavior Recognition Using WiFi Channel State Information," IEEE Communications Magazine, vol. 55, no. 10, pp. 98-104, 2017.
* H. Cheng, J. Zhang, Y. Gao and X. Hei, "Implementing Deep Learning in Wi-Fi Channel State Information Analysis for Fall Detection," IEEE International Conference on Consumer Electronics - Taiwan (ICCE-TW), 2019.

--------------------------------------------------------------------------------
/cross_vali_cnn.py:
--------------------------------------------------------------------------------
# -*- coding:utf-8 -*-
import numpy as np
import tensorflow as tf
import sklearn as sk
from sklearn.metrics import confusion_matrix  # makes sk.metrics available below
from sklearn.model_selection import KFold, cross_val_score
import csv
from sklearn.utils import shuffle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import os

window_size = 500
threshold = 60

# Parameters
learning_rate = 0.0001
training_iters = 2000
batch_size = 64

# Import WiFi Activity data
# csv_convert(window_size,threshold)
from cross_vali_input_data import csv_import, DataSet

# Network Parameters
n_input = 90           # WiFi activity data input (img shape: 90 * window_size)
n_steps = window_size  # timesteps
n_classes = 7          # WiFi activity total classes
display_step = 100

# Output folder
OUTPUT_FOLDER_PATTERN = "CNN_LR{0}_BATCHSIZE{1}/"
output_folder = OUTPUT_FOLDER_PATTERN.format(learning_rate, batch_size)
if not os.path.exists(output_folder):
    os.makedirs(output_folder)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
x_image = tf.reshape(x, [-1, n_steps, n_input, 1])

w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool(h_conv1)

w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool(h_conv2)

# Two 2x2 max-pools with SAME padding: 500 -> 250 -> 125 timesteps and
# 90 -> 45 -> 23 inputs, hence the 125 * 23 * 64 flattened size below.
w_fc1 = weight_variable([125 * 23 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 125 * 23 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

w_fc2 = weight_variable([1024, n_classes])
b_fc2 = bias_variable([n_classes])

pred = tf.matmul(h_fc1_drop, w_fc2) + b_fc2

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()
cvscores = []
confusion_sum = [[0 for i in range(7)] for j in range(7)]

# data import
x_bed, x_fall, x_pickup, x_run, x_sitdown, x_standup, x_walk, \
y_bed, y_fall, y_pickup, y_run, y_sitdown, y_standup, y_walk = csv_import()

print(" bed =", len(x_bed), " fall=", len(x_fall), " pickup =", len(x_pickup), " run=", len(x_run),
      " sitdown=", len(x_sitdown), " standup=", len(x_standup), " walk=", len(x_walk))

# data shuffle
x_bed, y_bed = shuffle(x_bed, y_bed, random_state=0)
x_fall, y_fall = shuffle(x_fall, y_fall, random_state=0)
x_pickup, y_pickup = shuffle(x_pickup, y_pickup, random_state=0)
x_run, y_run = shuffle(x_run, y_run, random_state=0)
x_sitdown, y_sitdown = shuffle(x_sitdown, y_sitdown, random_state=0)
x_standup, y_standup = shuffle(x_standup, y_standup, random_state=0)
x_walk, y_walk = shuffle(x_walk, y_walk, random_state=0)

# k_fold
kk = 10

# Launch the graph
with tf.Session() as sess:
    for i in range(kk):

        # Initialization
        train_loss = []
        train_acc = []
        validation_loss = []
        validation_acc = []

        # Roll the data
        x_bed = np.roll(x_bed, int(len(x_bed) / kk), axis=0)
        y_bed = np.roll(y_bed, int(len(y_bed) / kk), axis=0)
        x_fall = np.roll(x_fall, int(len(x_fall) / kk), axis=0)
        y_fall = np.roll(y_fall, int(len(y_fall) / kk), axis=0)
        x_pickup = np.roll(x_pickup, int(len(x_pickup) / kk), axis=0)
        y_pickup = np.roll(y_pickup, int(len(y_pickup) / kk), axis=0)
        x_run = np.roll(x_run, int(len(x_run) / kk), axis=0)
        y_run = np.roll(y_run, int(len(y_run) / kk), axis=0)
        x_sitdown = np.roll(x_sitdown, int(len(x_sitdown) / kk), axis=0)
        y_sitdown = np.roll(y_sitdown, int(len(y_sitdown) / kk), axis=0)
        x_standup = np.roll(x_standup, int(len(x_standup) / kk), axis=0)
        y_standup = np.roll(y_standup, int(len(y_standup) / kk), axis=0)
        x_walk = np.roll(x_walk, int(len(x_walk) / kk), axis=0)
        y_walk = np.roll(y_walk, int(len(y_walk) / kk), axis=0)
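        # How the cross-validation works (explanatory note): each np.roll by
        # len/kk rotates a fresh 1/kk slice of every class to the front, so the
        # "[:len/kk]" rows taken for validation below differ on every fold and
        # the kk folds together sweep the whole shuffled dataset once.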

        # data separation
        wifi_x_train = np.r_[x_bed[int(len(x_bed) / kk):], x_fall[int(len(x_fall) / kk):],
                             x_pickup[int(len(x_pickup) / kk):], x_run[int(len(x_run) / kk):],
                             x_sitdown[int(len(x_sitdown) / kk):], x_standup[int(len(x_standup) / kk):],
                             x_walk[int(len(x_walk) / kk):]]

        wifi_y_train = np.r_[y_bed[int(len(y_bed) / kk):], y_fall[int(len(y_fall) / kk):],
                             y_pickup[int(len(y_pickup) / kk):], y_run[int(len(y_run) / kk):],
                             y_sitdown[int(len(y_sitdown) / kk):], y_standup[int(len(y_standup) / kk):],
                             y_walk[int(len(y_walk) / kk):]]

        # drop the NoActivity flag column, keeping the 7 activity classes
        wifi_y_train = wifi_y_train[:, 1:]

        wifi_x_validation = np.r_[x_bed[:int(len(x_bed) / kk)], x_fall[:int(len(x_fall) / kk)],
                                  x_pickup[:int(len(x_pickup) / kk)], x_run[:int(len(x_run) / kk)],
                                  x_sitdown[:int(len(x_sitdown) / kk)], x_standup[:int(len(x_standup) / kk)],
                                  x_walk[:int(len(x_walk) / kk)]]

        wifi_y_validation = np.r_[y_bed[:int(len(y_bed) / kk)], y_fall[:int(len(y_fall) / kk)],
                                  y_pickup[:int(len(y_pickup) / kk)], y_run[:int(len(y_run) / kk)],
                                  y_sitdown[:int(len(y_sitdown) / kk)], y_standup[:int(len(y_standup) / kk)],
                                  y_walk[:int(len(y_walk) / kk)]]

        wifi_y_validation = wifi_y_validation[:, 1:]

        # data set
        wifi_train = DataSet(wifi_x_train, wifi_y_train)
        wifi_validation = DataSet(wifi_x_validation, wifi_y_validation)
        print(wifi_x_train.shape, wifi_y_train.shape, wifi_x_validation.shape, wifi_y_validation.shape)
        saver = tf.train.Saver()
        sess.run(init)
        step = 1

        # Keep training until reach max iterations
        while step < training_iters:
            batch_x, batch_y = wifi_train.next_batch(batch_size)
            x_vali = wifi_validation.images[:]
            y_vali = wifi_validation.labels[:]
            # Reshape data to (batch_size, n_steps, n_input)
            batch_x = batch_x.reshape((batch_size, n_steps, n_input))
            x_vali = x_vali.reshape((-1, n_steps, n_input))
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})

            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
            acc_vali = sess.run(accuracy, feed_dict={x: x_vali, y: y_vali, keep_prob: 1.0})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
            loss_vali = sess.run(cost, feed_dict={x: x_vali, y: y_vali, keep_prob: 1.0})

            # Store the accuracy and loss
            train_acc.append(acc)
            train_loss.append(loss)
            validation_acc.append(acc_vali)
            validation_loss.append(loss_vali)

            if step % display_step == 0:
                print("Iter " + str(step) + ", Minibatch Training Loss= " +
                      "{:.6f}".format(loss) + ", Training Accuracy= " +
                      "{:.5f}".format(acc) + ", Minibatch Validation Loss= " +
                      "{:.6f}".format(loss_vali) + ", Validation Accuracy= " +
                      "{:.5f}".format(acc_vali))
            step += 1

        # Calculate the confusion_matrix
        cvscores.append(acc_vali * 100)
        y_p = tf.argmax(pred, 1)
        val_accuracy, y_pred = sess.run([accuracy, y_p], feed_dict={x: x_vali, y: y_vali, keep_prob: 1.0})
        y_true = np.argmax(y_vali, 1)
        confusion = sk.metrics.confusion_matrix(y_true, y_pred)
        print(confusion)
        confusion_sum = confusion_sum + confusion
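        # Note: confusion_sum accumulates the per-fold confusion matrices, so
        # the confusion_matrix.txt written after the loop is the element-wise
        # sum over all kk validation folds (rows = true, columns = predicted).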

        # Save the Accuracy curve
        fig = plt.figure(2 * i - 1)
        plt.plot(train_acc)
        plt.plot(validation_acc)
        plt.xlabel("n_epoch")
        plt.ylabel("Accuracy")
        plt.legend(["train_acc", "validation_acc"], loc=4)
        plt.ylim([0, 1])
        plt.savefig((output_folder + "Accuracy_" + str(i) + ".png"), dpi=150)

        # Save the Loss curve
        fig = plt.figure(2 * i)
        plt.plot(train_loss)
        plt.plot(validation_loss)
        plt.xlabel("n_epoch")
        plt.ylabel("Loss")
        plt.legend(["train_loss", "validation_loss"], loc=1)
        plt.ylim([0, 2])
        plt.savefig((output_folder + "Loss_" + str(i) + ".png"), dpi=150)

        np.save(output_folder + "cnn_trainloss_" + str(i) + ".npy", np.array(train_loss))
        np.save(output_folder + "cnn_valloss_" + str(i) + ".npy", np.array(validation_loss))
        np.save(output_folder + "cnn_trainacc_" + str(i) + ".npy", np.array(train_acc))
        np.save(output_folder + "cnn_valacc_" + str(i) + ".npy", np.array(validation_acc))
        print("Optimization Finished!")
        print("%.1f%% (+/- %.1f%%)" % (np.mean(cvscores), np.std(cvscores)))
        saver.save(sess, output_folder + "model.ckpt")

# Save the confusion_matrix
np.savetxt(output_folder + "confusion_matrix.txt", confusion_sum, delimiter=",", fmt='%d')
np.savetxt(output_folder + "accuracy.txt", (np.mean(cvscores), np.std(cvscores)), delimiter=".", fmt='%.1f')
--------------------------------------------------------------------------------
/cross_vali_data_convert_merge.py:
--------------------------------------------------------------------------------
import numpy as np
import csv
import glob
import os

window_size = 1000
threshold = 60
slide_size = 200  # must be less than window_size


def dataimport(path1, path2):
    xx = np.empty([0, window_size, 90], float)
    yy = np.empty([0, 8], float)

    ### Input data ###
    # data import from csv
    input_csv_files = sorted(glob.glob(path1))
    for f in input_csv_files:
        print("input_file_name=", f)
        data = [[float(elm) for elm in v] for v in csv.reader(open(f, "r"))]
        tmp1 = np.array(data)
        x2 = np.empty([0, window_size, 90], float)

        # data import by sliding window
        k = 0
        while k <= (len(tmp1) + 1 - 2 * window_size):
            x = np.dstack(np.array(tmp1[k:k + window_size, 1:91]).T)
            x2 = np.concatenate((x2, x), axis=0)
            k += slide_size

        xx = np.concatenate((xx, x2), axis=0)
    xx = xx.reshape(len(xx), -1)

    ### Annotation data ###
    # data import from csv
    annotation_csv_files = sorted(glob.glob(path2))
    for ff in annotation_csv_files:
        print("annotation_file_name=", ff)
        ano_data = [[str(elm) for elm in v] for v in csv.reader(open(ff, "r"))]
        tmp2 = np.array(ano_data)

        # count annotations in each sliding window
        y = np.zeros(((len(tmp2) + 1 - 2 * window_size) // slide_size + 1, 8))
        k = 0
        while k <= (len(tmp2) + 1 - 2 * window_size):
            y_pre = np.stack(np.array(tmp2[k:k + window_size]))
            bed = 0
            fall = 0
            walk = 0
            pickup = 0
            run = 0
            sitdown = 0
            standup = 0
            noactivity = 0
            for j in range(window_size):
                if y_pre[j] == "bed":
                    bed += 1
                elif y_pre[j] == "fall":
                    fall += 1
                elif y_pre[j] == "walk":
                    walk += 1
                elif y_pre[j] == "pickup":
                    pickup += 1
                elif y_pre[j] == "run":
                    run += 1
                elif y_pre[j] == "sitdown":
                    sitdown += 1
                elif y_pre[j] == "standup":
                    standup += 1
                else:
                    noactivity += 1
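            # Labeling rule (explanatory note): a window is assigned an
            # activity one-hot only if more than threshold% (here 60%) of its
            # window_size annotation samples agree; column 0 is a NoActivity
            # flag (set to 2) that csv_import later uses to drop such windows.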
            if bed > window_size * threshold / 100:
                y[k // slide_size, :] = np.array([0, 1, 0, 0, 0, 0, 0, 0])
            elif fall > window_size * threshold / 100:
                y[k // slide_size, :] = np.array([0, 0, 1, 0, 0, 0, 0, 0])
            elif walk > window_size * threshold / 100:
                y[k // slide_size, :] = np.array([0, 0, 0, 1, 0, 0, 0, 0])
            elif pickup > window_size * threshold / 100:
                y[k // slide_size, :] = np.array([0, 0, 0, 0, 1, 0, 0, 0])
            elif run > window_size * threshold / 100:
                y[k // slide_size, :] = np.array([0, 0, 0, 0, 0, 1, 0, 0])
            elif sitdown > window_size * threshold / 100:
                y[k // slide_size, :] = np.array([0, 0, 0, 0, 0, 0, 1, 0])
            elif standup > window_size * threshold / 100:
                y[k // slide_size, :] = np.array([0, 0, 0, 0, 0, 0, 0, 1])
            else:
                y[k // slide_size, :] = np.array([2, 0, 0, 0, 0, 0, 0, 0])
            k += slide_size

        yy = np.concatenate((yy, y), axis=0)
    print(xx.shape, yy.shape)
    return (xx, yy)


#### Main ####
if not os.path.exists("input_files/"):
    os.makedirs("input_files/")

for i, label in enumerate(["bed", "fall", "pickup", "run", "sitdown", "standup", "walk"]):
    filepath1 = "dataset/input_*" + str(label) + "*.csv"
    filepath2 = "dataset/annotation_*" + str(label) + "*.csv"
    outputfilename1 = "input_files/xx_" + str(window_size) + "_" + str(threshold) + "_" + label + ".csv"
    outputfilename2 = "input_files/yy_" + str(window_size) + "_" + str(threshold) + "_" + label + ".csv"

    x, y = dataimport(filepath1, filepath2)

    with open(outputfilename1, "w") as f:
        writer = csv.writer(f, lineterminator="\n")
        writer.writerows(x)
    with open(outputfilename2, "w") as f:
        writer = csv.writer(f, lineterminator="\n")
        writer.writerows(y)
    print(label + " finish!")
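# Window-count check (a sketch): the slider starts at k = 0 and advances by
# slide_size while k <= N + 1 - 2 * window_size, so a capture of N samples
# yields (N + 1 - 2 * window_size) // slide_size + 1 windows. For example,
# N = 10000 gives (10000 + 1 - 2000) // 200 + 1 = 41 windows per file,
# matching the y array preallocated in dataimport() above.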
--------------------------------------------------------------------------------
/cross_vali_gru.py:
--------------------------------------------------------------------------------
from __future__ import print_function
import sklearn as sk
from sklearn.metrics import confusion_matrix
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import sys
from tensorflow.contrib import rnn
from sklearn.model_selection import KFold, cross_val_score
import csv
from sklearn.utils import shuffle
import os

# Import WiFi Activity data
# csv_convert(window_size,threshold)
from cross_vali_input_data import csv_import, DataSet

window_size = 500
threshold = 60

# Parameters
learning_rate = 0.0001
training_iters = 2000
batch_size = 200
display_step = 100

# Network Parameters
n_input = 90           # WiFi activity data input (img shape: 90 * window_size)
n_steps = window_size  # timesteps
n_hidden = 200         # hidden layer num of features (originally 200)
n_classes = 7          # WiFi activity total classes

# Output folder
OUTPUT_FOLDER_PATTERN = "GRU_LR{0}_BATCHSIZE{1}_NHIDDEN{2}/"
output_folder = OUTPUT_FOLDER_PATTERN.format(learning_rate, batch_size, n_hidden)
if not os.path.exists(output_folder):
    os.makedirs(output_folder)

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(x, n_steps, 0)

    # Define a GRU cell with TensorFlow
    # lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    gru_cell = rnn.GRUCell(n_hidden)

    # Get GRU cell output
    outputs, states = rnn.static_rnn(gru_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']


##### main #####
pred = RNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()
cvscores = []
confusion_sum = [[0 for i in range(7)] for j in range(7)]

# data import
x_bed, x_fall, x_pickup, x_run, x_sitdown, x_standup, x_walk, \
y_bed, y_fall, y_pickup, y_run, y_sitdown, y_standup, y_walk = csv_import()

print(" bed =", len(x_bed), " fall=", len(x_fall), " pickup =", len(x_pickup), " run=", len(x_run),
      " sitdown=", len(x_sitdown), " standup=", len(x_standup), " walk=", len(x_walk))

# data shuffle
x_bed, y_bed = shuffle(x_bed, y_bed, random_state=0)
x_fall, y_fall = shuffle(x_fall, y_fall, random_state=0)
x_pickup, y_pickup = shuffle(x_pickup, y_pickup, random_state=0)
x_run, y_run = shuffle(x_run, y_run, random_state=0)
x_sitdown, y_sitdown = shuffle(x_sitdown, y_sitdown, random_state=0)
x_standup, y_standup = shuffle(x_standup, y_standup, random_state=0)
x_walk, y_walk = shuffle(x_walk, y_walk, random_state=0)


# k_fold
kk = 10

# Launch the graph
with tf.Session() as sess:
    for i in range(kk):

        # Initialization
        train_loss = []
        train_acc = []
        validation_loss = []
        validation_acc = []

        # Roll the data
        x_bed = np.roll(x_bed, int(len(x_bed) / kk), axis=0)
        y_bed = np.roll(y_bed, int(len(y_bed) / kk), axis=0)
        x_fall = np.roll(x_fall, int(len(x_fall) / kk), axis=0)
        y_fall = np.roll(y_fall, int(len(y_fall) / kk), axis=0)
        x_pickup = np.roll(x_pickup, int(len(x_pickup) / kk), axis=0)
        y_pickup = np.roll(y_pickup, int(len(y_pickup) / kk), axis=0)
        x_run = np.roll(x_run, int(len(x_run) / kk), axis=0)
        y_run = np.roll(y_run, int(len(y_run) / kk), axis=0)
        x_sitdown = np.roll(x_sitdown, int(len(x_sitdown) / kk), axis=0)
        y_sitdown = np.roll(y_sitdown, int(len(y_sitdown) / kk), axis=0)
        x_standup = np.roll(x_standup, int(len(x_standup) / kk), axis=0)
        y_standup = np.roll(y_standup, int(len(y_standup) / kk), axis=0)
        x_walk = np.roll(x_walk, int(len(x_walk) / kk), axis=0)
        y_walk = np.roll(y_walk, int(len(y_walk) / kk), axis=0)

        # data separation
        wifi_x_train = np.r_[x_bed[int(len(x_bed) / kk):], x_fall[int(len(x_fall) / kk):],
                             x_pickup[int(len(x_pickup) / kk):], x_run[int(len(x_run) / kk):],
                             x_sitdown[int(len(x_sitdown) / kk):], x_standup[int(len(x_standup) / kk):],
                             x_walk[int(len(x_walk) / kk):]]

        wifi_y_train = np.r_[y_bed[int(len(y_bed) / kk):], y_fall[int(len(y_fall) / kk):],
                             y_pickup[int(len(y_pickup) / kk):], y_run[int(len(y_run) / kk):],
                             y_sitdown[int(len(y_sitdown) / kk):], y_standup[int(len(y_standup) / kk):],
                             y_walk[int(len(y_walk) / kk):]]

        # drop the NoActivity flag column, keeping the 7 activity classes
        wifi_y_train = wifi_y_train[:, 1:]

        wifi_x_validation = np.r_[x_bed[:int(len(x_bed) / kk)], x_fall[:int(len(x_fall) / kk)],
                                  x_pickup[:int(len(x_pickup) / kk)], x_run[:int(len(x_run) / kk)],
                                  x_sitdown[:int(len(x_sitdown) / kk)], x_standup[:int(len(x_standup) / kk)],
                                  x_walk[:int(len(x_walk) / kk)]]

        wifi_y_validation = np.r_[y_bed[:int(len(y_bed) / kk)], y_fall[:int(len(y_fall) / kk)],
                                  y_pickup[:int(len(y_pickup) / kk)], y_run[:int(len(y_run) / kk)],
                                  y_sitdown[:int(len(y_sitdown) / kk)], y_standup[:int(len(y_standup) / kk)],
                                  y_walk[:int(len(y_walk) / kk)]]

        wifi_y_validation = wifi_y_validation[:, 1:]

        # data set
        wifi_train = DataSet(wifi_x_train, wifi_y_train)
        wifi_validation = DataSet(wifi_x_validation, wifi_y_validation)
        print(wifi_x_train.shape, wifi_y_train.shape, wifi_x_validation.shape, wifi_y_validation.shape)
        saver = tf.train.Saver()
        sess.run(init)
        step = 1

        # Keep training until reach max iterations
        while step < training_iters:
            batch_x, batch_y = wifi_train.next_batch(batch_size)
            x_vali = wifi_validation.images[:]
            y_vali = wifi_validation.labels[:]
            # Reshape data to (batch_size, n_steps, n_input)
            batch_x = batch_x.reshape((batch_size, n_steps, n_input))
            x_vali = x_vali.reshape((-1, n_steps, n_input))
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            acc_vali = sess.run(accuracy, feed_dict={x: x_vali, y: y_vali})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            loss_vali = sess.run(cost, feed_dict={x: x_vali, y: y_vali})

            # Store the accuracy and loss
            train_acc.append(acc)
            train_loss.append(loss)
            validation_acc.append(acc_vali)
            validation_loss.append(loss_vali)

            if step % display_step == 0:
                print("Iter " + str(step) + ", Minibatch Training Loss= " +
                      "{:.6f}".format(loss) + ", Training Accuracy= " +
                      "{:.5f}".format(acc) + ", Minibatch Validation Loss= " +
                      "{:.6f}".format(loss_vali) + ", Validation Accuracy= " +
                      "{:.5f}".format(acc_vali))
            step += 1

        # Calculate the confusion_matrix
        cvscores.append(acc_vali * 100)
        y_p = tf.argmax(pred, 1)
        val_accuracy, y_pred = sess.run([accuracy, y_p], feed_dict={x: x_vali, y: y_vali})
        y_true = np.argmax(y_vali, 1)
        confusion = sk.metrics.confusion_matrix(y_true, y_pred)
        print(confusion)
        confusion_sum = confusion_sum + confusion

        # Save the Accuracy curve
        fig = plt.figure(2 * i - 1)
        plt.plot(train_acc)
        plt.plot(validation_acc)
        plt.xlabel("n_epoch")
        plt.ylabel("Accuracy")
        plt.legend(["train_acc", "validation_acc"], loc=4)
        plt.ylim([0, 1])
        plt.savefig((output_folder + "Accuracy_" + str(i) + ".eps"), dpi=150)

        # Save the Loss curve
        fig = plt.figure(2 * i)
        plt.plot(train_loss)
        plt.plot(validation_loss)
        plt.xlabel("n_epoch")
        plt.ylabel("Loss")
        plt.legend(["train_loss", "validation_loss"], loc=1)
        plt.ylim([0, 2])
        plt.savefig((output_folder + "Loss_" + str(i) + ".eps"), dpi=150)

        # save into output_folder, matching the CNN and LSTM scripts
        np.save(output_folder + "gru_trainloss_" + str(i) + ".npy", np.array(train_loss))
        np.save(output_folder + "gru_valloss_" + str(i) + ".npy", np.array(validation_loss))
        np.save(output_folder + "gru_trainacc_" + str(i) + ".npy", np.array(train_acc))
        np.save(output_folder + "gru_valacc_" + str(i) + ".npy", np.array(validation_acc))
        print("Optimization Finished!")
        print("%.1f%% (+/- %.1f%%)" % (np.mean(cvscores), np.std(cvscores)))
        saver.save(sess, output_folder + "model.ckpt")

# Save the confusion_matrix
np.savetxt(output_folder + "confusion_matrix.txt", confusion_sum, delimiter=",", fmt='%d')
np.savetxt(output_folder + "accuracy.txt", (np.mean(cvscores), np.std(cvscores)), delimiter=".", fmt='%.1f')
--------------------------------------------------------------------------------
/cross_vali_input_data.py:
--------------------------------------------------------------------------------
"""Functions for reading the WiFi CSI activity data (adapted from the TensorFlow MNIST input_data module)."""
from __future__ import print_function
import os
import numpy as np
import csv
import glob
import pandas as pd

class DataSet(object):
    def __init__(self, images, labels, fake_data=False):
        assert images.shape[0] == labels.shape[0], (
            "images.shape: %s labels.shape: %s" % (images.shape, labels.shape))
        self._num_examples = images.shape[0]
        # flatten each (window, 90) sample to a single row
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False):
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
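# Usage sketch (illustrative shapes): DataSet stores flattened rows and serves
# shuffled minibatches, e.g.
#     ds = DataSet(x, y)           # x: (n, 500, 90), y: (n, 7)
#     bx, by = ds.next_batch(200)  # bx: (200, 45000), by: (200, 7)
# The training scripts then reshape bx back to (batch_size, n_steps, n_input).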

def csv_import():
    x_dic = {}
    y_dic = {}
    print("csv file importing...")

    for i in ["bed", "fall", "pickup", "run", "sitdown", "standup", "walk"]:
        # xx = np.array([[float(elm) for elm in v] for v in csv.reader(open("./input_files/xx_1000_60_" + str(i) + ".csv", "r"))])
        # yy = np.array([[float(elm) for elm in v] for v in csv.reader(open("./input_files/yy_1000_60_" + str(i) + ".csv", "r"))])

        # xx = xx[::2, :]
        # yy = yy[::2, :]

        SKIPROW = 2  # keep every 2nd window row -> overlap 800 ms to 600 ms (to avoid memory error)
        num_lines = sum(1 for l in open("input_files/xx_1000_60_" + str(i) + ".csv"))
        skip_idx = [x for x in range(1, num_lines) if x % SKIPROW != 0]

        xx = np.array(pd.read_csv("input_files/xx_1000_60_" + str(i) + ".csv", header=None, skiprows=skip_idx))
        yy = np.array(pd.read_csv("input_files/yy_1000_60_" + str(i) + ".csv", header=None, skiprows=skip_idx))

        # eliminate the NoActivity data (flag in column 0)
        rows, cols = np.where(yy > 0)
        xx = np.delete(xx, rows[np.where(cols == 0)], 0)
        yy = np.delete(yy, rows[np.where(cols == 0)], 0)

        xx = xx.reshape(len(xx), 1000, 90)
        # cap sitdown at 400 windows (keep labels aligned with the data)
        if i == "sitdown":
            xx = xx[:400, :, :]
            yy = yy[:400, :]

        # 1000 Hz to 500 Hz (to avoid memory error)
        xx = xx[:, ::2, :90]

        x_dic[str(i)] = xx
        y_dic[str(i)] = yy

        print(str(i), "finished...", "xx=", xx.shape, "yy=", yy.shape)

    return x_dic["bed"], x_dic["fall"], x_dic["pickup"], x_dic["run"], x_dic["sitdown"], x_dic["standup"], x_dic["walk"], \
           y_dic["bed"], y_dic["fall"], y_dic["pickup"], y_dic["run"], y_dic["sitdown"], y_dic["standup"], y_dic["walk"]
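# Note on the two reductions above: skiprows keeps every SKIPROW-th window row
# (widening the effective slide from 200 to 400 samples, i.e. overlap 800 ms ->
# 600 ms), and xx[:, ::2, :90] keeps every other timestep, halving each
# 1000-step window to the 500 steps (n_steps) the models expect.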
--------------------------------------------------------------------------------
/cross_vali_lstm.py:
--------------------------------------------------------------------------------
from __future__ import print_function
import sklearn as sk
from sklearn.metrics import confusion_matrix
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import sys
from tensorflow.contrib import rnn
from sklearn.model_selection import KFold, cross_val_score
import csv
from sklearn.utils import shuffle
import os

# Import WiFi Activity data
# csv_convert(window_size,threshold)
from cross_vali_input_data import csv_import, DataSet

window_size = 500
threshold = 60

# Parameters
learning_rate = 0.0001
training_iters = 2000
batch_size = 200
display_step = 100

# Network Parameters
n_input = 90           # WiFi activity data input (img shape: 90 * window_size)
n_steps = window_size  # timesteps
n_hidden = 200         # hidden layer num of features (originally 200)
n_classes = 7          # WiFi activity total classes

# Output folder
OUTPUT_FOLDER_PATTERN = "LR{0}_BATCHSIZE{1}_NHIDDEN{2}/"
output_folder = OUTPUT_FOLDER_PATTERN.format(learning_rate, batch_size, n_hidden)
if not os.path.exists(output_folder):
    os.makedirs(output_folder)

# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])

# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def RNN(x, weights, biases):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)

    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)
    x = tf.reshape(x, [-1, n_input])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
    x = tf.split(x, n_steps, 0)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

    # Get LSTM cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']


##### main #####
pred = RNN(x, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()
cvscores = []
confusion_sum = [[0 for i in range(7)] for j in range(7)]

# data import
x_bed, x_fall, x_pickup, x_run, x_sitdown, x_standup, x_walk, \
y_bed, y_fall, y_pickup, y_run, y_sitdown, y_standup, y_walk = csv_import()

print(" bed =", len(x_bed), " fall=", len(x_fall), " pickup =", len(x_pickup), " run=", len(x_run),
      " sitdown=", len(x_sitdown), " standup=", len(x_standup), " walk=", len(x_walk))

# data shuffle
x_bed, y_bed = shuffle(x_bed, y_bed, random_state=0)
x_fall, y_fall = shuffle(x_fall, y_fall, random_state=0)
x_pickup, y_pickup = shuffle(x_pickup, y_pickup, random_state=0)
x_run, y_run = shuffle(x_run, y_run, random_state=0)
x_sitdown, y_sitdown = shuffle(x_sitdown, y_sitdown, random_state=0)
x_standup, y_standup = shuffle(x_standup, y_standup, random_state=0)
x_walk, y_walk = shuffle(x_walk, y_walk, random_state=0)


# k_fold
kk = 10

# Launch the graph
with tf.Session() as sess:
    for i in range(kk):

        # Initialization
        train_loss = []
        train_acc = []
        validation_loss = []
        validation_acc = []

        # Roll the data
        x_bed = np.roll(x_bed, int(len(x_bed) / kk), axis=0)
        y_bed = np.roll(y_bed, int(len(y_bed) / kk), axis=0)
        x_fall = np.roll(x_fall, int(len(x_fall) / kk), axis=0)
        y_fall = np.roll(y_fall, int(len(y_fall) / kk), axis=0)
        x_pickup = np.roll(x_pickup, int(len(x_pickup) / kk), axis=0)
        y_pickup = np.roll(y_pickup, int(len(y_pickup) / kk), axis=0)
        x_run = np.roll(x_run, int(len(x_run) / kk), axis=0)
        y_run = np.roll(y_run, int(len(y_run) / kk), axis=0)
        x_sitdown = np.roll(x_sitdown, int(len(x_sitdown) / kk), axis=0)
        y_sitdown = np.roll(y_sitdown, int(len(y_sitdown) / kk), axis=0)
        x_standup = np.roll(x_standup, int(len(x_standup) / kk), axis=0)
        y_standup = np.roll(y_standup, int(len(y_standup) / kk), axis=0)
        x_walk = np.roll(x_walk, int(len(x_walk) / kk), axis=0)
        y_walk = np.roll(y_walk, int(len(y_walk) / kk), axis=0)

        # data separation
        wifi_x_train = np.r_[x_bed[int(len(x_bed) / kk):], x_fall[int(len(x_fall) / kk):],
                             x_pickup[int(len(x_pickup) / kk):], x_run[int(len(x_run) / kk):],
                             x_sitdown[int(len(x_sitdown) / kk):], x_standup[int(len(x_standup) / kk):],
                             x_walk[int(len(x_walk) / kk):]]

        wifi_y_train = np.r_[y_bed[int(len(y_bed) / kk):], y_fall[int(len(y_fall) / kk):],
                             y_pickup[int(len(y_pickup) / kk):], y_run[int(len(y_run) / kk):],
                             y_sitdown[int(len(y_sitdown) / kk):], y_standup[int(len(y_standup) / kk):],
                             y_walk[int(len(y_walk) / kk):]]

        # drop the NoActivity flag column, keeping the 7 activity classes
        wifi_y_train = wifi_y_train[:, 1:]

        wifi_x_validation = np.r_[x_bed[:int(len(x_bed) / kk)], x_fall[:int(len(x_fall) / kk)],
                                  x_pickup[:int(len(x_pickup) / kk)], x_run[:int(len(x_run) / kk)],
                                  x_sitdown[:int(len(x_sitdown) / kk)], x_standup[:int(len(x_standup) / kk)],
                                  x_walk[:int(len(x_walk) / kk)]]

        wifi_y_validation = np.r_[y_bed[:int(len(y_bed) / kk)], y_fall[:int(len(y_fall) / kk)],
                                  y_pickup[:int(len(y_pickup) / kk)], y_run[:int(len(y_run) / kk)],
                                  y_sitdown[:int(len(y_sitdown) / kk)], y_standup[:int(len(y_standup) / kk)],
                                  y_walk[:int(len(y_walk) / kk)]]

        wifi_y_validation = wifi_y_validation[:, 1:]

        # data set
        wifi_train = DataSet(wifi_x_train, wifi_y_train)
        wifi_validation = DataSet(wifi_x_validation, wifi_y_validation)
        print(wifi_x_train.shape, wifi_y_train.shape, wifi_x_validation.shape, wifi_y_validation.shape)
        saver = tf.train.Saver()
        sess.run(init)
        step = 1

        # Keep training until reach max iterations
        while step < training_iters:
            batch_x, batch_y = wifi_train.next_batch(batch_size)
            x_vali = wifi_validation.images[:]
            y_vali = wifi_validation.labels[:]
            # Reshape data to (batch_size, n_steps, n_input)
            batch_x = batch_x.reshape((batch_size, n_steps, n_input))
            x_vali = x_vali.reshape((-1, n_steps, n_input))
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            acc_vali = sess.run(accuracy, feed_dict={x: x_vali, y: y_vali})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            loss_vali = sess.run(cost, feed_dict={x: x_vali, y: y_vali})

            # Store the accuracy and loss
            train_acc.append(acc)
            train_loss.append(loss)
            validation_acc.append(acc_vali)
            validation_loss.append(loss_vali)

            if step % display_step == 0:
                print("Iter " + str(step) + ", Minibatch Training Loss= " +
                      "{:.6f}".format(loss) + ", Training Accuracy= " +
                      "{:.5f}".format(acc) + ", Minibatch Validation Loss= " +
                      "{:.6f}".format(loss_vali) + ", Validation Accuracy= " +
                      "{:.5f}".format(acc_vali))
            step += 1

        # Calculate the confusion_matrix
        cvscores.append(acc_vali * 100)
        y_p = tf.argmax(pred, 1)
        val_accuracy, y_pred = sess.run([accuracy, y_p], feed_dict={x: x_vali, y: y_vali})
        y_true = np.argmax(y_vali, 1)
        confusion = sk.metrics.confusion_matrix(y_true, y_pred)
        print(confusion)
        confusion_sum = confusion_sum + confusion

        # Save the Accuracy curve
        fig = plt.figure(2 * i - 1)
        plt.plot(train_acc)
        plt.plot(validation_acc)
        plt.xlabel("n_epoch")
        plt.ylabel("Accuracy")
        plt.legend(["train_acc", "validation_acc"], loc=4)
        plt.ylim([0, 1])
        plt.savefig((output_folder + "Accuracy_" + str(i) + ".eps"), dpi=150)

        # Save the Loss curve
        fig = plt.figure(2 * i)
        plt.plot(train_loss)
        plt.plot(validation_loss)
        plt.xlabel("n_epoch")
        plt.ylabel("Loss")
        plt.legend(["train_loss", "validation_loss"], loc=1)
        plt.ylim([0, 2])
        plt.savefig((output_folder + "Loss_" + str(i) + ".eps"), dpi=150)

        np.save(output_folder + "lstm_trainloss_" + str(i) + ".npy", np.array(train_loss))
        np.save(output_folder + "lstm_valloss_" + str(i) + ".npy", np.array(validation_loss))
        np.save(output_folder + "lstm_trainacc_" + str(i) + ".npy", np.array(train_acc))
        np.save(output_folder + "lstm_valacc_" + str(i) + ".npy", np.array(validation_acc))

        print("Optimization Finished!")
        print("%.1f%% (+/- %.1f%%)" % (np.mean(cvscores), np.std(cvscores)))
        saver.save(sess, output_folder + "model.ckpt")

# Save the confusion_matrix
np.savetxt(output_folder + "confusion_matrix.txt", confusion_sum, delimiter=",", fmt='%d')
np.savetxt(output_folder + "accuracy.txt", (np.mean(cvscores), np.std(cvscores)), delimiter=".", fmt='%.1f')
--------------------------------------------------------------------------------
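Each script leaves its artifacts in the folder named by its OUTPUT_FOLDER_PATTERN. A minimal sketch for inspecting them after a run of cross_vali_lstm.py (the folder name assumes that script's default hyperparameters):

```python
import numpy as np

folder = "LR0.0001_BATCHSIZE200_NHIDDEN200/"
confusion = np.loadtxt(folder + "confusion_matrix.txt", delimiter=",")
val_loss = np.load(folder + "lstm_valloss_0.npy")
print(confusion.shape)  # (7, 7), summed over the 10 folds
print(val_loss[-1])     # final validation loss of fold 0
```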