├── 08361481.pdf ├── Houston_cnn.py ├── Houston_dcnn.py ├── Houston_dhcnet.py ├── Houston_generate.py ├── README.md ├── data_Houston.py └── deformable_conv.py /08361481.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ordinarycore/DHCNet/9eb9395328f4541ad86a75b5f5cfde91f8bc2f5d/08361481.pdf -------------------------------------------------------------------------------- /Houston_cnn.py: -------------------------------------------------------------------------------- 1 | # -- coding: utf-8 -- 2 | 3 | import scipy.io 4 | import tensorflow as tf 5 | from tensorflow.contrib import slim 6 | import numpy as np 7 | from data_Houston import patch_size, num_band 8 | import time 9 | import os 10 | import scipy.ndimage 11 | from deformable_conv import deformable_convolution 12 | 13 | # Network hyperparameters 14 | num_classes = 15 15 | Train_Batch_Size = 150 16 | Learning_Rate_Base = 0.1 17 | Training_Steps = 1401 18 | 19 | 20 | def train(): 21 | Training_Data = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Training_Data.mat'))['Training_Data'] 22 | Testing_Data = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Testing_Data.mat'))['Testing_Data'] 23 | Training_Label = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Training_Label.mat'))['Training_Label'] 24 | Testing_Label = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Testing_Label.mat'))['Testing_Label'] 25 | All_Patches = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/All_Patches.mat'))['All_Patches'] 26 | All_Labels = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/All_Labels.mat'))['All_Labels'] 27 | 28 | num_train = Training_Data.shape[0] 29 | num_test = Testing_Data.shape[0] 30 | num_total = All_Patches.shape[0] 31 | 32 | 33 | x = tf.placeholder(tf.float32, [None, patch_size, patch_size, num_band], name='x_input') 34 | y = tf.placeholder(tf.float32, [None, num_classes], name='y_input') 35 | training_flag = tf.placeholder(tf.bool) 36 | 37 | # conv1 38 | weights1 = tf.get_variable("weigts1", shape=[3, 3, num_band, 96], 39 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 40 | conv1 = tf.nn.conv2d(x, weights1, strides=[1, 1, 1, 1], padding='SAME') 41 | conv1 = tf.layers.batch_normalization(conv1, training=training_flag) 42 | conv1 = tf.nn.relu(conv1) 43 | 44 | # conv2 45 | weights2 = tf.get_variable("weigts2", shape=[3, 3, 96, 96], 46 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 47 | conv2 = tf.nn.conv2d(conv1, weights2, strides=[1, 1, 1, 1], padding='SAME') 48 | conv2 = tf.layers.batch_normalization(conv2, training=training_flag) 49 | conv2 = tf.nn.relu(conv2) 50 | 51 | pool1 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 52 | 53 | # conv3 54 | weights3 = tf.get_variable("weigts3", shape=[3, 3, 96, 108], 55 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 56 | conv3 = tf.nn.conv2d(pool1, weights3, strides=[1, 1, 1, 1], padding='SAME') 57 | conv3 = tf.layers.batch_normalization(conv3, training=training_flag) 58 | conv3 = tf.nn.relu(conv3) 59 | 60 | # conv4 61 | weights4 = tf.get_variable("weigts4", shape=[3, 3, 108, 108], 62 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 63 | conv4 = tf.nn.conv2d(conv3, weights4, strides=[1, 1, 1, 1], padding='SAME') 64 | conv4 = tf.layers.batch_normalization(conv4, training=training_flag) 65 | conv4 = tf.nn.relu(conv4) 66 | 67 | pool2 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 
1], padding='SAME') 68 | 69 | # conv5 70 | weights5 = tf.get_variable("weigts5", shape=[3, 3, 108, 128], 71 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 72 | conv5 = tf.nn.conv2d(pool2, weights5, strides=[1, 1, 1, 1], padding='SAME') 73 | conv5 = tf.layers.batch_normalization(conv5, training=training_flag) 74 | conv5 = tf.nn.relu(conv5) 75 | 76 | # conv6 77 | weights6 = tf.get_variable("weigts6", shape=[3, 3, 128, 128], 78 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 79 | conv6 = tf.nn.conv2d(conv5, weights6, strides=[1, 1, 1, 1], padding='SAME') 80 | conv6 = tf.layers.batch_normalization(conv6, training=training_flag) 81 | conv6 = tf.nn.relu(conv6) 82 | 83 | net = slim.avg_pool2d(conv6, 7, padding='VALID') 84 | 85 | net = slim.flatten(net) 86 | 87 | # fc1 88 | weights7 = tf.get_variable("weigts7", shape=[128, 200], 89 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 90 | fc1 = tf.matmul(net, weights7) 91 | fc1 = tf.layers.batch_normalization(fc1, training=training_flag) 92 | fc1 = tf.nn.relu(fc1) 93 | 94 | # dropout 95 | net = slim.dropout(fc1, 0.5, is_training=training_flag) 96 | 97 | # fc2 98 | weights8 = tf.get_variable("weigts8", shape=[200, num_classes], 99 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 100 | biases8 = tf.get_variable("biases8", shape=[num_classes], 101 | dtype=tf.float32, initializer=tf.zeros_initializer()) 102 | pred = tf.matmul(net, weights8) + biases8 103 | 104 | 105 | output = tf.argmax(pred, 1) 106 | # Define loss and optimizer 107 | cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y) 108 | 109 | loss = tf.reduce_mean(cross_entropy) 110 | global_step = tf.Variable(0, trainable=False) 111 | learning_rate = tf.train.exponential_decay( 112 | Learning_Rate_Base, global_step, 113 | 700, 0.25) 114 | optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) 115 | 116 | update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 117 | with tf.control_dependencies(update_ops): 118 | train_op = optimizer.minimize(loss, global_step=global_step) 119 | # Define accuracy 120 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 121 | 122 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 123 | 124 | init = tf.initialize_all_variables() 125 | 126 | saver = tf.train.Saver({'weights1': weights1, 'weights2': weights2, 'weights3': weights3, 127 | 'weights4': weights4, 'weights5': weights5, 'weights6': weights6, 128 | 'weights7': weights7, 'weights8': weights8, 'biases8': biases8}) 129 | 130 | 131 | with tf.Session() as sess: 132 | sess.run(init) 133 | saver.restore(sess, os.path.join(os.getcwd(), "model/cnn.ckpt")) 134 | 135 | for i in range(Training_Steps): 136 | start_time = time.time() 137 | idx = np.random.choice(num_train, size=Train_Batch_Size, replace=False) 138 | batch_x = Training_Data[idx, :] 139 | batch_y = Training_Label[idx, :] 140 | sess.run(train_op, feed_dict={x: batch_x, y: batch_y, training_flag: True}) 141 | 142 | # Display logs per epoch step 143 | if i % 100 == 0: 144 | batch_cost, train_acc = sess.run([loss, accuracy], feed_dict={x: batch_x, y: batch_y, training_flag: False}) 145 | duration = time.time() - start_time 146 | print("Steps", '%04d,' % i, "Loss=%.4f," % batch_cost, 147 | "Training Accuracy=%.4f" % train_acc, "time:%.4f s" % duration) 148 | if i == 1400: 149 | sum = 0.0 150 | test_outlabel = [] 151 | for k in range(0, int(num_test/100)): 152 | test_x = [Testing_Data[i + k * 
100] for i in range(0, 100)] 153 | test_y = [Testing_Label[i + k * 100] for i in range(0, 100)] 154 | test_accuracy, out_label = sess.run([accuracy, output],feed_dict={x: test_x, y: test_y, training_flag: False}) 155 | test_outlabel.extend(out_label) 156 | sum += test_accuracy * 100 157 | test_x = [Testing_Data[i] for i in range(int(num_test/100)*100, num_test)] 158 | test_y = [Testing_Label[i] for i in range(int(num_test/100)*100, num_test)] 159 | test_accuracy, out_label = sess.run([accuracy, output], feed_dict={x: test_x, y: test_y, training_flag: False}) 160 | test_outlabel.extend(out_label) 161 | sum += test_accuracy * (num_test - int(num_test/100)*100) 162 | print("The Test Accuracy is :", sum / num_test) 163 | 164 | test_outlabel = np.array(test_outlabel) 165 | test_ind = {} 166 | test_ind['Test_Outlabel'] = test_outlabel 167 | scipy.io.savemat(os.path.join(os.getcwd(), 'result/Test_Outlabel'), test_ind) 168 | 169 | sum = 0.0 170 | Draw_Label = [] 171 | for k in range(0, int(num_total/100)): 172 | test_x = [All_Patches[i + k * 100] for i in range(0, 100)] 173 | test_y = [All_Labels[i + k * 100] for i in range(0, 100)] 174 | test_accuracy, out_label = sess.run([accuracy, output], 175 | feed_dict={x: test_x, y: test_y, training_flag: False}) 176 | Draw_Label.extend(out_label) 177 | sum += test_accuracy * 100 178 | test_x = [All_Patches[i] for i in range(int(num_total/100)*100, num_total)] 179 | test_y = [All_Labels[i] for i in range(int(num_total/100)*100, num_total)] 180 | test_accuracy, out_label = sess.run([accuracy, output], 181 | feed_dict={x: test_x, y: test_y, training_flag: False}) 182 | Draw_Label.extend(out_label) 183 | sum += test_accuracy * (num_total - int(num_total/100)*100) 184 | print("The Test Accuracy is :", sum / num_total) 185 | 186 | Draw_Label = np.array(Draw_Label) 187 | test_ind = {} 188 | test_ind['Draw_Label'] = Draw_Label 189 | scipy.io.savemat(os.path.join(os.getcwd(),'result/Draw_Label'), test_ind) 190 | 191 | 192 | 193 | def main(argv=None): 194 | train() 195 | 196 | 197 | 198 | if __name__ == '__main__': 199 | tf.app.run() 200 | -------------------------------------------------------------------------------- /Houston_dcnn.py: -------------------------------------------------------------------------------- 1 | # -- coding: utf-8 -- 2 | 3 | import scipy.io 4 | import tensorflow as tf 5 | from tensorflow.contrib import slim 6 | import numpy as np 7 | from data_Houston import patch_size, num_band 8 | import time 9 | import os 10 | import scipy.ndimage 11 | from deformable_conv import deformable_convolution 12 | 13 | # Network hyperparameters 14 | num_classes = 15 15 | Train_Batch_Size = 150 16 | Learning_Rate_Base = 0.1 17 | Training_Steps = 1401 18 | 19 | 20 | def train(): 21 | Training_Data = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Training_Data.mat'))['Training_Data'] 22 | Testing_Data = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Testing_Data.mat'))['Testing_Data'] 23 | Training_Label = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Training_Label.mat'))['Training_Label'] 24 | Testing_Label = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Testing_Label.mat'))['Testing_Label'] 25 | All_Patches = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/All_Patches.mat'))['All_Patches'] 26 | All_Labels = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/All_Labels.mat'))['All_Labels'] 27 | 28 | num_train = Training_Data.shape[0] 29 | num_test = Testing_Data.shape[0] 30 | num_total = All_Patches.shape[0] 31 | 32 | 33 | x = tf.placeholder(tf.float32, [None, 
patch_size, patch_size, num_band], name='x_input') 34 | y = tf.placeholder(tf.float32, [None, num_classes], name='y_input') 35 | training_flag = tf.placeholder(tf.bool) 36 | 37 | # conv1 38 | weights1 = tf.get_variable("weigts1", shape=[3, 3, num_band, 96], 39 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 40 | conv1 = tf.nn.conv2d(x, weights1, strides=[1, 1, 1, 1], padding='SAME') 41 | conv1 = tf.layers.batch_normalization(conv1, training=training_flag) 42 | conv1 = tf.nn.relu(conv1) 43 | 44 | # conv2 45 | weights2 = tf.get_variable("weigts2", shape=[3, 3, 96, 96], 46 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 47 | conv2 = tf.nn.conv2d(conv1, weights2, strides=[1, 1, 1, 1], padding='SAME') 48 | conv2 = tf.layers.batch_normalization(conv2, training=training_flag) 49 | conv2 = tf.nn.relu(conv2) 50 | 51 | pool1 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 52 | 53 | # conv3 54 | weights3 = tf.get_variable("weigts3", shape=[3, 3, 96, 108], 55 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 56 | conv3 = tf.nn.conv2d(pool1, weights3, strides=[1, 1, 1, 1], padding='SAME') 57 | conv3 = tf.layers.batch_normalization(conv3, training=training_flag) 58 | conv3 = tf.nn.relu(conv3) 59 | 60 | # conv4 61 | weights4 = tf.get_variable("weigts4", shape=[3, 3, 108, 108], 62 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 63 | conv4 = tf.nn.conv2d(conv3, weights4, strides=[1, 1, 1, 1], padding='SAME') 64 | conv4 = tf.layers.batch_normalization(conv4, training=training_flag) 65 | conv4 = tf.nn.relu(conv4) 66 | 67 | pool2 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 68 | 69 | # conv5 70 | weights5 = tf.get_variable("weigts5", shape=[3, 3, 108, 128], 71 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 72 | conv5 = tf.nn.conv2d(pool2, weights5, strides=[1, 1, 1, 1], padding='SAME') 73 | conv5 = tf.layers.batch_normalization(conv5, training=training_flag) 74 | conv5 = tf.nn.relu(conv5) 75 | 76 | weights_d6 = tf.Variable(tf.zeros([3, 3, 128, 256]) + 0.001, name="weights_d6") 77 | offset6 = tf.nn.conv2d(conv5, weights_d6, strides=[1, 1, 1, 1], padding='SAME') 78 | offset_img6 = deformable_convolution(conv5, offset6) 79 | 80 | # conv6 81 | weights6 = tf.get_variable("weigts6", shape=[3, 3, 128, 128], 82 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 83 | conv6 = tf.nn.conv2d(offset_img6, weights6, strides=[1, 1, 1, 1], padding='SAME') 84 | conv6 = tf.layers.batch_normalization(conv6, training=training_flag) 85 | conv6 = tf.nn.relu(conv6) 86 | 87 | net = slim.avg_pool2d(conv6, 7, padding='VALID') 88 | 89 | net = slim.flatten(net) 90 | 91 | # fc1 92 | weights7 = tf.get_variable("weigts7", shape=[128, 200], 93 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 94 | fc1 = tf.matmul(net, weights7) 95 | fc1 = tf.layers.batch_normalization(fc1, training=training_flag) 96 | fc1 = tf.nn.relu(fc1) 97 | 98 | # dropout 99 | net = slim.dropout(fc1, 0.5, is_training=training_flag) 100 | 101 | # fc2 102 | weights8 = tf.get_variable("weigts8", shape=[200, num_classes], 103 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 104 | biases8 = tf.get_variable("biases8", shape=[num_classes], 105 | dtype=tf.float32, initializer=tf.zeros_initializer()) 106 | pred = tf.matmul(net, weights8) + biases8 107 | 108 | output = tf.argmax(pred, 1) 
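# Define loss and optimizer: softmax cross-entropy, SGD with momentum 0.9, and a learning rate that decays from Learning_Rate_Base by a factor of 0.25 every 700 steps (smooth exponential decay).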
109 | 110 | cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y) 111 | 112 | loss = tf.reduce_mean(cross_entropy) 113 | global_step = tf.Variable(0, trainable=False) 114 | learning_rate = tf.train.exponential_decay( 115 | Learning_Rate_Base, global_step, 116 | 700, 0.25) 117 | optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) 118 | 119 | update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 120 | with tf.control_dependencies(update_ops): 121 | train_op = optimizer.minimize(loss, global_step=global_step) 122 | # Define accuracy 123 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 124 | 125 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 126 | 127 | init = tf.initialize_all_variables() 128 | 129 | saver = tf.train.Saver({'weights1': weights1, 'weights2': weights2, 'weights3': weights3, 130 | 'weights4': weights4, 'weights5': weights5, 'weights6': weights6, 131 | 'weights7': weights7, 'weights8': weights8, 'biases8': biases8}) 132 | 133 | with tf.Session() as sess: 134 | sess.run(init) 135 | saver.restore(sess, os.path.join(os.getcwd(), "model/cnn.ckpt")) 136 | 137 | for i in range(Training_Steps): 138 | start_time = time.time() 139 | idx = np.random.choice(num_train, size=Train_Batch_Size, replace=False) 140 | batch_x = Training_Data[idx, :] 141 | batch_y = Training_Label[idx, :] 142 | sess.run(train_op, feed_dict={x: batch_x, y: batch_y, training_flag: True}) 143 | 144 | # Display logs per epoch step 145 | if i % 100 == 0: 146 | batch_cost, train_acc = sess.run([loss, accuracy], 147 | feed_dict={x: batch_x, y: batch_y, training_flag: False}) 148 | duration = time.time() - start_time 149 | print("Steps", '%04d,' % i, "Loss=%.4f," % batch_cost, 150 | "Training Accuracy=%.4f" % train_acc, "time:%.4f s" % duration) 151 | if i == 1400: 152 | sum = 0.0 153 | test_outlabel = [] 154 | for k in range(0, int(num_test / 100)): 155 | test_x = [Testing_Data[i + k * 100] for i in range(0, 100)] 156 | test_y = [Testing_Label[i + k * 100] for i in range(0, 100)] 157 | test_accuracy, out_label = sess.run([accuracy, output], 158 | feed_dict={x: test_x, y: test_y, training_flag: False}) 159 | test_outlabel.extend(out_label) 160 | sum += test_accuracy * 100 161 | test_x = [Testing_Data[i] for i in range(int(num_test / 100) * 100, num_test)] 162 | test_y = [Testing_Label[i] for i in range(int(num_test / 100) * 100, num_test)] 163 | test_accuracy, out_label = sess.run([accuracy, output], 164 | feed_dict={x: test_x, y: test_y, training_flag: False}) 165 | test_outlabel.extend(out_label) 166 | sum += test_accuracy * (num_test - int(num_test / 100) * 100) 167 | print("The Test Accuracy is :", sum / num_test) 168 | 169 | test_outlabel = np.array(test_outlabel) 170 | test_ind = {} 171 | test_ind['Test_Outlabel'] = test_outlabel 172 | scipy.io.savemat(os.path.join(os.getcwd(), 'result/Test_Outlabel'), test_ind) 173 | 174 | sum = 0.0 175 | Draw_Label = [] 176 | for k in range(0, int(num_total / 100)): 177 | test_x = [All_Patches[i + k * 100] for i in range(0, 100)] 178 | test_y = [All_Labels[i + k * 100] for i in range(0, 100)] 179 | test_accuracy, out_label = sess.run([accuracy, output], 180 | feed_dict={x: test_x, y: test_y, training_flag: False}) 181 | Draw_Label.extend(out_label) 182 | sum += test_accuracy * 100 183 | test_x = [All_Patches[i] for i in range(int(num_total / 100) * 100, num_total)] 184 | test_y = [All_Labels[i] for i in range(int(num_total / 100) * 100, num_total)] 185 | test_accuracy, out_label = sess.run([accuracy, output], 
186 | feed_dict={x: test_x, y: test_y, training_flag: False}) 187 | Draw_Label.extend(out_label) 188 | sum += test_accuracy * (num_total - int(num_total / 100) * 100) 189 | print("The Test Accuracy is :", sum / num_total) 190 | 191 | Draw_Label = np.array(Draw_Label) 192 | test_ind = {} 193 | test_ind['Draw_Label'] = Draw_Label 194 | scipy.io.savemat(os.path.join(os.getcwd(), 'result/Draw_Label'), test_ind) 195 | 196 | 197 | 198 | def main(argv=None): 199 | train() 200 | 201 | 202 | 203 | if __name__ == '__main__': 204 | tf.app.run() 205 | -------------------------------------------------------------------------------- /Houston_dhcnet.py: -------------------------------------------------------------------------------- 1 | # -- coding: utf-8 -- 2 | 3 | import scipy.io 4 | import tensorflow as tf 5 | from tensorflow.contrib import slim 6 | import numpy as np 7 | from data_Houston import patch_size, num_band 8 | import time 9 | import os 10 | import scipy.ndimage 11 | from deformable_conv import deformable_convolution 12 | 13 | # Network hyperparameters 14 | num_classes = 15 15 | Train_Batch_Size = 150 16 | Learning_Rate_Base = 0.1 17 | Training_Steps = 1401 18 | 19 | 20 | def train(): 21 | Training_Data = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Training_Data.mat'))['Training_Data'] 22 | Testing_Data = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Testing_Data.mat'))['Testing_Data'] 23 | Training_Label = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Training_Label.mat'))['Training_Label'] 24 | Testing_Label = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Testing_Label.mat'))['Testing_Label'] 25 | All_Patches = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/All_Patches.mat'))['All_Patches'] 26 | All_Labels = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/All_Labels.mat'))['All_Labels'] 27 | 28 | num_train = Training_Data.shape[0] 29 | num_test = Testing_Data.shape[0] 30 | num_total = All_Patches.shape[0] 31 | 32 | x = tf.placeholder(tf.float32, [None, patch_size, patch_size, num_band], name='x_input') 33 | y = tf.placeholder(tf.float32, [None, num_classes], name='y_input') 34 | training_flag = tf.placeholder(tf.bool) 35 | 36 | # conv1 37 | weights1 = tf.get_variable("weigts1", shape=[3, 3, num_band, 96], 38 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 39 | conv1 = tf.nn.conv2d(x, weights1, strides=[1, 1, 1, 1], padding='SAME') 40 | conv1 = tf.layers.batch_normalization(conv1, training=training_flag) 41 | conv1 = tf.nn.relu(conv1) 42 | 43 | # conv2 44 | weights2 = tf.get_variable("weigts2", shape=[3, 3, 96, 96], 45 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 46 | conv2 = tf.nn.conv2d(conv1, weights2, strides=[1, 1, 1, 1], padding='SAME') 47 | conv2 = tf.layers.batch_normalization(conv2, training=training_flag) 48 | conv2 = tf.nn.relu(conv2) 49 | 50 | pool1 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 51 | 52 | # conv3 53 | weights3 = tf.get_variable("weigts3", shape=[3, 3, 96, 108], 54 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 55 | conv3 = tf.nn.conv2d(pool1, weights3, strides=[1, 1, 1, 1], padding='SAME') 56 | conv3 = tf.layers.batch_normalization(conv3, training=training_flag) 57 | conv3 = tf.nn.relu(conv3) 58 | 59 | # conv4 60 | weights4 = tf.get_variable("weigts4", shape=[3, 3, 108, 108], 61 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 62 | conv4 = tf.nn.conv2d(conv3, weights4, strides=[1, 1, 1, 1], padding='SAME') 63 | 
conv4 = tf.layers.batch_normalization(conv4, training=training_flag) 64 | conv4 = tf.nn.relu(conv4) 65 | 66 | weights_d4 = tf.Variable(tf.zeros([3, 3, 108, 216]) + 0.001, name="weights_d4") 67 | offset4 = tf.nn.conv2d(conv4, weights_d4, strides=[1, 1, 1, 1], padding='SAME') 68 | offset_img4 = deformable_convolution(conv4, offset4) 69 | 70 | # conv_pool 71 | weights_pool = tf.get_variable("weigts_pool", shape=[2, 2, 108, 108], 72 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 73 | conv_pool = tf.nn.conv2d(offset_img4, weights_pool, strides=[1, 2, 2, 1], padding='SAME') 74 | conv_pool = tf.layers.batch_normalization(conv_pool, training=training_flag) 75 | conv_pool = tf.nn.relu(conv_pool) 76 | 77 | # conv5 78 | weights5 = tf.get_variable("weigts5", shape=[3, 3, 108, 128], 79 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 80 | conv5 = tf.nn.conv2d(conv_pool, weights5, strides=[1, 1, 1, 1], padding='SAME') 81 | conv5 = tf.layers.batch_normalization(conv5, training=training_flag) 82 | conv5 = tf.nn.relu(conv5) 83 | 84 | weights_d6 = tf.Variable(tf.zeros([3, 3, 128, 256]) + 0.001, name="weights_d6") 85 | offset6 = tf.nn.conv2d(conv5, weights_d6, strides=[1, 1, 1, 1], padding='SAME') 86 | offset_img6 = deformable_convolution(conv5, offset6) 87 | 88 | # conv6 89 | weights6 = tf.get_variable("weigts6", shape=[3, 3, 128, 128], 90 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 91 | conv6 = tf.nn.conv2d(offset_img6, weights6, strides=[1, 1, 1, 1], padding='SAME') 92 | conv6 = tf.layers.batch_normalization(conv6, training=training_flag) 93 | conv6 = tf.nn.relu(conv6) 94 | 95 | net = slim.avg_pool2d(conv6, 7, padding='VALID') 96 | 97 | net = slim.flatten(net) 98 | 99 | # fc1 100 | weights7 = tf.get_variable("weigts7", shape=[128, 200], 101 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 102 | fc1 = tf.matmul(net, weights7) 103 | fc1 = tf.layers.batch_normalization(fc1, training=training_flag) 104 | fc1 = tf.nn.relu(fc1) 105 | 106 | # dropout 107 | net = slim.dropout(fc1, 0.5, is_training=training_flag) 108 | 109 | # fc2 110 | weights8 = tf.get_variable("weigts8", shape=[200, num_classes], 111 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 112 | biases8 = tf.get_variable("biases8", shape=[num_classes], 113 | dtype=tf.float32, initializer=tf.zeros_initializer()) 114 | pred = tf.matmul(net, weights8) + biases8 115 | 116 | 117 | output = tf.argmax(pred, 1) 118 | 119 | # Define loss and optimizer 120 | cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y) 121 | 122 | loss = tf.reduce_mean(cross_entropy) 123 | global_step = tf.Variable(0, trainable=False) 124 | learning_rate = tf.train.exponential_decay( 125 | Learning_Rate_Base, global_step, 126 | 700, 0.25) 127 | optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) 128 | 129 | update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 130 | with tf.control_dependencies(update_ops): 131 | train_op = optimizer.minimize(loss, global_step=global_step) 132 | # Define accuracy 133 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 134 | 135 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 136 | 137 | init = tf.initialize_all_variables() 138 | 139 | saver = tf.train.Saver({'weights1': weights1, 'weights2': weights2, 'weights3': weights3, 140 | 'weights4': weights4, 'weights5': weights5, 'weights6': weights6, 141 | 'weights7': weights7, 'weights8': 
weights8, 'biases8': biases8}) 142 | 143 | 144 | with tf.Session() as sess: 145 | sess.run(init) 146 | saver.restore(sess, os.path.join(os.getcwd(), "model/cnn.ckpt")) 147 | 148 | for i in range(Training_Steps): 149 | start_time = time.time() 150 | idx = np.random.choice(num_train, size=Train_Batch_Size, replace=False) 151 | batch_x = Training_Data[idx, :] 152 | batch_y = Training_Label[idx, :] 153 | sess.run(train_op, feed_dict={x: batch_x, y: batch_y, training_flag: True}) 154 | 155 | # Display logs per epoch step 156 | if i % 100 == 0: 157 | batch_cost, train_acc = sess.run([loss, accuracy], 158 | feed_dict={x: batch_x, y: batch_y, training_flag: False}) 159 | duration = time.time() - start_time 160 | print("Steps", '%04d,' % i, "Loss=%.4f," % batch_cost, 161 | "Training Accuracy=%.4f" % train_acc, "time:%.4f s" % duration) 162 | if i == 1400: 163 | sum = 0.0 164 | test_outlabel = [] 165 | for k in range(0, int(num_test / 100)): 166 | test_x = [Testing_Data[i + k * 100] for i in range(0, 100)] 167 | test_y = [Testing_Label[i + k * 100] for i in range(0, 100)] 168 | test_accuracy, out_label = sess.run([accuracy, output], 169 | feed_dict={x: test_x, y: test_y, training_flag: False}) 170 | test_outlabel.extend(out_label) 171 | sum += test_accuracy * 100 172 | test_x = [Testing_Data[i] for i in range(int(num_test / 100) * 100, num_test)] 173 | test_y = [Testing_Label[i] for i in range(int(num_test / 100) * 100, num_test)] 174 | test_accuracy, out_label = sess.run([accuracy, output], 175 | feed_dict={x: test_x, y: test_y, training_flag: False}) 176 | test_outlabel.extend(out_label) 177 | sum += test_accuracy * (num_test - int(num_test / 100) * 100) 178 | print("The Test Accuracy is :", sum / num_test) 179 | 180 | test_outlabel = np.array(test_outlabel) 181 | test_ind = {} 182 | test_ind['Test_Outlabel'] = test_outlabel 183 | scipy.io.savemat(os.path.join(os.getcwd(), 'result/Test_Outlabel'), test_ind) 184 | 185 | sum = 0.0 186 | Draw_Label = [] 187 | for k in range(0, int(num_total / 100)): 188 | test_x = [All_Patches[i + k * 100] for i in range(0, 100)] 189 | test_y = [All_Labels[i + k * 100] for i in range(0, 100)] 190 | test_accuracy, out_label = sess.run([accuracy, output], 191 | feed_dict={x: test_x, y: test_y, training_flag: False}) 192 | Draw_Label.extend(out_label) 193 | sum += test_accuracy * 100 194 | test_x = [All_Patches[i] for i in range(int(num_total / 100) * 100, num_total)] 195 | test_y = [All_Labels[i] for i in range(int(num_total / 100) * 100, num_total)] 196 | test_accuracy, out_label = sess.run([accuracy, output], 197 | feed_dict={x: test_x, y: test_y, training_flag: False}) 198 | Draw_Label.extend(out_label) 199 | sum += test_accuracy * (num_total - int(num_total / 100) * 100) 200 | print("The Test Accuracy is :", sum / num_total) 201 | 202 | Draw_Label = np.array(Draw_Label) 203 | test_ind = {} 204 | test_ind['Draw_Label'] = Draw_Label 205 | scipy.io.savemat(os.path.join(os.getcwd(), 'result/Draw_Label'), test_ind) 206 | 207 | 208 | 209 | def main(argv=None): 210 | train() 211 | 212 | 213 | 214 | if __name__ == '__main__': 215 | tf.app.run() 216 | -------------------------------------------------------------------------------- /Houston_generate.py: -------------------------------------------------------------------------------- 1 | # -- coding: utf-8 -- 2 | 3 | import scipy.io 4 | import tensorflow as tf 5 | from tensorflow.contrib import slim 6 | import numpy as np 7 | from data_Houston import patch_size, prepare_data, num_band 8 | import time 9 | import os 10 | import 
scipy.ndimage 11 | 12 | 13 | # Network hyperparameters 14 | num_classes = 15 15 | Train_Batch_Size = 200 16 | Learning_Rate_Base = 0.1 17 | Training_Steps = 1401 18 | 19 | 20 | def train(): 21 | Training_Data, Testing_Data, Training_Label, Testing_Label, All_Patches, All_Labels = prepare_data() 22 | test_ind = {} 23 | test_ind['Training_Data'] = Training_Data 24 | scipy.io.savemat(os.path.join(os.getcwd(), 'data/Training_Data'), test_ind) 25 | test_ind = {} 26 | test_ind['Testing_Data'] = Testing_Data 27 | scipy.io.savemat(os.path.join(os.getcwd(), 'data/Testing_Data'), test_ind) 28 | test_ind = {} 29 | test_ind['Training_Label'] = Training_Label 30 | scipy.io.savemat(os.path.join(os.getcwd(), 'data/Training_Label'), test_ind) 31 | test_ind = {} 32 | test_ind['Testing_Label'] = Testing_Label 33 | scipy.io.savemat(os.path.join(os.getcwd(), 'data/Testing_Label'), test_ind) 34 | test_ind = {} 35 | test_ind['All_Patches'] = All_Patches 36 | scipy.io.savemat(os.path.join(os.getcwd(), 'data/All_Patches'), test_ind) 37 | test_ind = {} 38 | test_ind['All_Labels'] = All_Labels 39 | scipy.io.savemat(os.path.join(os.getcwd(),'data/All_Labels'), test_ind) 40 | 41 | 42 | num_train = Training_Data.shape[0] 43 | num_test = Testing_Data.shape[0] 44 | num_total = All_Patches.shape[0] 45 | x = tf.placeholder(tf.float32, [None, patch_size, patch_size, num_band], name='x_input') 46 | y = tf.placeholder(tf.float32, [None, num_classes], name='y_input') 47 | training_flag = tf.placeholder(tf.bool) 48 | 49 | # conv1 50 | weights1 = tf.get_variable("weigts1", shape=[3, 3, num_band, 96], 51 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 52 | conv1 = tf.nn.conv2d(x, weights1, strides=[1, 1, 1, 1], padding='SAME') 53 | conv1 = tf.layers.batch_normalization(conv1, training=training_flag) 54 | conv1 = tf.nn.relu(conv1) 55 | 56 | # conv2 57 | weights2 = tf.get_variable("weigts2", shape=[3, 3, 96, 96], 58 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 59 | conv2 = tf.nn.conv2d(conv1, weights2, strides=[1, 1, 1, 1], padding='SAME') 60 | conv2 = tf.layers.batch_normalization(conv2, training=training_flag) 61 | conv2 = tf.nn.relu(conv2) 62 | 63 | pool1 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 64 | 65 | # conv3 66 | weights3 = tf.get_variable("weigts3", shape=[3, 3, 96, 108], 67 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 68 | conv3 = tf.nn.conv2d(pool1, weights3, strides=[1, 1, 1, 1], padding='SAME') 69 | conv3 = tf.layers.batch_normalization(conv3, training=training_flag) 70 | conv3 = tf.nn.relu(conv3) 71 | 72 | # conv4 73 | weights4 = tf.get_variable("weigts4", shape=[3, 3, 108, 108], 74 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 75 | conv4 = tf.nn.conv2d(conv3, weights4, strides=[1, 1, 1, 1], padding='SAME') 76 | conv4 = tf.layers.batch_normalization(conv4, training=training_flag) 77 | conv4 = tf.nn.relu(conv4) 78 | 79 | pool2 = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 80 | 81 | # conv5 82 | weights5 = tf.get_variable("weigts5", shape=[3, 3, 108, 128], 83 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 84 | conv5 = tf.nn.conv2d(pool2, weights5, strides=[1, 1, 1, 1], padding='SAME') 85 | conv5 = tf.layers.batch_normalization(conv5, training=training_flag) 86 | conv5 = tf.nn.relu(conv5) 87 | 88 | # conv6 89 | weights6 = tf.get_variable("weigts6", shape=[3, 3, 128, 128], 90 | dtype=tf.float32, 
initializer=tf.contrib.layers.xavier_initializer(False)) 91 | conv6 = tf.nn.conv2d(conv5, weights6, strides=[1, 1, 1, 1], padding='SAME') 92 | conv6 = tf.layers.batch_normalization(conv6, training=training_flag) 93 | conv6 = tf.nn.relu(conv6) 94 | 95 | net = slim.avg_pool2d(conv6, 7, padding='VALID') 96 | 97 | net = slim.flatten(net) 98 | 99 | # fc1 100 | weights7 = tf.get_variable("weigts7", shape=[128, 200], 101 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 102 | fc1 = tf.matmul(net, weights7) 103 | fc1 = tf.layers.batch_normalization(fc1, training=training_flag) 104 | fc1 = tf.nn.relu(fc1) 105 | 106 | # dropout 107 | net = slim.dropout(fc1, 0.5, is_training=training_flag) 108 | 109 | # fc2 110 | weights8 = tf.get_variable("weigts8", shape=[200, num_classes], 111 | dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(False)) 112 | biases8 = tf.get_variable("biases8", shape=[num_classes], 113 | dtype=tf.float32, initializer=tf.zeros_initializer()) 114 | pred = tf.matmul(net, weights8) + biases8 115 | 116 | output = tf.argmax(pred, 1) 117 | 118 | cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y) 119 | 120 | loss = tf.reduce_mean(cross_entropy) 121 | global_step = tf.Variable(0, trainable=False) 122 | learning_rate = tf.train.exponential_decay( 123 | Learning_Rate_Base, global_step, 124 | 700, 0.25) 125 | optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) 126 | 127 | update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 128 | with tf.control_dependencies(update_ops): 129 | train_op = optimizer.minimize(loss, global_step=global_step) 130 | # Define accuracy 131 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 132 | 133 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 134 | 135 | init = tf.initialize_all_variables() 136 | 137 | saver = tf.train.Saver({'weights1':weights1,'weights2':weights2,'weights3':weights3, 138 | 'weights4': weights4,'weights5': weights5,'weights6': weights6, 139 | 'weights7': weights7,'weights8': weights8,'biases8': biases8}) 140 | 141 | 142 | with tf.Session() as sess: 143 | sess.run(init) 144 | saver.save(sess, os.path.join(os.getcwd(), "model/cnn.ckpt")) 145 | print("save ok") 146 | for i in range(Training_Steps): 147 | idx = np.random.choice(num_train, size=Train_Batch_Size, replace=False) 148 | batch_x = Training_Data[idx, :] 149 | batch_y = Training_Label[idx, :] 150 | sess.run(train_op, feed_dict={x: batch_x, y: batch_y, training_flag: True}) 151 | 152 | 153 | def main(argv=None): 154 | train() 155 | 156 | 157 | if __name__ == '__main__': 158 | tf.app.run() 159 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DHCNet 2 | The TensorFlow implementation of DHCNet. 3 | Our paper has been published on IEEE Xplore. If you find it useful, please cite: J. Zhu, L. Fang and P. Ghamisi, "Deformable Convolutional Neural Networks for Hyperspectral Image Classification," IEEE Geoscience and Remote Sensing Letters, vol. 15, no. 8, pp. 1254-1258, Aug. 2018. 
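Workflow, as implied by the scripts: run `Houston_generate.py` first. It calls `prepare_data()` from `data_Houston.py` (which expects `data/Houston_pca.mat` and `data/Houston_gt.mat`), writes the training/test patches as .mat files under `data/`, and saves the network weights to `model/cnn.ckpt` before its training loop runs. `Houston_cnn.py` (plain CNN baseline), `Houston_dcnn.py` (one deformable sampling block), and `Houston_dhcnet.py` (full DHCNet) each restore that same checkpoint, so the three networks start from an identical initialization, and write their predictions to `result/Test_Outlabel.mat` and `result/Draw_Label.mat`. 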
4 | -------------------------------------------------------------------------------- /data_Houston.py: -------------------------------------------------------------------------------- 1 | # -- coding: utf-8 -- 2 | 3 | import scipy.io 4 | import numpy as np 5 | import random 6 | from random import shuffle 7 | import os 8 | from skimage.util import pad 9 | import scipy.ndimage 10 | import math 11 | import pandas as pd 12 | 13 | 14 | patch_size = 25 15 | num_band = 3 16 | 17 | def prepare_data(): 18 | Data = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Houston_pca.mat'))['Houston_pca'] 19 | Label = scipy.io.loadmat(os.path.join(os.getcwd(), 'data/Houston_gt.mat'))['Houston_gt'] 20 | 21 | # Get the data dimensions 22 | Height, Width, Band = Data.shape[0], Data.shape[1], Data.shape[2] 23 | # Number of classes; np.unique() returns the distinct labels in ascending order, and label 0 means unclassified 24 | Num_Classes = len(np.unique(Label))-1 25 | 26 | # Normalize each band to [0, 1] 27 | Data = Data.astype(float) 28 | for band in range(Band): 29 | Data[:, :, band] = (Data[:, :, band]-np.min(Data[:, :, band]))/(np.max(Data[:, :, band])-np.min(Data[:, :, band])) 30 | 31 | # Pre-allocate the padded data: Height and Width each grow by patch_size (defined above) - 1 pixels 32 | Data_Padding = np.zeros((Height+int(patch_size-1), Width+int(patch_size-1), Band)) 33 | for band in range(Band): 34 | # fill each band, padded symmetrically at the borders 35 | Data_Padding[:, :, band] = pad(Data[:, :, band], int((patch_size-1)/2), 'symmetric') 36 | 37 | 38 | def Patch(height_index, width_index): 39 | """ function to extract patches from the original data """ 40 | # Slice a window of size patch_size*patch_size 41 | height_slice = slice(height_index, height_index + patch_size) 42 | width_slice = slice(width_index, width_index + patch_size) 43 | patch = Data_Padding[height_slice, width_slice, :] 44 | return np.array(patch) 45 | 46 | # Create one patch list per class: [[], [], ...] 47 | Classes= [] 48 | for k in range(Num_Classes): 49 | Classes.append([]) 50 | 51 | All_Patches, All_Labels =[], [] 52 | for j in range(0, Width): 53 | for i in range(0, Height): 54 | curr_patch = Patch(i, j) 55 | curr_label = Label[i, j] 56 | if(curr_label!=0): 57 | Classes[curr_label - 1].append(curr_patch) 58 | All_Patches.append(curr_patch) 59 | All_Labels.append(curr_label-1) 60 | 61 | # Number of samples in each class 62 | Num_Each_Class=[] 63 | for k in range(Num_Classes): 64 | Num_Each_Class.append(len(Classes[k])) 65 | 66 | 67 | # Train/test split 68 | def DataDivide(Classes_k1, Num_Train_Each_Class_k): 69 | """ function to divide collected patches into training and test patches """ 70 | # np.random.choice() samples the requested number of indices from the array; replace=False means no index is drawn twice 71 | idx = np.random.choice(len(Classes_k1), Num_Train_Each_Class_k, replace=False) 72 | train_patch = [Classes_k1[i] for i in idx] 73 | # Set difference: the remaining indices form the test split 74 | idx_test = np.setdiff1d(range(len(Classes_k1)),idx) 75 | test_patch = [Classes_k1[i] for i in idx_test] 76 | return train_patch, test_patch 77 | 78 | # Build the training and test sets 79 | Num_Train_Each_Class = [50]*Num_Classes 80 | 81 | Num_Test_Each_Class = list(np.array(Num_Each_Class) - np.array(Num_Train_Each_Class)) 82 | Train_Patch, Train_Label, Test_Patch, Test_Label = [], [], [], [] 83 | 84 | for k in range(Num_Classes): 85 | # Split the k-th class into training and test patches 86 | train_patch, test_patch = DataDivide(Classes[k], Num_Train_Each_Class[k]) 87 | #Make training and test splits 88 | Train_Patch.append(train_patch) # patches_of_current_class[:-test_split_size] 89 | Test_Patch.extend(test_patch) # patches_of_current_class[-test_split_size:] 90 | # np.full() creates a constant array 91 | Test_Label.extend(np.full(Num_Test_Each_Class[k], k, dtype=int)) 92 | 93 | Train_Label = [] 94 | for k in range(Num_Classes): 95 | 
Train_Label.append([k]*Num_Train_Each_Class[k]) 96 | 97 | 98 | OS_Aug_Num_Training_Each = [] 99 | for k in range(Num_Classes): 100 | OS_Aug_Num_Training_Each.append(len(Train_Label[k])) 101 | 102 | Temp1, Temp2 = [], [] 103 | for k in range(Num_Classes): 104 | Temp1.extend(Train_Patch[k]) 105 | Temp2.extend(Train_Label[k]) 106 | Train_Patch = Temp1 107 | Train_Label = Temp2 108 | 109 | def convertToOneHot(vector, num_classes=None): 110 | """ 111 | Converts an input 1-D vector of integers into an output 112 | 2-D array of one-hot vectors, where an i'th input value 113 | of j will set a '1' in the i'th row, j'th column of the 114 | output array. 115 | 116 | Example: 117 | v = np.array((1, 0, 4)) 118 | one_hot_v = convertToOneHot(v) 119 | print(one_hot_v) 120 | 121 | [[0 1 0 0 0] 122 | [1 0 0 0 0] 123 | [0 0 0 0 1]] 124 | """ 125 | 126 | assert isinstance(vector, np.ndarray) 127 | assert len(vector) > 0 128 | 129 | if num_classes is None: 130 | num_classes = np.max(vector)+1 131 | else: 132 | assert num_classes > 0 133 | assert num_classes >= np.max(vector) 134 | 135 | result = np.zeros(shape=(len(vector), num_classes)) 136 | result[np.arange(len(vector)), vector] = 1 137 | return result.astype(int) 138 | 139 | # Convert the labels to One-Hot vectors 140 | # (one-hot encoding) 141 | Train_Patch = np.array(Train_Patch) 142 | Test_Patch = np.array(Test_Patch) 143 | Train_Label = np.array(Train_Label) 144 | Test_Label = np.array(Test_Label) 145 | All_Patches = np.array(All_Patches) 146 | All_Labels = np.array(All_Labels) 147 | 148 | test_ind = {} 149 | test_ind['TestLabel'] = Test_Label 150 | scipy.io.savemat(os.path.join(os.getcwd(), 'result/TestLabel.mat'), test_ind) 151 | 152 | Train_Label = convertToOneHot(Train_Label,num_classes=Num_Classes) 153 | Test_Label = convertToOneHot(Test_Label,num_classes=Num_Classes) 154 | All_Labels = convertToOneHot(All_Labels, num_classes=Num_Classes) 155 | 156 | # Data Summary 157 | df = pd.DataFrame(np.random.randn(Num_Classes, 4), 158 | columns=['Total', 'Training', 'OS&Aug', 'Testing']) 159 | df['Total'] = Num_Each_Class 160 | df['Training'] = Num_Train_Each_Class 161 | df['OS&Aug'] = OS_Aug_Num_Training_Each 162 | df['Testing'] = Num_Test_Each_Class 163 | num_train = len(Train_Patch) 164 | num_test = len(Test_Patch) 165 | print("=======================================================================") 166 | print("Data Summary") 167 | print("=======================================================================") 168 | print('The size of the original HSI data is (%d,%d,%d)' % (Height, Width, Band)) 169 | print('The size of Training data is (%d)' % (num_train)) 170 | print('The size of Test data is (%d)' % (num_test)) 171 | print('The size of each sample is (%d,%d,%d)' % (patch_size, patch_size, Band)) 172 | print('-----------------------------------------------------------------------') 173 | print("The Data Division is") 174 | print(df) 175 | return Train_Patch, Test_Patch, Train_Label, Test_Label, All_Patches, All_Labels 176 | 177 | 178 | 179 | 180 | 181 | -------------------------------------------------------------------------------- /deformable_conv.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division 2 | 3 | 4 | import tensorflow as tf 5 | 6 | 7 | def tf_flatten(a): 8 | """Flatten tensor""" 9 | return tf.reshape(a, [-1]) 10 | 11 | 12 | def tf_repeat(a, repeats, axis=0): 13 | """TensorFlow version of np.repeat for 1D""" 14 | # 
https://github.com/tensorflow/tensorflow/issues/8521 15 | assert len(a.get_shape()) == 1 16 | 17 | a = tf.expand_dims(a, -1) 18 | a = tf.tile(a, [1, repeats]) 19 | a = tf_flatten(a) 20 | return a 21 | 22 | 23 | def tf_repeat_2d(a, repeats): 24 | """Tensorflow version of np.repeat for 2D""" 25 | 26 | assert len(a.get_shape()) == 2 27 | a = tf.expand_dims(a, 0) 28 | a = tf.tile(a, [repeats, 1, 1]) 29 | return a 30 | 31 | 32 | def tf_batch_map_coordinates(input, coords, order=1): 33 | """Batch version of tf_map_coordinates 34 | 35 | Only supports 2D feature maps 36 | 37 | Parameters 38 | ---------- 39 | input : tf.Tensor. shape = (b, s, s) 40 | coords : tf.Tensor. shape = (b, n_points, 2) 41 | 42 | Returns 43 | ------- 44 | tf.Tensor. shape = (b, n_points) 45 | """ 46 | 47 | input_shape = tf.shape(input) 48 | batch_size = input_shape[0] 49 | input_size = input_shape[1] 50 | n_coords = tf.shape(coords)[1] 51 | 52 | coords = tf.clip_by_value(coords, 0, tf.cast(input_size, 'float32') - 1) 53 | coords_lt = tf.cast(tf.floor(coords), 'int32') 54 | coords_rb = tf.cast(tf.ceil(coords), 'int32') 55 | coords_lb = tf.stack([coords_lt[..., 0], coords_rb[..., 1]], axis=-1) 56 | coords_rt = tf.stack([coords_rb[..., 0], coords_lt[..., 1]], axis=-1) 57 | 58 | idx = tf_repeat(tf.range(batch_size), n_coords) 59 | 60 | def _get_vals_by_coords(input, coords): 61 | indices = tf.stack([ 62 | idx, tf_flatten(coords[..., 0]), tf_flatten(coords[..., 1]) 63 | ], axis=-1) 64 | vals = tf.gather_nd(input, indices) 65 | vals = tf.reshape(vals, (batch_size, n_coords)) 66 | return vals 67 | 68 | vals_lt = _get_vals_by_coords(input, coords_lt) 69 | vals_rb = _get_vals_by_coords(input, coords_rb) 70 | vals_lb = _get_vals_by_coords(input, coords_lb) 71 | vals_rt = _get_vals_by_coords(input, coords_rt) 72 | 73 | coords_offset_lt = coords - tf.cast(coords_lt, 'float32') 74 | vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[..., 0] 75 | vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[..., 0] 76 | mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[..., 1] 77 | 78 | return mapped_vals 79 | 80 | 81 | def tf_batch_map_offsets(input, offsets, order=1): 82 | """Batch map offsets into input 83 | 84 | Parameters 85 | --------- 86 | input : tf.Tensor. shape = (b, s, s) 87 | offsets: tf.Tensor. shape = (b, s, s, 2) 88 | 89 | Returns 90 | ------- 91 | tf.Tensor. 
shape = (b, s*s) 92 | """ 93 | 94 | input_shape = tf.shape(input) 95 | batch_size = input_shape[0] 96 | input_size = input_shape[1] 97 | 98 | offsets = tf.reshape(offsets, (batch_size, -1, 2)) 99 | grid = tf.meshgrid( 100 | tf.range(input_size), tf.range(input_size), indexing='ij' 101 | ) 102 | grid = tf.stack(grid, axis=-1) 103 | grid = tf.cast(grid, 'float32') 104 | grid = tf.reshape(grid, (-1, 2)) 105 | grid = tf_repeat_2d(grid, batch_size) 106 | coords = offsets + grid 107 | 108 | mapped_vals = tf_batch_map_coordinates(input, coords) 109 | return mapped_vals 110 | 111 | 112 | def to_bc_h_w_2(x, x_shape): 113 | """(b, h, w, 2c) -> (b*c, h, w, 2)""" 114 | x = tf.transpose(x, [0, 3, 1, 2]) 115 | x = tf.reshape(x, (-1, int(x_shape[1]), int(x_shape[2]), 2)) 116 | return x 117 | 118 | 119 | def to_bc_h_w(x, x_shape): 120 | """(b, h, w, c) -> (b*c, h, w)""" 121 | x = tf.transpose(x, [0, 3, 1, 2]) 122 | x = tf.reshape(x, (-1, int(x_shape[1]), int(x_shape[2]))) 123 | return x 124 | 125 | 126 | def to_b_h_w_c(x, x_shape): 127 | """(b*c, h, w) -> (b, h, w, c)""" 128 | x = tf.reshape( 129 | x, (-1, int(x_shape[3]), int(x_shape[1]), int(x_shape[2])) 130 | ) 131 | x = tf.transpose(x, [0, 2, 3, 1]) 132 | return x 133 | 134 | 135 | def deformable_convolution(x, offsets): 136 | """Return the deformed feature map; offsets must have twice the channels of x""" 137 | x_shape = x.get_shape() 138 | 139 | # offsets: (b*c, h, w, 2) 140 | offsets = to_bc_h_w_2(offsets, x_shape) 141 | 142 | # x: (b*c, h, w) 143 | x = to_bc_h_w(x, x_shape) 144 | 145 | # x_offset: (b*c, h*w) 146 | x_offset = tf_batch_map_offsets(x, offsets) 147 | 148 | # x_offset: (b, h, w, c) 149 | x_offset = to_b_h_w_c(x_offset, x_shape) 150 | 151 | return x_offset --------------------------------------------------------------------------------
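A minimal usage sketch of this module (TensorFlow 1.x), mirroring how Houston_dcnn.py and Houston_dhcnet.py wire it up; the tensor name `features` and the 12x12x128 shape are illustrative only, not from the repo. A plain convolution predicts 2*C offset channels from a C-channel feature map, i.e. one (x, y) displacement per channel, and deformable_convolution bilinearly resamples the map at the displaced positions. Note that the helpers above assume square spatial dimensions.

import tensorflow as tf
from deformable_conv import deformable_convolution

# Feature map with a square spatial size (shape values are illustrative).
features = tf.placeholder(tf.float32, [None, 12, 12, 128])

# Offset branch: 2 * 128 = 256 output channels; near-zero initial weights keep
# the initial sampling close to the regular grid, as in the training scripts.
offset_weights = tf.Variable(tf.zeros([3, 3, 128, 256]) + 0.001)
offsets = tf.nn.conv2d(features, offset_weights, strides=[1, 1, 1, 1], padding='SAME')

# The resampled map has the same (b, h, w, c) shape as `features` and then
# feeds an ordinary convolution layer (the "conv6" block in the scripts).
deformed = deformable_convolution(features, offsets)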