├── BuildSPInst_A.py
├── CNNLayer.py
├── GGeModel.py
├── LICENSE
├── LoadData.py
├── README.md
├── data
│   ├── IP_gt.mat
│   ├── IP_gyh.mat
│   ├── tepos.mat
│   ├── trpos.mat
│   └── useful_sp_lab.mat
├── funcCNN.py
└── trainGCN.py

/BuildSPInst_A.py:
--------------------------------------------------------------------------------
import numpy as np
from LoadData import *
import numpy.matlib


class GetInst_A(object):
    def __init__(self, useful_sp_lab, img3d, gt, trpos, tepos):
        self.useful_sp_lab = useful_sp_lab
        self.img3d = img3d
        [self.r, self.c, self.l] = np.shape(img3d)
        self.num_classes = int(np.max(gt))
        self.img2d = np.reshape(img3d, [self.r*self.c, self.l])
        self.sp_num = np.array(np.max(self.useful_sp_lab), dtype='int')
        gt = np.array(gt, dtype='int')
        self.gt1d = np.reshape(gt, [self.r*self.c])
        self.gt_tr = np.array(np.zeros([self.r*self.c]), dtype='int')
        self.gt_te = self.gt1d
        # Convert the 1-based (row, col) positions to 0-based 1-D indices.
        self.trpos2d = np.array(trpos, dtype='int')
        self.trpos = (self.trpos2d[:, 0]-1)*self.c + self.trpos2d[:, 1] - 1
        self.tepos2d = np.array(tepos, dtype='int')
        self.tepos = (self.tepos2d[:, 0]-1)*self.c + self.tepos2d[:, 1] - 1
        self.tepos2d = self.tepos2d - 1
        self.trpos2d = self.trpos2d - 1

        self.sp_mean = np.zeros([self.sp_num, self.l])
        self.sp_center_px = np.zeros([self.sp_num, self.l])
        self.sp_label = np.zeros([self.sp_num])
        self.sp_label_sp = []
        self.ideal_sp_mat01 = np.zeros([self.sp_num, self.sp_num])
        self.trmask_sp = np.zeros([self.sp_num])
        self.temask_sp = np.ones([self.sp_num])
        self.sp_nei = []
        self.sp_nei_includeself = []
        self.sp_label_vec = []
        self.sp_A = []
        self.sp_A_notSym = []
        self.support = []
        self.px_nei_sp = []
        self.px_sp_01 = np.zeros([np.shape(self.trpos2d)[0]+np.shape(self.tepos2d)[0], self.sp_num])
        self.px_sp_A = np.zeros([np.shape(self.trpos2d)[0]+np.shape(self.tepos2d)[0], self.sp_num])

        self.Q = np.zeros([len(np.argwhere(gt > 0)), self.sp_num])
        self.CalSpMean()
        self.CalSpNei()
        self.CalSpA(scale=1)
        self.ReprojectionQ()
        self.PxSpNei()

    def CalSpMean(self):
        self.gt_tr[self.trpos] = self.gt1d[self.trpos]
        mark_mat = np.zeros([self.r*self.c])
        mark_mat[self.trpos] = -1
        for sp_idx in range(1, self.sp_num+1):
            region_pos_2d = np.argwhere(self.useful_sp_lab == sp_idx)
            region_pos_1d = region_pos_2d[:, 0]*self.c + region_pos_2d[:, 1]
            px_num = np.shape(region_pos_2d)[0]
            # A superpixel that contains at least one training pixel becomes
            # a training superpixel.
            if np.sum(mark_mat[region_pos_1d]) < 0:
                self.trmask_sp[sp_idx-1] = 1
                self.temask_sp[sp_idx-1] = 0
            region_fea = self.img2d[region_pos_1d, :]
            if self.trmask_sp[sp_idx-1] == 1:
                region_labels = self.gt_tr[region_pos_1d]
            else:
                region_labels = self.gt_te[region_pos_1d]
            # Majority label inside the superpixel (label 0 = unlabeled is excluded).
            self.sp_label[sp_idx-1] = np.argmax(np.delete(np.bincount(region_labels), 0)) + 1
            region_pos_idx = np.argwhere(region_labels == self.sp_label[sp_idx-1])
            pos1 = region_pos_1d[region_pos_idx]
            self.sp_rps = np.mean(self.img2d[pos1, :], axis=0)
            # Weighted mean of the pixel spectra; weights decay with the
            # distance from the representative spectrum.
            vj = np.sum(np.power(np.matlib.repmat(self.sp_rps, px_num, 1)-region_fea, 2), axis=1)
            vj = np.exp(-0.2*vj)
            self.sp_mean[sp_idx-1, :] = np.sum(np.reshape(vj, [np.size(vj), 1])*region_fea, axis=0)/np.sum(vj)
        self.sp_label_sp = self.sp_label
        x1 = np.array([i for i in range(self.sp_num)], dtype='int')
        self.ideal_sp_mat01[x1, np.array(self.sp_label_sp-1, dtype='int')] = 1

        te_pos1 = np.argwhere(self.trmask_sp == 0)
        self.ideal_sp_mat01[te_pos1, :] = 0
        self.ideal_sp_mat01[:, te_pos1] = 0

        sp_label_mat = np.zeros([self.sp_num, self.num_classes])
        for row_idx in range(np.shape(self.sp_label)[0]):
            col_idx = int(self.sp_label[row_idx]) - 1
            sp_label_mat[row_idx, col_idx] = 1
        self.sp_label_vec = self.sp_label
        self.sp_label = sp_label_mat
        self.sp_label_sp = self.sp_label

    def CalSpNei(self):
        # For every superpixel, collect the labels of the superpixels that
        # touch its boundary (scanning one pixel beyond the region in each
        # direction).
        for sp_idx in range(1, self.sp_num+1):
            nei_list = []
            region_pos_2d = np.argwhere(self.useful_sp_lab == sp_idx)
            r1 = np.min(region_pos_2d[:, 0])
            r2 = np.max(region_pos_2d[:, 0])
            c1 = np.min(region_pos_2d[:, 1])
            c2 = np.max(region_pos_2d[:, 1])
            for r in range(r1, r2+1):
                pos1 = np.argwhere(region_pos_2d[:, 0] == r)[:, 0]
                min_col = np.min(region_pos_2d[:, 1][pos1])
                max_col = np.max(region_pos_2d[:, 1][pos1])
                nc1 = min_col - 1
                nc2 = max_col + 1
                if nc1 >= 0:
                    nei_list.append(self.useful_sp_lab[r, nc1])
                    if r > 0:
                        nei_list.append(self.useful_sp_lab[r-1, nc1])
                    if r < self.r-1:
                        nei_list.append(self.useful_sp_lab[r+1, nc1])
                if nc2 <= self.c-1:
                    nei_list.append(self.useful_sp_lab[r, nc2])
                    if r > 0:
                        nei_list.append(self.useful_sp_lab[r-1, nc2])
                    if r < self.r-1:
                        nei_list.append(self.useful_sp_lab[r+1, nc2])
            for c in range(c1, c2+1):
                pos1 = np.argwhere(region_pos_2d[:, 1] == c)[:, 0]
                min_row = np.min(region_pos_2d[:, 0][pos1])
                max_row = np.max(region_pos_2d[:, 0][pos1])
                nr1 = min_row - 1
                nr2 = max_row + 1
                if nr1 >= 0:
                    nei_list.append(self.useful_sp_lab[nr1, c])
                if nr2 <= self.r-1:
                    nei_list.append(self.useful_sp_lab[nr2, c])
            nei_list = list(set(nei_list))
            nei_list = [int(list_item) for list_item in nei_list]
            if 0 in nei_list:
                nei_list.remove(0)
            if sp_idx in nei_list:
                nei_list.remove(sp_idx)
            self.sp_nei.append(nei_list)
        self.sp_nei_includeself = self.sp_nei.copy()
        for sp_idx1 in range(1, self.sp_num+1):
            self.sp_nei_includeself[sp_idx1-1].append(sp_idx1)

    def CalSpA(self, scale=1):
        # Adjacency between neighbouring superpixels, weighted by a Gaussian
        # kernel on the distance between their mean spectra.
        sp_A_s1 = np.zeros([self.sp_num, self.sp_num])
        for sp_idx in range(1, self.sp_num+1):
            sp_idx0 = sp_idx - 1
            cen_sp = self.sp_mean[sp_idx0]
            nei_idx = self.sp_nei[sp_idx0]
            nei_idx0 = np.array([list_item-1 for list_item in nei_idx], dtype=int)
            cen_nei = self.sp_mean[nei_idx0, :]
            dist1 = self.Eu_dist(cen_sp, cen_nei)
            sp_A_s1[sp_idx0, nei_idx0] = dist1

        self.sp_A.append(sp_A_s1)

        for scale_idx in range(0):  # extra scales are disabled here
            self.sp_A.append(self.AddConnection(self.sp_A[-1]))
            del self.sp_A[0]

        # Row-normalized (non-symmetric) version of the adjacency.
        self.sp_A_notSym = self.sp_A.copy()
        self.sp_A_notSym[0] = (self.sp_A_notSym[0].T / np.sum(self.sp_A_notSym[0].T, axis=0)).T

        for scale_idx in range(scale):
            self.sp_A[scale_idx] = self.SymmetrizationMat(self.sp_A[scale_idx])
        for scale_idx in range(scale-1):
            del self.sp_A[0]

    def AddConnection(self, A):
        # Expand the neighbourhood by one hop and weight the new edges.
        A1 = A.copy()
        num_rows = np.shape(A)[0]
        for row_idx in range(num_rows):
            pos1 = np.argwhere(A[row_idx, :] != 0)
            for num_nei1 in range(np.size(pos1)):
                nei_ori = A[pos1[num_nei1, 0], :].copy()
                pos2 = np.argwhere(nei_ori != 0)[:, 0]
                nei1 = self.sp_mean[pos2, :]
                dist1 = self.Eu_dist(self.sp_mean[row_idx, :], nei1)
                A1[row_idx, pos2] = dist1
            A1[row_idx, row_idx] = 0
        return A1

    def AddConnectionFor01(self, A):
        # Same one-hop expansion, but on a 0/1 adjacency.
        A1 = A.copy()
        num_rows = np.shape(A)[0]
        for row_idx in range(num_rows):
            pos1 = np.argwhere(A[row_idx, :] != 0)
            for num_nei1 in range(np.size(pos1)):
                nei_ori = A[pos1[num_nei1, 0], :].copy()
                pos2 = np.argwhere(nei_ori != 0)[:, 0]
                A1[row_idx, pos2] = 1
        return A1
    def SymmetrizationMat(self, mat):
        [r, c] = np.shape(mat)
        if r != c:
            print('Input is not square matrix')
            return
        # Mirror one-sided edges so the adjacency becomes symmetric.
        for rows in range(r):
            for cols in range(rows, c):
                e1 = mat[rows, cols]
                e2 = mat[cols, rows]
                if e1+e2 != 0 and e1*e2 == 0:
                    mat[rows, cols] = e1+e2
                    mat[cols, rows] = e1+e2
        return mat

    def CalSupport(self, A, lam1):
        # Renormalized propagation matrix D^{-1/2}(A + lam1*I)D^{-1/2}.
        num1 = np.shape(A)[0]
        A_ = A + lam1*np.eye(num1)
        D_ = np.sum(A_, 1)
        D_05 = np.diag(D_**(-0.5))
        support = np.matmul(np.matmul(D_05, A_), D_05)
        return support

    def ReprojectionQ(self):
        # Q associates every labelled pixel with its neighbouring superpixels,
        # with weights exp(-0.2*||x_px - mean_sp||^2), normalized per pixel.
        num_all_px = len(self.trpos) + len(self.tepos)
        trtepos1d = np.concatenate((self.trpos, self.tepos), axis=0)
        trtepos2d = np.concatenate((self.trpos2d, self.tepos2d), axis=0)
        for px_idx0 in range(num_all_px):
            px_fea = self.img2d[trtepos1d[px_idx0]]
            sp_idx1 = self.useful_sp_lab[trtepos2d[px_idx0, 0], trtepos2d[px_idx0, 1]]
            neis = self.sp_nei[sp_idx1-1].copy()
            neis = [neis_elem-1 for neis_elem in neis]
            nei_sp_fea = self.sp_mean[neis]
            px_fea_repmat = np.matlib.repmat(px_fea, len(neis), 1)
            Q_row = np.exp(-0.2*np.sum(np.square(px_fea_repmat-nei_sp_fea), axis=1))
            self.Q[px_idx0, neis] = Q_row
        self.Q = self.Q/np.reshape(np.sum(self.Q, axis=1), [len(trtepos1d), 1])
        self.AllPxProcess()

    def AllPxProcess(self):
        num_all_px = len(self.trpos) + len(self.tepos)
        self.trmask = np.zeros([num_all_px])
        self.temask = np.zeros([num_all_px])
        self.trmask[0:len(self.trpos)] = 1
        self.temask[len(self.trpos):num_all_px] = 1
        self.sp_label = np.zeros([num_all_px, self.num_classes])
        trtepos1d = np.concatenate((self.trpos, self.tepos), axis=0)
        class_idxes1 = self.gt1d[trtepos1d]
        self.sp_label[np.array([i1 for i1 in range(num_all_px)]), class_idxes1-1] = 1

    def PxSpNei(self):
        trtepos2d = np.concatenate((self.trpos2d, self.tepos2d), axis=0)
        px_num = np.shape(trtepos2d)[0]
        for px_idx in range(px_num):
            pos2d = trtepos2d[px_idx, :]
            sp_idx0 = self.useful_sp_lab[pos2d[0], pos2d[1]] - 1
            self.px_nei_sp.append(self.sp_nei_includeself[sp_idx0])
            self.px_sp_01[px_idx, [elem-1 for elem in self.sp_nei_includeself[sp_idx0]]] = 1

    def Eu_dist(self, vec, mat):
        # Gaussian kernel on the squared Euclidean distance between vec and
        # each row of mat.
        rows = np.shape(mat)[0]
        mat1 = np.matlib.repmat(vec, rows, 1)
        dist1 = np.exp(-0.2*np.sum(np.power(mat1-mat, 2), axis=1))
        return dist1

    def CalLap(self, A):
        # Normalized graph Laplacian I - D^{-1/2} A D^{-1/2}.
        num1 = np.shape(A)[0]
        if A[0, 0] == 1:
            A -= np.eye(num1)
        D_ = np.sum(A, 1)
        D_05 = np.diag(D_**(-0.5))
        return np.eye(num1) - np.matmul(np.matmul(D_05, A), D_05)
--------------------------------------------------------------------------------
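For orientation, here is a minimal sketch (not part of the repository) of how GetInst_A might be driven directly, assuming the Indian Pines `.mat` files shipped under `./data`; the printed attributes are the ones defined above:

```python
from LoadData import load_HSI_data
from BuildSPInst_A import GetInst_A

Data = load_HSI_data('IP')
inst = GetInst_A(Data['useful_sp_lab'], Data['IP_gyh'], Data['IP_gt'],
                 Data['trpos'], Data['tepos'])
print(inst.sp_num)            # number of superpixels
print(inst.sp_mean.shape)     # (sp_num, bands): weighted mean spectra
print(inst.sp_A[0].shape)     # (sp_num, sp_num): symmetrized adjacency
print(inst.Q.shape)           # (labelled pixels, sp_num): reprojection weights
```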
/CNNLayer.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np

_LAYER_UIDS = {}


def uniform(shape, scale=0.05, name=None):
    """Uniform init."""
    initial = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
    return tf.Variable(initial, name=name)


def zeros(shape, name=None):
    """All zeros."""
    initial = tf.zeros(shape, dtype=tf.float32)
    return tf.Variable(initial, name=name)


def ones(shape, name=None):
    """All ones."""
    initial = tf.ones(shape, dtype=tf.float32)
    return tf.Variable(initial, name=name)


def glorot(shape, name=None):
    """Glorot & Bengio (AISTATS 2010) init."""
    init_range = np.sqrt(6.0/(shape[0]+shape[1]))
    initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)
    return tf.Variable(initial, name=name)


def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)


def bias_variable(shape, name):
    initial = tf.constant(0.01, shape=shape)
    return tf.Variable(initial, name=name)


def get_layer_uid(layer_name=''):
    """Helper function, assigns unique layer IDs."""
    if layer_name not in _LAYER_UIDS:
        _LAYER_UIDS[layer_name] = 1
        return 1
    else:
        _LAYER_UIDS[layer_name] += 1
        return _LAYER_UIDS[layer_name]


def sparse_dropout(x, keep_prob, noise_shape):
    """Dropout for sparse tensors."""
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1./keep_prob)


def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        res = tf.sparse_tensor_dense_matmul(x, y)
    else:
        res = tf.matmul(x, y)
    return res


def GCN_dot(support, x, trte_idx):
    """Naive entry-by-entry computation of support @ x (unused reference code)."""
    n = np.shape(support)[0]
    rows = []
    for i in range(n):
        cols = []
        for i1 in range(n):
            # Accumulate sum_j support[i, j] * x[j, i1].
            cols.append(tf.reduce_sum(support[i, :] * x[:, i1]))
        rows.append(tf.stack(cols))
    return tf.stack(rows)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
class CNNHSI(object):
    def __init__(self, dropout=0, act=tf.nn.softplus, filter1=[], dim=0):
        self.act = act
        self.filter1 = filter1
        self.dim = dim

        if dropout != 0:
            self.dropout = dropout
        else:
            self.dropout = 0.
        self.vars = {}
        self.vars['W_conv'] = zeros(self.filter1, 'CNNweight_0')
        self.vars['b_conv'] = bias_variable([1], 'CNNbias_0')

    def __call__(self, inputs):
        outputs = self._call(inputs)
        return outputs

    def _call(self, inputs):
        x = inputs
        h_conv1 = self.act(conv2d(x[:, :, :, 0:1], self.vars['W_conv']) + self.vars['b_conv'])
        for i in range(1, self.dim):
            h_conv1 = tf.concat([h_conv1, self.act(conv2d(x[:, :, :, i:i+1], self.vars['W_conv']) + self.vars['b_conv'])], 3)
        return h_conv1


class SoftmaxLayer(object):
    def __init__(self, input_num, output_num, dropout=0, act=tf.nn.softplus, bias=True):
        self.bias = bias
        self.act = act
        self.output_num = output_num
        self.input_num = input_num
        self.vars = {}
        self.vars['weights'] = glorot(shape=[self.input_num, self.output_num], name='weight_0')
        self.vars['bias'] = uniform(shape=[self.output_num], name='bias_0')

    def __call__(self, inputs):
        outputs = self._call(inputs)
        return outputs

    def _call(self, inputs):
        x = inputs
        pre_sup = tf.matmul(x, self.vars['weights'])
        return self.act(pre_sup + self.vars['bias'])


class Layer(object):
    """Base layer class. Defines basic API for all layer objects.
    Implementation inspired by keras (http://keras.io).

    # Properties
        name: String, defines the variable scope of the layer.
        logging: Boolean, switches Tensorflow histogram logging on/off

    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
        _log_vars(): Log all variables
    """

    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}
        logging = kwargs.get('logging', False)
        self.logging = logging
        self.sparse_inputs = False

    def _call(self, inputs):
        return inputs

    def __call__(self, inputs):
        with tf.name_scope(self.name):
            if self.logging and not self.sparse_inputs:
                tf.summary.histogram(self.name + '/inputs', inputs)
            outputs = self._call(inputs)
            if self.logging:
                tf.summary.histogram(self.name + '/outputs', outputs)
            return outputs

    def _log_vars(self):
        for var in self.vars:
            tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])


class GraphConvolution(Layer):
    """Graph convolution layer."""
    def __init__(self, input_dim, output_dim, support, act=tf.nn.softplus, bias=False,
                 isnorm=False, isSparse=False, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        self.act = act
        self.support = support
        self.bias = bias
        self.isnorm = isnorm
        self.isSparse = isSparse

        with tf.variable_scope(self.name + '_vars'):
            for i in range(1):
                self.vars['weights_' + str(i)] = uniform([input_dim, output_dim],
                                                         name='weights_' + str(i))
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()
    def _call(self, inputs):
        x = inputs
        supports = list()
        # Standard GCN propagation: act(support @ (x @ W) [+ b]).
        pre_sup = dot(x, self.vars['weights_' + str(0)], sparse=False)
        support = dot(self.support, pre_sup, sparse=self.isSparse)
        supports.append(support)
        output = tf.add_n(supports)

        if self.bias:
            output += self.vars['bias']
        if self.isnorm == True:
            output = tf.nn.l2_normalize(output, dim=0)
        return self.act(output)


class MLP(Layer):
    def __init__(self, input_dim, output_dim, act=tf.nn.softplus, bias=False,
                 isnorm=False, **kwargs):
        super(MLP, self).__init__(**kwargs)
        self.act = act
        self.bias = bias
        self.isnorm = isnorm

        with tf.variable_scope(self.name + '_vars'):
            for i in range(1):
                self.vars['weights_' + str(i)] = uniform([input_dim, output_dim],
                                                         name='weights_' + str(i))
            if self.bias:
                self.vars['bias'] = uniform([output_dim], name='bias')

        if self.logging:
            self._log_vars()

    def _call(self, inputs):
        x = inputs
        supports = list()
        pre_sup = dot(x, self.vars['weights_' + str(0)], sparse=False)
        supports.append(pre_sup)
        output = tf.add_n(supports)

        if self.bias:
            output += self.vars['bias']
        return self.act(output)
--------------------------------------------------------------------------------
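For intuition, this is a minimal sketch (not part of the repository) of how a GraphConvolution layer propagates features over a renormalized adjacency; the toy sizes and the use of funcCNN's CalSupport here are illustrative assumptions:

```python
import numpy as np
import tensorflow as tf
from CNNLayer import GraphConvolution
from funcCNN import CalSupport

A = np.array([[0., 1.], [1., 0.]])                        # toy 2-node graph
support = np.array(CalSupport(A, 1.0), dtype='float32')   # D^-1/2 (A+I) D^-1/2
X = tf.constant(np.random.rand(2, 4), dtype=tf.float32)   # 2 nodes, 4 features

layer = GraphConvolution(input_dim=4, output_dim=3, support=support, bias=True)
out = layer(X)   # shape (2, 3): softplus(support @ X @ W + b)
```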
/GGeModel.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import tensorflow as tf
from CNNLayer import *
import numpy as np
from funcCNN import *
from CNNLayer import uniform
from CNNLayer import glorot


def masked_softmax_cross_entropy(preds, labels, mask):
    """Softmax cross-entropy loss with masking."""
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)
    loss *= tf.transpose(mask)
    return tf.reduce_mean(tf.transpose(loss))


def masked_accuracy(preds, labels, mask):
    """Accuracy with masking."""
    correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
    accuracy_all = tf.cast(correct_prediction, tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)
    accuracy_all *= tf.transpose(mask)
    return tf.reduce_mean(tf.transpose(accuracy_all))


class GGeModel(object):
    def __init__(self, features, labels, learning_rate, num_classes, mask,
                 idea_A_tr, mat01_tr, pos_A_equals_1, nei01):
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.classlayers = []
        self.mlplayers = []
        self.globallayers = []
        self.labels = labels
        [self.sp_num, self.fea_num] = np.shape(features)
        self.inputs = features
        self.posx = []
        self.loss = 0
        self.idea_A_tr = idea_A_tr
        self.mat01_tr = mat01_tr

        self.emd_out = []
        self.adj_pred = []
        self.inputs_W1 = []
        self.C = []
        self.W1 = []
        self.tmp = []

        self.concat_vec = []
        self.outputs = None
        self.num_classes = num_classes
        self.mask = mask
        self.Wd = []
        self.nei01 = nei01
        self.pos_A_equals_1 = pos_A_equals_1
        self.CalAttenCoef()
        # Trainable trade-off parameters (kept positive via tf.abs).
        self.para_loss = [tf.abs(tf.Variable(0.0)) + 0.01]
        self.para_scale = [tf.abs(tf.Variable(0.5)), tf.abs(tf.Variable(0.1)), tf.abs(tf.Variable(0.1)),
                           tf.abs(tf.Variable(0.1)), tf.abs(tf.Variable(0.1))]
        self.para_lo_gl = [tf.abs(tf.Variable(0.5))]
        self.build()

    def _build(self):
        global_activations = []
        activations = []
        activations.append(tf.matmul(self.inputs, self.W1))
        # Local branch: one GCN per neighbourhood scale, using the attention
        # coefficients masked by the 0/1 neighbourhood matrices.
        for scale_idx in range(2):
            self.C.append(self.C[0]*self.nei01[scale_idx])
            self.C[-1] = tf.transpose(tf.transpose(self.C[-1])/tf.reduce_sum(self.C[-1], axis=1))
            self.C[-1] = tf.SparseTensor(self.pos_A_equals_1[scale_idx], tf.gather_nd(self.C[-1], self.pos_A_equals_1[scale_idx]), self.C[-1].get_shape())

            self.classlayers.append(GraphConvolution(act=tf.nn.relu,
                                                     input_dim=32,
                                                     output_dim=128,
                                                     support=self.C[-1],
                                                     bias=True,
                                                     isSparse=True))
            layer = self.classlayers[-1]
            hidden = layer(activations[0])
            activations.append(hidden)
        # Cross-scale fusion of the two hidden representations.
        activations.append(tf.nn.l2_normalize(activations[1] + self.para_scale[1] * activations[2], dim=1))
        activations.append(tf.nn.l2_normalize(activations[2] + self.para_scale[2] * activations[1], dim=1))

        for scale_idx in range(2):
            self.classlayers.append(GraphConvolution(act=lambda x: x,
                                                     input_dim=128,
                                                     output_dim=128,
                                                     support=self.C[scale_idx + 1],
                                                     bias=True,
                                                     isSparse=True))
            layer = self.classlayers[-1]
            hidden = layer(activations[scale_idx + 3])
            activations.append(hidden)

        self.emd_out = (self.para_scale[0]*activations[5] + (1-self.para_scale[0])*activations[6]
                        + self.para_scale[3] * activations[1] + self.para_scale[4] * activations[2])
        self.emd_out = tf.nn.l2_normalize(self.emd_out, dim=1)

        # Predicted adjacency: adj_pred[i, j] = exp(-||e_i - e_j||^2), computed
        # via ||e_i||^2 + ||e_j||^2 - 2 e_i.e_j.
        hidden_Wd = self.emd_out*self.emd_out
        E1 = tf.concat([hidden_Wd, tf.ones([self.sp_num, 128])], 1)
        E2 = tf.concat([tf.ones([self.sp_num, 128]), hidden_Wd], 1)
        E1_2 = tf.matmul(E1, tf.transpose(E2)) - 2*tf.matmul(self.emd_out, tf.transpose(self.emd_out))
        self.adj_pred = tf.exp(-E1_2)

        self.mlplayers.append(MLP(act=lambda x: x,
                                  input_dim=128,
                                  output_dim=self.num_classes,
                                  bias=True))
        layer = self.mlplayers[-1]
        hidden = layer(self.emd_out)
        activations.append(hidden)

        # Global branch: two GCN layers on the learned global graph.
        global_A = self.nnGraph(self.adj_pred, 0.75)
        self.globallayers.append(GraphConvolution(act=tf.nn.relu,
                                                  input_dim=self.fea_num,
                                                  output_dim=128,
                                                  support=global_A,
                                                  bias=True,
                                                  isSparse=True))
        layer = self.globallayers[-1]
        hidden = layer(self.inputs)
        global_activations.append(tf.nn.l2_normalize(hidden, dim=1))

        self.globallayers.append(GraphConvolution(act=lambda x: x,
                                                  input_dim=128,
                                                  output_dim=self.num_classes,
                                                  support=global_A,
                                                  bias=True,
                                                  isSparse=True))
        layer = self.globallayers[-1]
        hidden = layer(global_activations[-1])
        global_activations.append(tf.nn.l2_normalize(hidden, dim=0))

        # Weighted combination of the local and global predictions.
        self.concat_vec = self.para_lo_gl[0] * activations[-1] + (1-self.para_lo_gl[0]) * global_activations[-1]

    def build(self):
        self._build()
        self.outputs = self.concat_vec
        self._loss()
        self._accuracy()
        self.opt_op = self.optimizer.minimize(self.loss)

    def _loss(self):
        # Classification loss plus a graph-learning loss that pulls the
        # predicted adjacency towards the ideal training adjacency.
        self.loss += self.para_loss[0] * masked_softmax_cross_entropy(self.outputs, self.labels, self.mask)
        self.loss += (1.0-self.para_loss[0]) * tf.reduce_mean(tf.square(self.mat01_tr*(self.idea_A_tr - self.adj_pred)))

    def _accuracy(self):
        self.accuracy = masked_accuracy(self.outputs, self.labels, self.mask)

    def CalAttenCoef(self):
        # GAT-style attention: C[i, j] = exp(leaky_relu(a1'W1'x_i + a2'W1'x_j)).
        self.W1 = uniform([self.fea_num, 32], name='W1')
        a_var_part1 = uniform([32, 1], name='a1')
        a_var_part2 = uniform([32, 1], name='a2')
        self.inputs_W1 = tf.matmul(self.inputs, self.W1)
        inputs1 = tf.matmul(self.inputs_W1, a_var_part1)
        inputs2 = tf.matmul(self.inputs_W1, a_var_part2)

        # Padding with ones turns the matmul into the pairwise sum
        # inputs1[i] + inputs2[j].
        inputs2 = tf.transpose(tf.concat([inputs2, tf.ones([self.sp_num, 1])], axis=1))
        inputs1 = tf.concat([tf.ones([self.sp_num, 1]), inputs1], axis=1)

        self.C.append(tf.exp(tf.nn.leaky_relu(tf.matmul(inputs1, inputs2))))

    def nnGraph(self, mat1, th):
        # Keep entries above the threshold and row-normalize, as a sparse tensor.
        arr_idx = tf.where(mat1 >= th)
        arr_sparse = tf.SparseTensor(arr_idx, tf.gather_nd(mat1, arr_idx), mat1.get_shape())
        s1 = tf.sparse.reduce_sum(arr_sparse, 1)
        return tf.sparse.transpose(tf.sparse.transpose(arr_sparse).__div__(s1))

    def Dense2Sparse(self, arr_tensor):
        arr_idx = tf.where(tf.not_equal(arr_tensor, 0))
        arr_sparse = tf.SparseTensor(arr_idx, tf.gather_nd(arr_tensor, arr_idx), arr_tensor.get_shape())
        return arr_sparse
--------------------------------------------------------------------------------
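The masking trick in the loss and accuracy above divides the 0/1 mask by its mean, so averaging over all superpixels equals averaging over only the selected ones. A small numeric check (illustrative, not part of the repository):

```python
import numpy as np

loss = np.array([1.0, 2.0, 4.0])   # per-node cross-entropy
mask = np.array([1.0, 0.0, 1.0])   # only nodes 0 and 2 are labelled
mask = mask / np.mean(mask)        # -> [1.5, 0.0, 1.5]
print(np.mean(loss * mask))        # 2.5 == np.mean([1.0, 4.0])
```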
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2022 LEAP-WS

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/LoadData.py:
--------------------------------------------------------------------------------
import numpy as np
import scipy.io as scio


def Con2Numpy(var_name):
    # Load ./data/<var_name>.mat and return the variable of the same name as float.
    path = './data/'
    dataFile = path + var_name
    data = scio.loadmat(dataFile)
    x = data[var_name]
    x1 = x.astype(float)
    return x1


def load_HSI_data(data_name):
    Data = dict()
    img_gyh = data_name + '_gyh'
    img_gt = data_name + '_gt'
    Data['useful_sp_lab'] = np.array(Con2Numpy('useful_sp_lab'), dtype='int')
    Data[img_gt] = np.array(Con2Numpy(img_gt), dtype='int')
    Data[img_gyh] = Con2Numpy(img_gyh)
    Data['trpos'] = np.array(Con2Numpy('trpos'), dtype='int')
    Data['tepos'] = np.array(Con2Numpy('tepos'), dtype='int')
    return Data
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# MGLN
## Description
This is the repository for the Pattern Recognition (PR) paper [Multi-Level Graph Learning Network for Hyperspectral Image Classification].

Abstract: Graph Convolutional Network (GCN) has emerged as a new technique for hyperspectral image (HSI) classification. However, in current GCN-based methods, the graphs are usually constructed with manual effort and are thus separate from the classification task, which could limit the representation power of GCN. Moreover, the employed graphs often fail to encode the global contextual information in HSI. Hence, we propose a Multi-level Graph Learning Network (MGLN) for HSI classification, where the graph structural information at both local and global levels can be learned in an end-to-end fashion. First, MGLN employs an attention mechanism to adaptively characterize the spatial relevance among image regions. Then localized feature representations can be produced and further used to encode the global contextual information. Finally, the prediction can be acquired with the help of both local and global contextual information. Experiments on three real-world hyperspectral datasets reveal the superiority of our MGLN when compared with state-of-the-art methods.


## Requirements

- Tensorflow (1.4.0)

## Usage

You can conduct classification experiments on hyperspectral images (e.g., Indian Pines) by running the 'trainGCN.py' file.
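For reference, a minimal Python session (illustrative; the file names follow the `data` folder shipped with this repository) looks like:

```python
from LoadData import load_HSI_data

Data = load_HSI_data('IP')   # Indian Pines: IP_gyh.mat (cube) + IP_gt.mat (labels)
print(Data['IP_gyh'].shape, Data['IP_gt'].shape)
```

Running `python trainGCN.py` then trains the model and saves the per-class accuracies, OA, AA, and kappa to 'excel_data.mat'.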

## Cite
Please cite our paper if you use this code in your own work:

```
@article{WAN2022108705,
  title   = {Multi-level graph learning network for hyperspectral image classification},
  journal = {Pattern Recognition},
  volume  = {129},
  pages   = {108705},
  year    = {2022},
  author  = {Sheng Wan and Shirui Pan and Shengwei Zhong and Jie Yang and Jian Yang and Yibing Zhan and Chen Gong}
}
```
--------------------------------------------------------------------------------
/data/IP_gt.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LEAP-WS/MGLN/1e2dc4e974325f682311c2e14c4da8554f8096b6/data/IP_gt.mat
--------------------------------------------------------------------------------
/data/IP_gyh.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LEAP-WS/MGLN/1e2dc4e974325f682311c2e14c4da8554f8096b6/data/IP_gyh.mat
--------------------------------------------------------------------------------
/data/tepos.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LEAP-WS/MGLN/1e2dc4e974325f682311c2e14c4da8554f8096b6/data/tepos.mat
--------------------------------------------------------------------------------
/data/trpos.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LEAP-WS/MGLN/1e2dc4e974325f682311c2e14c4da8554f8096b6/data/trpos.mat
--------------------------------------------------------------------------------
/data/useful_sp_lab.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LEAP-WS/MGLN/1e2dc4e974325f682311c2e14c4da8554f8096b6/data/useful_sp_lab.mat
--------------------------------------------------------------------------------
/funcCNN.py:
--------------------------------------------------------------------------------
import numpy as np
import numpy.matlib
import scipy.io as scio
from sklearn import preprocessing
import tensorflow as tf


def Con2Numpy(var_name):
    dataFile = var_name
    data = scio.loadmat(dataFile)
    x = data[var_name]
    x1 = x.astype(float)
    return x1


def Con2Numpy_path(var_name, path):
    dataFile = var_name
    data = scio.loadmat(path+dataFile)
    x = data[var_name]
    x1 = x.astype(float)
    return x1


def load_HSI_data_list(path1):
    inst = np.array(Con2Numpy_path('inst', path1), dtype='float32')
    temask = np.array(Con2Numpy_path('temask', path1), dtype=bool)
    trmask = np.array(Con2Numpy_path('trmask', path1), dtype=bool)
    y_train = Con2Numpy_path('y_train', path1)
    y_test = Con2Numpy_path('y_test', path1)
    return inst, temask, trmask, y_train, y_test


def load_HSI_data():
    features_pretrain = np.array(Con2Numpy('features_pretrain'), dtype='float32')
    support_pretrain = np.array(Con2Numpy('support_pretrain'), dtype='float32')

    trmask_pretrain = Con2Numpy('trmask_pretrain')
    trmask_pretrain = np.array(trmask_pretrain, dtype=bool)

    y_train_pretrain = Con2Numpy('y_train_pretrain')
    return features_pretrain, support_pretrain, trmask_pretrain, y_train_pretrain


def AscSort(x1):
    # Ascending sort via repeated argmin; returns the sorted values and the
    # original indices.
    x = x1.copy()
    B = np.sort(x)
    IX = np.ones(np.size(x))
    for i in range(np.size(x)):
        idx = np.argmin(x)
        x[idx] = np.max(x) + 1
        IX[i] = idx
    return B, IX


def GetKnn1(inx, data, k):
    # Indices of the k nearest rows of `data` to the query `inx`.
    [datarow, datacol] = np.shape(data)
    diffMat = np.tile(inx, (datarow, 1)) - data
    distanceMat = np.sqrt(np.sum(diffMat*diffMat, 1))
    [B, IX] = AscSort(distanceMat)
    if B[0] == 0:
        IX = IX[1:k+1].copy()   # skip the query itself
    else:
        IX = IX[0:k].copy()
    return IX


def lle(X, K, ln):
    # Locally-linear-embedding-style reconstruction weights.
    [D, N] = np.shape(X)
    index = np.zeros([K, N])
    for i in range(N):
        index[:, i] = GetKnn1(X[:, i].T, X[:, 0:ln].T, K)
    neighborhood = index[0:K, :]
    if K > D:
        tol = 0.001   # regularization for the ill-conditioned case
    else:
        tol = 0
    W = np.zeros([K, N])
    W1 = np.zeros([N, N])
    for ii in range(N):
        z = X[:, np.array(neighborhood[:, ii], dtype=int)] - np.tile(X[:, ii], (K, 1)).T
        C = np.dot(z.T, z)
        C = C + np.dot(np.dot(np.eye(K), tol), np.trace(C))
        W[:, ii] = np.dot(np.linalg.inv(C), np.ones([K, 1])).reshape((K))
        W[:, ii] = W[:, ii]/np.sum(W[:, ii])
    for i in range(N):
        for j in range(K):
            W1[np.array(index[j, i], dtype=int), i] = W[j, i]
    return W1


def GetWlle(S1):
    S = S1.copy()
    S = S.T
    Wlle = S + S.T - np.dot(S.T, S)
    Wlle = Wlle - np.diag(np.diag(Wlle))
    return Wlle


def GetLabeledData(img2d, trte_idx):
    intrte = np.zeros([np.shape(trte_idx)[0], np.shape(img2d)[1]])
    all_num = np.shape(trte_idx)[0]
    for i in range(all_num):
        intrte[i, :] = img2d[trte_idx[i, 0], :]
    intrte = np.reshape(intrte, (all_num, 40))
    return intrte


def GetMats(trte_idx, y_test, y_train, trnum):
    nums = np.shape(trte_idx)[0]
    trmask = np.zeros([nums])
    temask = np.zeros([nums])
    ytr = np.zeros([nums, np.shape(y_test)[1]])
    yte = np.zeros([nums, np.shape(y_test)[1]])
    for i in range(trnum):
        trmask[i] = 1
        ytr[i] = y_train[trte_idx[i], :]
    for i in range(nums-trnum):
        temask[i+trnum] = 1
        yte[i+trnum] = y_test[trte_idx[i+trnum], :]
    return np.array(trmask, dtype=bool), np.array(temask, dtype=bool), ytr, yte


def Gettrtemask(dim0, trnum, trte):
    if trte == 0:
        trte_mask1 = np.ones([trnum])
        trte_mask2 = np.zeros([dim0-trnum])
        return np.concatenate([trte_mask1, trte_mask2], axis=0)
    elif trte == 1:
        trte_mask1 = np.zeros([trnum])
        trte_mask2 = np.ones([dim0-trnum])
        return np.concatenate([trte_mask1, trte_mask2], axis=0)


def mapminmax01(X):
    min_max_scaler = preprocessing.MinMaxScaler()
    return min_max_scaler.fit_transform(X)


def CalSupport_tf(A, lam):
    lam1 = lam
    A_ = A + lam1*np.eye(np.shape(A)[0])
    D_ = tf.reduce_sum(A_, reduction_indices=0)
    D_05 = tf.diag(D_**(-0.5))
    support = tf.matmul(tf.matmul(D_05, A_), D_05)
    return support


def CalSupport(A, lam):
    lam1 = lam
    A_ = A + lam1*np.eye(np.shape(A)[0])
    D_ = np.sum(A_, 1)
    D_05 = np.diag(D_**(-0.5))
    support = np.matmul(np.matmul(D_05, A_), D_05)
    return support
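
# Worked example for CalSupport (illustrative, not part of the original file):
# with A = [[0, 1], [1, 0]] and lam = 1, A_ = A + I has degree vector [2, 2], so
#   CalSupport(np.array([[0., 1.], [1., 0.]]), 1.0)
# returns D^{-1/2} A_ D^{-1/2} = [[0.5, 0.5], [0.5, 0.5]], the renormalized
# propagation matrix used by GCNs.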

def arr2sparse(arr):
    arr_tensor = tf.constant(np.array(arr), dtype='float32')
    arr_idx = tf.where(tf.not_equal(arr_tensor, 0))
    arr_sparse = tf.SparseTensor(arr_idx, tf.gather_nd(arr_tensor, arr_idx), arr_tensor.get_shape())
    return arr_sparse


def AssignLabels(useful_sp_lab, trlabels, telabels, trmask, temask):
    # Paint each superpixel's predicted (or training) label back onto its pixels.
    [rows, cols] = np.shape(useful_sp_lab)
    output_labels = np.zeros([rows, cols])
    sp_num = np.max(useful_sp_lab)
    for sp_idx in range(1, sp_num+1):
        pos1 = np.argwhere(useful_sp_lab == sp_idx)
        if trmask[sp_idx-1, 0] == True:
            pred_label = trlabels[sp_idx-1]
        else:
            pred_label = telabels[sp_idx-1]
        output_labels[pos1[:, 0], pos1[:, 1]] = pred_label + 1
    return output_labels


def PixelWiseAccuracy(gt, pred_labels, trpos):
    num_labels = np.max(gt)
    gt[trpos[:, 0]-1, trpos[:, 1]-1] = 0   # exclude training pixels
    err_num = 0
    for label_idx in range(1, num_labels+1):
        pos1 = np.argwhere(gt == label_idx)
        mat1 = gt[pos1[:, 0], pos1[:, 1]]
        mat2 = pred_labels[pos1[:, 0], pos1[:, 1]]
        mat3 = mat1 - mat2
        err_num += np.shape(np.argwhere(mat3 != 0))[0]
    return 1 - err_num/np.shape(np.argwhere(gt > 0))[0]


def Label2D(label_vec, num_classes):
    num1 = np.shape(label_vec)[0]
    label_mat = np.zeros([num1, num_classes])
    if np.size(np.shape(label_vec)) == 2:
        label_vec = np.matlib.reshape(label_vec, num1)
    pos_x = [i for i in range(num1)]
    label_mat[pos_x, label_vec] = 1
    return label_mat


def GetExcelData(gt, pred_labels, trpos):
    # Per-class accuracies followed by OA, AA and the kappa coefficient.
    gt[trpos[:, 0]-1, trpos[:, 1]-1] = 0
    num_classes = np.max(gt)
    per_acc = []
    overall_err_num = 0
    for lab_idx in range(1, num_classes+1):
        pos1 = np.argwhere(gt == lab_idx)
        preds = pred_labels[pos1[:, 0], pos1[:, 1]]
        gts = gt[pos1[:, 0], pos1[:, 1]]
        mat3 = gts - preds
        per_err_num = np.shape(np.argwhere(mat3 != 0))[0]
        per_acc.append(1 - per_err_num/np.shape(pos1)[0])
        overall_err_num += np.shape(np.argwhere(mat3 != 0))[0]
    per_acc = np.array(per_acc, dtype='float32')
    OA = 1 - overall_err_num/np.shape(np.argwhere(gt > 0))[0]
    AA = np.mean(per_acc)
    # kappa
    n = np.shape(np.argwhere(gt != 0))[0]
    ab1 = 0
    pos0 = np.argwhere(gt == 0)
    pred_labels[pos0[:, 0], pos0[:, 1]] = 0
    for lab_idx in range(1, num_classes+1):
        a1 = np.shape(np.argwhere(gt == lab_idx))[0]
        b1 = np.shape(np.argwhere(pred_labels == lab_idx))[0]
        ab1 += a1*b1
    Pe = ab1/(n*n)
    kappa_coef = (OA-Pe)/(1-Pe)
    outputs = np.zeros([num_classes+3])
    outputs[0:num_classes] = per_acc
    outputs[num_classes] = OA
    outputs[num_classes+1] = AA
    outputs[num_classes+2] = kappa_coef
    return outputs


def Normalization_2dMat(mat1):
    for col_idx in range(np.shape(mat1)[1]):
        max_val = np.max(mat1[:, col_idx])
        min_val = np.min(mat1[:, col_idx])
        mat1[:, col_idx] = (mat1[:, col_idx]-min_val)/(max_val-min_val)
    return mat1


def FindQDifference(Q1, Q2):
    max1 = np.argmax(Q1)
    max2 = np.argmax(Q2)
    return [max1, max2]


def Assign2Gt(pred_lab, tepos2d, gt):
    gt1 = gt.copy()
    tenum = np.shape(tepos2d)[0]
    trnum = np.shape(pred_lab)[0] - tenum
    gt1[tepos2d[:, 0], tepos2d[:, 1]] = pred_lab[trnum:trnum+tenum]
    return gt1


def Cal01DisTrainMat(trmask, lab_vec, full_diag):
    # Mat01[i, j] = 1 if training superpixels i and j share a class;
    # Mat01_tr[i, j] = 1 if both i and j are training superpixels.
    sp_num = np.size(trmask)
    Mat01 = np.zeros([sp_num, sp_num])
    Mat01_tr = np.zeros([sp_num, sp_num])
    te_pos = np.argwhere(trmask == 0)
    lab_vec[te_pos[:, 0]] = -1
    max_lab = int(np.max(lab_vec))
    for lab_idx0 in range(max_lab):
        pos1 = np.argwhere(lab_vec == lab_idx0)
        pos2 = np.argwhere(lab_vec >= 0)
        for pos0 in list(pos1):
            Mat01[np.array([pos0 for i in range(np.size(pos1))]), pos1] = 1
            Mat01_tr[np.array([pos0 for i in range(np.size(pos2))]), pos2] = 1
    if full_diag == True:
        Mat01[np.array([i for i in range(sp_num)]), np.array([i for i in range(sp_num)])] = 1
        Mat01_tr[np.array([i for i in range(sp_num)]), np.array([i for i in range(sp_num)])] = 1
    elif full_diag == 'zero':
        Mat01[np.array([i for i in range(sp_num)]), np.array([i for i in range(sp_num)])] = 0
        Mat01_tr[np.array([i for i in range(sp_num)]), np.array([i for i in range(sp_num)])] = 0

    return Mat01, Mat01_tr

def KnnA(A, k):
    # Keep only each row's k largest entries; zero out the rest.
    num_row = np.shape(A)[0]
    for row_idx0 in range(num_row):
        kmax_vals = np.sort(A[row_idx0, :])[::-1]
        threshold_val = kmax_vals[k-1]
        pos1 = np.argwhere(A[row_idx0, :] < threshold_val)
        A[row_idx0, pos1] = 0
    return A
--------------------------------------------------------------------------------
/trainGCN.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# NOTE: the opening of this file was lost in this copy of the repository.
# Everything above `pos_A_equals_1 = []` is reconstructed from how the names
# are used in the surviving code below; values marked "assumed" are
# placeholders, not the authors' original settings.
import time
import numpy as np
import scipy.io as scio
import tensorflow as tf
from LoadData import load_HSI_data
from BuildSPInst_A import GetInst_A
from GGeModel import GGeModel
from funcCNN import *

data_name = 'IP'
img_gt = data_name + '_gt'
Data = load_HSI_data(data_name)
model = GetInst_A(Data['useful_sp_lab'], Data[data_name+'_gyh'], Data[img_gt],
                  Data['trpos'], Data['tepos'])

sp_mean = np.array(model.sp_mean, dtype='float32')
sp_A = model.sp_A
sp_num = int(model.sp_num)
sp_label_sp = model.sp_label_sp
trmask_sp = np.reshape(model.trmask_sp, [sp_num, 1])
temask_sp = np.reshape(model.temask_sp, [sp_num, 1])
num_classes = model.num_classes
idea_A_tr, mat01_tr = Cal01DisTrainMat(model.trmask_sp, model.sp_label_vec.copy(), True)  # assumed
idea_A_tr = np.array(idea_A_tr, dtype='float32')
mat01_tr = np.array(mat01_tr, dtype='float32')
learning_rate1 = 0.001   # assumed
epochs1 = 5000           # assumed
excel_data = []

labels = tf.placeholder(tf.float32, shape=[None, num_classes])
mask = tf.placeholder(tf.float32, shape=[sp_num, 1])


def GCNevaluate(mask1, labels1, model1, sess1):
    t_test = time.time()
    outs_val = sess1.run([model1.loss, model1.accuracy],
                         feed_dict={labels: labels1, mask: mask1})
    return outs_val[0], outs_val[1], (time.time() - t_test)


pos_A_equals_1 = []
pos_A_equals_1.append(np.argwhere(sp_A[0] > 0))
nei01 = []
nei01.append(np.zeros([sp_num, sp_num]))
nei01[0][pos_A_equals_1[0][:, 0], pos_A_equals_1[0][:, 1]] = 1

# Grow the 0/1 neighbourhood three times, then keep only the 1-hop and the
# largest (4-hop) versions.
nei01.append(model.AddConnectionFor01(nei01[-1]))
pos_A_equals_1.append(np.argwhere(nei01[-1] > 0))
nei01.append(model.AddConnectionFor01(nei01[-1]))
pos_A_equals_1.append(np.argwhere(nei01[-1] > 0))
nei01.append(model.AddConnectionFor01(nei01[-1]))
pos_A_equals_1.append(np.argwhere(nei01[-1] > 0))
del nei01[1]
del nei01[1]
del pos_A_equals_1[1]
del pos_A_equals_1[1]


GGemodel = GGeModel(features=sp_mean, labels=labels, learning_rate=learning_rate1,
                    num_classes=num_classes, mask=mask,
                    idea_A_tr=idea_A_tr, mat01_tr=mat01_tr,
                    pos_A_equals_1=pos_A_equals_1, nei01=nei01)


sess = tf.Session()
sess.run(tf.global_variables_initializer())


for epoch in range(epochs1):
    t = time.time()
    outs = sess.run([GGemodel.opt_op, GGemodel.loss], feed_dict={labels: sp_label_sp,
                                                                 mask: trmask_sp})
    if epoch >= 500:
        train_cost, train_acc, train_duration = GCNevaluate(trmask_sp, sp_label_sp, GGemodel, sess)
        if train_acc >= 0.993:   # early stop once training accuracy saturates
            break

    if epoch % 50 == 0:
        train_cost, train_acc, train_duration = GCNevaluate(trmask_sp, sp_label_sp, GGemodel, sess)
        print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
              " train_accuracy=", "{:.5f}".format(train_acc), "time=", "{:.5f}".format((time.time() - t)))


outputs = sess.run(GGemodel.outputs)
pixel_wise_pred = np.argmax(outputs, axis=1)
pred_mat = AssignLabels(Data['useful_sp_lab'], np.argmax(sp_label_sp, axis=1), pixel_wise_pred, trmask_sp, temask_sp)
OA = PixelWiseAccuracy(Data[img_gt].copy(), pred_mat.copy(), Data['trpos'])
excel_data.append(GetExcelData(Data[img_gt], pred_mat.copy(), Data['trpos']-1))
scio.savemat('excel_data.mat', {'excel_data': excel_data})
--------------------------------------------------------------------------------
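After training, a quick way to inspect the saved metrics (illustrative; the layout follows GetExcelData in funcCNN.py):

```python
import scipy.io as scio

res = scio.loadmat('excel_data.mat')['excel_data']
# Each row: per-class accuracies, then OA, AA and the kappa coefficient.
print(res[-1])
```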