└── HSI Classification ├── AE_HSI ├── AE_grid_search.py ├── Dense_AE_spectral.py ├── Evaluate.py ├── Sense_AE_spatial.py ├── __pycache__ │ └── Evaluate.cpython-35.pyc ├── cnncoder_nn.py ├── layerwise_AE.py ├── mlp_HSI.py └── nn_HSI.py ├── CNN1d_neighbour_model.py ├── CNN2d_pca_model.py ├── CNN3d_pca_model.py ├── HSI_LSTM.py ├── HSI_ResNet.py ├── HSI_ResNet ├── DataLoad │ ├── HSIDataLoad.py │ ├── HSIDatasetLoad.py │ ├── HSIDatasetLoad.pyc │ ├── HSI_util.py │ └── __pycache__ │ │ ├── HSIDataLoad.cpython-35-USER-20160223KN-2.pyc │ │ ├── HSIDataLoad.cpython-35-USER-20160223KN.pyc │ │ ├── HSIDataLoad.cpython-35.pyc │ │ ├── HSIDatasetLoad.cpython-35.pyc │ │ ├── HSI_util.cpython-35.pyc │ │ └── util.cpython-35.pyc ├── HSI_ResNet.py ├── HSI_ResNet_Multi.py ├── accu.py ├── excute.py ├── plot.py ├── result13_res1_30.txt └── util.py ├── __pycache__ ├── util.cpython-35.pyc ├── visualize.cpython-35-DESKTOP-RS5NO3H.pyc └── visualize.cpython-35.pyc ├── basic_augmentation_model.py └── tensorflow ├── HSI_RN_temp.py ├── HSI_ResNet.py ├── MNIST-data ├── t10k-images-idx3-ubyte.gz ├── t10k-labels-idx1-ubyte.gz ├── train-images-idx3-ubyte.gz └── train-labels-idx1-ubyte.gz ├── dist_HSI_ResNet.py └── util.py /HSI Classification/AE_HSI/AE_grid_search.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Feb 11 15:52:47 2017 4 | @author: Administrator 5 | """ 6 | 7 | 8 | from keras.layers import Dense,Input 9 | from keras.models import Model 10 | 11 | #%% (1.加载数据) 12 | from sklearn.cross_validation import StratifiedShuffleSplit 13 | from keras.utils import np_utils 14 | import numpy as np 15 | #lab转换成one-hot编码 16 | def datasetSplit(data,lab,num_calss): 17 | ssp = StratifiedShuffleSplit(lab,n_iter=1,test_size=0.90) 18 | for trainlab,testlab in ssp: 19 | print("train:\n%s\ntest:\n%s" % (trainlab,testlab)) 20 | X_train=data[trainlab] 21 | X_test=data[testlab] 22 | Y_train=np_utils.to_categorical(lab[trainlab],num_calss) 23 | Y_test=np_utils.to_categorical(lab[testlab],num_calss) 24 | return X_train,X_test,Y_train,Y_test 25 | 26 | #%%dataset2 27 | def datasetLoad2(): 28 | rootPath = r'D:/data/HSI' 29 | Xpath=rootPath+'/labeled_data.1.27.txt' 30 | Ypath=rootPath+'/data_label.1.27.txt' 31 | 32 | X_data = np.loadtxt(open(Xpath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 33 | X_data=X_data.transpose() 34 | Y_data = np.loadtxt(open(Ypath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 35 | Y_data=np_utils.to_categorical(Y_data-1,16) 36 | 37 | return X_data,Y_data 38 | 39 | X_data,Y_data=datasetLoad2() 40 | #Y_data = np_utils.categorical_probas_to_classes(Y_data) 41 | #%% keras wrappers 42 | from keras.models import Sequential 43 | 44 | """ 45 | KerasClassifier接受的模型函数, 46 | (1)函数参数不能是keras模型参数中不包含的(存疑?) 47 | (2)(metrics未放在参数列表,会报错illegal parameter) 48 | (3)nb_epoch和batch_size可以放在grid_search中 49 | 50 | """ 51 | def autoCoderDense_7(optimizer,loss): 52 | model=Sequential() 53 | model.add(Dense(128,input_dim=200, activation='relu')) 54 | model.add(Dense(64,activation='relu')) 55 | model.add(Dense(50,activation='relu')) 56 | model.add(Dense(16,activation="softmax")) 57 | model.compile(optimizer=optimizer, loss=loss,metrics=["accuracy"]) 58 | return model 59 | 60 | """ 61 | keras对sklearn的包装类,可以用grid_search参数 62 | 参数传入的顺序: 63 | (1)首先是 fit, predict, predict_proba, and score函数中的参数 64 | (2)其次是KerasClassifier中定义的参数 65 | (3)再次是默认参数 66 | 注意: grid_search的默认score是estimator的score 67 | """ 68 | from keras.wrappers.scikit_learn import KerasClassifier 69 | AE_keras=KerasClassifier(build_fn=autoCoderDense_7, nb_epoch=2, verbose=1) 70 | 71 | #%% grid_search 72 | """ 73 | 一般来说,在优化算法中包含epoch的数目是一个好主意, 74 | 因为每批(batch)学习量(学习速率)、每个epoch更新的数目(批尺寸)和 epoch的数量之间都具有相关性。 75 | """ 76 | from sklearn.grid_search import GridSearchCV 77 | from sklearn.metrics.scorer import mean_squared_error 78 | from sklearn.metrics import make_scorer 79 | 80 | #optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam'] 81 | optimizer = ['Adadelta','RMSprop'] 82 | loss=['categorical_crossentropy'] 83 | batch_size=[10] 84 | 85 | param_grid = dict(optimizer=optimizer, 86 | loss=loss, 87 | batch_size=batch_size) 88 | 89 | grid_search = GridSearchCV(estimator=AE_keras, param_grid=param_grid, n_jobs=1)#scoring=make_scorer(mean_squared_error) 90 | 91 | #%% 92 | """ 93 | 只能用于预测标签 94 | The model is not configured to compute accuracy. 95 | You should pass `metrics=["accuracy"]` to the `model.compile() 96 | """ 97 | validator=grid_search.fit(X_data,Y_data) 98 | 99 | 100 | #%% 评估结果 101 | best_model = validator.best_estimator_.model 102 | metric_names = best_model.metrics_names 103 | metric_values = best_model.evaluate(X_data, Y_data) 104 | print('\n') 105 | for metric, value in zip(metric_names, metric_values): 106 | print(metric, ': ', value) 107 | print("Best: %f using %s" % (validator.best_score_, validator.best_params_)) 108 | print(validator.grid_scores_)#打印每一组参数的结果 109 | 110 | 111 | 112 | 113 | 114 | -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/Dense_AE_spectral.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Jun 22 09:40:54 2017 4 | @author: Shenjunling 5 | """ 6 | #%% args 7 | rootPath = "G:/data/HSI" 8 | logBasePath = "G:/data/mylog/KerasDL/" 9 | 10 | nb_epoch = 800 11 | test_size = 0.6 12 | batch_size = 64 13 | spectral_dim = 200 14 | encoded_dim = 60 15 | HSI_class= 16 16 | block_size = 1 17 | 18 | use_pca = False 19 | n_components = 30 20 | if use_pca ==True: 21 | input_shape = (block_size,block_size,n_components) 22 | else: 23 | input_shape = (block_size,block_size, spectral_dim) 24 | 25 | svm_switch = 0#0 classify for spectral info, 1 classify for encoded info 26 | 27 | #%% (1.加载数据) 28 | from HSIDatasetLoad import * 29 | from keras.utils import np_utils 30 | import numpy as np 31 | 32 | HSI = HSIData(rootPath) 33 | X_data = HSI.X_data 34 | Y_data = HSI.Y_data 35 | data_source = HSI.data_source 36 | idx_data = HSI.idx_data 37 | 38 | #是否使用PCA降维 39 | if use_pca==True: 40 | data_source = HSI.PCA_data_Source(data_source,n_components=n_components) 41 | 42 | X_data_nei = HSI.getNeighborData(data_source=data_source,idx_data=idx_data,block_size=block_size) 43 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 44 | X_train_nei,X_test_nei,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data_nei,Y_data,idx_data,16,test_size = test_size) 45 | X_train = data_source[idx_train] 46 | X_test = data_source[idx_test] 47 | 48 | #%% (2)自编码器 49 | from keras.layers import Input, Dense, Flatten 50 | from keras.models import Model 51 | from keras.utils import np_utils 52 | from keras.callbacks import ReduceLROnPlateau 53 | from keras.callbacks import EarlyStopping 54 | 55 | """ 56 | categorical_crossentropy 57 | ‘valid’:image_shape - filter_shape + 1.即滤波器在图像内部滑动 58 | ‘full’ shape: image_shape + filter_shape - 1.允许滤波器超过图像边界 59 | """ 60 | def get_DenseAE_model(input_dim, encoded_dim): 61 | input_layer = Input(shape=(input_dim,)) 62 | x = Dense(100, activation='relu')(input_layer) 63 | x = Dense(100, activation='relu')(x) 64 | x = Dense(60, activation='relu')(x) 65 | encoded = Dense(encoded_dim, activation='relu')(x)#第四层,包括一层输入层 66 | 67 | x = Dense(60, activation='relu')(encoded) 68 | x = Dense(100, activation='relu')(x) 69 | x = Dense(100, activation='relu')(x) 70 | decoded = Dense(input_dim, activation='relu')(x) 71 | 72 | model = Model(input=input_layer, output=decoded) 73 | 74 | model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics = ['mse']) 75 | return model 76 | 77 | """ 78 | 对原本的光谱信息进行自编码,参见论文Deep Learning-Based Classification of Hyperspectral Data【170】 79 | """ 80 | spectral_model = get_DenseAE_model(input_shape[2], encoded_dim) 81 | reduce_lr1 = ReduceLROnPlateau(monitor="val_loss", patience=30) 82 | early_stopping1 = EarlyStopping(monitor='val_loss', patience=50,verbose=1) 83 | 84 | spectral_model.fit(X_train, X_train, 85 | nb_epoch = nb_epoch, 86 | batch_size = batch_size, 87 | validation_split=0.3, callbacks = [early_stopping1,reduce_lr1]) 88 | 89 | 90 | #%% (3)encoded data 91 | encoded_spetral_model = Model(input = spectral_model.input, 92 | output = spectral_model.layers[4].output)#get_DenseAE_model中的encoded输出 93 | X_test_encoded = encoded_spetral_model.predict(X_test) 94 | X_train_encoded = encoded_spetral_model.predict(X_train) 95 | 96 | #%% (4)分类模型 97 | from keras.layers import Dropout 98 | from Evaluate import modelMetrics 99 | 100 | #(1)dnn 101 | def get_mlp_classify_model(input_dim,classify_output_num): 102 | input_layer = Input(shape=(input_dim,)) 103 | x=Dense(256, activation='relu')(input_layer) 104 | x=Dropout(0.3)(x) 105 | x=Dense(256, activation='relu')(x) 106 | output=Dense(classify_output_num, activation='softmax')(x) 107 | 108 | model=Model(input_layer,output=output) 109 | 110 | model.compile(optimizer='adadelta',metrics=['accuracy'],loss='categorical_crossentropy') 111 | return model 112 | 113 | mlp_classify_model = get_mlp_classify_model(encoded_dim, HSI_class) 114 | reduce_lr3 = ReduceLROnPlateau(monitor="val_loss",patience=50) 115 | early_stopping3 = EarlyStopping(monitor='val_loss', patience=80,verbose=1) 116 | 117 | mlp_classify_model.fit(X_train_encoded,Y_train, nb_epoch=nb_epoch, 118 | validation_data=(X_test_encoded,Y_test), 119 | callbacks=[early_stopping3, reduce_lr3]) 120 | mlp_classify_model_report, mlp_classify_model_acu = modelMetrics( 121 | mlp_classify_model,X_test_encoded,Y_test) 122 | 123 | 124 | # sklearn mlp 125 | from sklearn.neural_network import MLPClassifier 126 | params = [{'solver': 'adam', 'learning_rate_init': 0.0001}] 127 | labels = ["adam"] 128 | for label, param in zip(labels, params): 129 | print("training: %s" % label) 130 | mlp = MLPClassifier(hidden_layer_sizes=(256,256,HSI_class), verbose=1, batch_size = 40, 131 | max_iter=1000, **param)#tol:Tolerance for the training loss. 132 | mlp.fit(X_train_encoded, np_utils.categorical_probas_to_classes(Y_train)) 133 | print("Training set score: %f" % mlp.score(X_train_encoded, np_utils.categorical_probas_to_classes(Y_train))) 134 | print("Training set loss: %f" % mlp.loss_) 135 | clf_report, clf_acu = modelMetrics(mlp,X_test_encoded,Y_test) 136 | 137 | 138 | 139 | # svm 140 | from sklearn.svm import SVC 141 | svc = SVC(kernel="rbf", C=30000, verbose=True) 142 | if svm_switch==0: 143 | svc.fit(X_train,np_utils.categorical_probas_to_classes(Y_train)) 144 | svc_train_accu = svc.score(X_train, np_utils.categorical_probas_to_classes(Y_train)) 145 | svc_report, svc_accu = modelMetrics(svc, X_test, Y_test) 146 | elif svm_switch==1: 147 | svc.fit(X_train_encoded, np_utils.categorical_probas_to_classes(Y_train)) 148 | svc_train_accu = svc.score(X_train_encoded,np_utils.categorical_probas_to_classes(Y_train)) 149 | svc_report, svc_accu = modelMetrics(svc,X_test_encoded,Y_test) 150 | print(svc_train_accu, svc_accu) 151 | 152 | 153 | 154 | 155 | 156 | -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/Evaluate.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Jun 20 15:12:34 2017 4 | 5 | @author: Shenjunling 6 | """ 7 | 8 | from keras.utils.np_utils import categorical_probas_to_classes 9 | from sklearn.metrics import classification_report 10 | from sklearn.metrics import accuracy_score 11 | 12 | #判断f1和总的准确率,输入的Y_test是概率形式的 13 | def modelMetrics(model_fitted,X_test,Y_test): 14 | Y_predict=model_fitted.predict(X_test) 15 | if len(Y_predict.shape)!=1: 16 | #转换onehot编码 17 | Y_predict=categorical_probas_to_classes(Y_predict) 18 | report =classification_report(Y_predict, categorical_probas_to_classes(Y_test))#各个类的f1score 19 | accuracy = accuracy_score(Y_predict, categorical_probas_to_classes(Y_test))#总的准确度 20 | return report,accuracy 21 | 22 | 23 | def cateAccuracy(model_fitted,X_test,Y_test): 24 | Y_test = categorical_probas_to_classes(Y_test) 25 | Y_predict=model_fitted.predict(X_test) 26 | if len(Y_predict.shape)!=1: 27 | #转换onehot编码 28 | Y_predict=categorical_probas_to_classes(Y_predict) 29 | 30 | accu_count={} 31 | accu_total = {} 32 | for cat in set(Y_test): 33 | total = list(Y_test).count(cat) 34 | accu_total[cat] = total 35 | accu_count[cat] = 0 36 | 37 | for iidx,cat in enumerate(Y_test): 38 | if cat == Y_predict[iidx]: 39 | accu_count[cat] = accu_count[cat]+1 40 | sum1 = 0 41 | sum2 = 0 42 | for i in range(len(set(Y_test))): 43 | sum1 = sum1+accu_total[i] 44 | sum2 = sum2+accu_count[i] 45 | print(sum2/float(sum1)) 46 | return [accu_count[i]/float(accu_total[i]) for i in range(len(set(Y_test)))] 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/Sense_AE_spatial.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Dec 11 14:18:52 2016 4 | @author: Shenjunling 5 | """ 6 | #%% args 7 | rootPath = "G:/data/HSI" 8 | logBasePath = "G:/data/mylog/KerasDL/" 9 | 10 | nb_epoch1 = 2000 11 | nb_epoch2 = 800 12 | test_size = 0.6 13 | batch_size = 64 14 | spectral_dim = 200 15 | encoded_dim = 60 16 | HSI_class= 16 17 | block_size = 1 18 | 19 | use_pca = False 20 | n_components = 30 21 | if use_pca ==True: 22 | input_shape = (block_size,block_size,n_components) 23 | else: 24 | input_shape = (block_size,block_size, spectral_dim) 25 | 26 | svm_switch = 0#0 classify for spectral info, 1 classify for encoded info 27 | 28 | #%% (1.加载数据) 29 | from HSIDatasetLoad import * 30 | from keras.utils import np_utils 31 | import numpy as np 32 | 33 | HSI = HSIData(rootPath) 34 | X_data = HSI.X_data 35 | Y_data = HSI.Y_data 36 | data_source = HSI.data_source 37 | idx_data = HSI.idx_data 38 | 39 | #是否使用PCA降维 40 | if use_pca==True: 41 | data_source = HSI.PCA_data_Source(data_source,n_components=n_components) 42 | 43 | X_data_nei = HSI.getNeighborData(data_source=data_source,idx_data=idx_data,block_size=block_size) 44 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 45 | X_train_nei,X_test_nei,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data_nei,Y_data,idx_data,16,test_size = test_size) 46 | X_train = data_source[idx_train] 47 | X_test = data_source[idx_test] 48 | 49 | #%% (2)自编码器 50 | from keras.layers import Input, Dense, Flatten 51 | from keras.models import Model 52 | from keras.utils import np_utils 53 | from keras.callbacks import ReduceLROnPlateau 54 | from keras.callbacks import EarlyStopping 55 | 56 | """ 57 | categorical_crossentropy 58 | ‘valid’:image_shape - filter_shape + 1.即滤波器在图像内部滑动 59 | ‘full’ shape: image_shape + filter_shape - 1.允许滤波器超过图像边界 60 | """ 61 | def get_DenseAE_model(input_dim): 62 | input_layer = Input(shape=(input_dim,)) 63 | x = Dense(100, activation='relu')(input_layer) 64 | x = Dense(100, activation='relu')(x) 65 | x = Dense(60, activation='relu')(x) 66 | encoded = Dense(60, activation='relu')(x)#第四层,包括一层输入层 67 | 68 | x = Dense(60, activation='relu')(encoded) 69 | x = Dense(100, activation='relu')(x) 70 | x = Dense(100, activation='relu')(x) 71 | decoded = Dense(input_dim, activation='relu')(x) 72 | 73 | model = Model(input=input_layer, output=decoded) 74 | 75 | model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics = ['mse']) 76 | return model 77 | 78 | 79 | X_train_nei_flat = np.reshape(X_train_nei, (-1,np.prod(input_shape))) 80 | X_test_nei_flat = np.reshape(X_test_nei, (-1,np.prod(input_shape))) 81 | spatial_model = get_DenseAE_model(np.prod(input_shape)) 82 | reduce_lr2 = ReduceLROnPlateau(monitor="val_loss",factor=0.1,patience=30) 83 | early_stopping2 = EarlyStopping(monitor='val_loss', patience=50,verbose=1) 84 | 85 | spatial_model.fit(X_train_nei_flat, X_train_nei_flat, 86 | nb_epoch = nb_epoch1, 87 | batch_size = batch_size, 88 | validation_split=0.3, callbacks = [early_stopping2,reduce_lr2]) 89 | 90 | #%% (3)encoded data 91 | encoded_spatial_model = Model(input = spatial_model.input, 92 | output = spatial_model.layers[4].output) 93 | X_test_encoded = encoded_spatial_model.predict(X_test_nei_flat) 94 | X_train_encoded = encoded_spatial_model.predict(X_train_nei_flat) 95 | 96 | #%% (4)分类模型 97 | from keras.layers import Dropout 98 | from Evaluate import modelMetrics 99 | 100 | 101 | #(1)dnn 102 | def get_mlp_classify_model(input_dim,classify_output_num): 103 | input_layer = Input(shape=(input_dim,)) 104 | x=Dense(256, activation='relu')(input_layer) 105 | x=Dropout(0.3)(x) 106 | x=Dense(256, activation='relu')(x) 107 | output=Dense(classify_output_num, activation='softmax')(x) 108 | 109 | model=Model(input_layer,output=output) 110 | 111 | model.compile(optimizer='adadelta',metrics=['accuracy'],loss='categorical_crossentropy') 112 | return model 113 | 114 | mlp_classify_model = get_mlp_classify_model(encoded_dim, HSI_class) 115 | reduce_lr3 = ReduceLROnPlateau(monitor="val_loss",patience=50) 116 | early_stopping3 = EarlyStopping(monitor='val_loss', patience=80,verbose=1) 117 | 118 | mlp_classify_model.fit(X_train_encoded,Y_train, nb_epoch=nb_epoch2 , 119 | validation_data=(X_test_encoded,Y_test), 120 | callbacks=[early_stopping3, reduce_lr3]) 121 | mlp_classify_model_report, mlp_classify_model_acu = modelMetrics( 122 | mlp_classify_model,X_test_encoded,Y_test) 123 | 124 | 125 | # sklearn mlp 126 | from sklearn.neural_network import MLPClassifier 127 | params = [{'solver': 'adam', 'learning_rate_init': 0.0001}] 128 | labels = ["adam"] 129 | for label, param in zip(labels, params): 130 | print("training: %s" % label) 131 | mlp = MLPClassifier(hidden_layer_sizes=(256,256,16), verbose=1, batch_size = 40, 132 | max_iter=1000, **param)#tol:Tolerance for the training loss. 133 | mlp.fit(X_train_encoded, np_utils.categorical_probas_to_classes(Y_train)) 134 | print("Training set score: %f" % mlp.score(X_train_encoded, np_utils.categorical_probas_to_classes(Y_train))) 135 | print("Training set loss: %f" % mlp.loss_) 136 | clf_report, clf_acu = modelMetrics(mlp,X_test_encoded,Y_test) 137 | 138 | 139 | 140 | # svm 141 | from sklearn.svm import SVC 142 | svc = SVC(kernel="rbf", C=30000, verbose=True) 143 | if svm_switch==0: 144 | svc.fit(X_train,np_utils.categorical_probas_to_classes(Y_train)) 145 | svc_train_accu = svc.score(X_train, np_utils.categorical_probas_to_classes(Y_train)) 146 | svc_report, svc_accu = modelMetrics(svc, X_test, Y_test) 147 | elif svm_switch==1: 148 | svc.fit(X_train_encoded, np_utils.categorical_probas_to_classes(Y_train)) 149 | svc_train_accu = svc.score(X_train_encoded,np_utils.categorical_probas_to_classes(Y_train)) 150 | svc_report, svc_accu = modelMetrics(svc,X_test_encoded,Y_test) 151 | print(svc_train_accu, svc_accu) 152 | 153 | 154 | 155 | 156 | 157 | -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/__pycache__/Evaluate.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/AE_HSI/__pycache__/Evaluate.cpython-35.pyc -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/cnncoder_nn.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from keras.layers import Input, Dense 3 | from keras.models import Model 4 | from keras.utils import np_utils 5 | from keras.callbacks import EarlyStopping 6 | import numpy as np 7 | from sklearn.cross_validation import StratifiedShuffleSplit 8 | 9 | #%%dataset2 10 | """ 11 | """ 12 | def datasetLoad2(): 13 | rootPath = r'D:/data/HSI' 14 | Xpath=rootPath+'/labeled_data.1.27.txt' 15 | Ypath=rootPath+'/data_label.1.27.txt' 16 | imgPath=rootPath+'/data_source.1.27.txt' 17 | idxPath=rootPath+'/labeled_idx.1.27.txt' 18 | 19 | X_data = np.loadtxt(open(Xpath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 20 | X_data=X_data.transpose() 21 | Y_data = np.loadtxt(open(Ypath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 22 | Y_data=np_utils.to_categorical(Y_data-1,16) 23 | data_source=np.loadtxt(open(imgPath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 24 | idx_data=np.loadtxt(open(idxPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 25 | idx_data=idx_data-1 26 | 27 | return X_data,Y_data,data_source,idx_data 28 | 29 | X_data,Y_data,data_source,idx_data=datasetLoad2()#未划分训练集测试集的数据(不包括背景点) 30 | data_source_input=np.array([img.reshape(145,145,1) for img in data_source.transpose()]) 31 | 32 | 33 | #%% args 34 | nb_epoch=500 35 | #autoCoderCNN_input_shape=Input(shape=(145,145,1)) 36 | autoCoderCNN_input_shape=Input(shape=(148,148,1)) 37 | eachLayer1Node_3Layer_input_shape=Input(shape=(200,)) 38 | classify_output_num=16 39 | early_stopping = EarlyStopping(monitor='val_loss', patience=100,verbose=1) 40 | encoding_dim = 32 41 | 42 | 43 | #%% 44 | from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D 45 | """ 46 | autoencoder 47 | encoder 48 | decoder 49 | ‘valid’:image_shape - filter_shape + 1.即滤波器在图像内部滑动 50 | ‘full’ shape: image_shape + filter_shape - 1.允许滤波器超过图像边界 51 | """ 52 | def autoCoderCNN(input_shape): 53 | x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(input_shape) 54 | x = MaxPooling2D((2, 2), border_mode='same')(x) 55 | x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x) 56 | encoded = MaxPooling2D((2, 2), border_mode='same')(x) 57 | 58 | 59 | x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(encoded) 60 | x = UpSampling2D((2, 2))(x) 61 | x = Convolution2D(16, 3, 3, activation='relu',border_mode='same')(x) 62 | x = UpSampling2D((2, 2))(x) 63 | decoded = Convolution2D(1, 3, 3, activation='relu', border_mode='same')(x) 64 | return decoded 65 | 66 | autoCoderCNN_output=autoCoderCNN(autoCoderCNN_input_shape) 67 | autoencoder_model = Model(autoCoderCNN_input_shape, autoCoderCNN_output) 68 | autoencoder_model.compile(optimizer='adadelta', loss='binary_crossentropy') 69 | 70 | #将数据从(200,145,145,1)变为(200,148,148,1) 71 | def Img4DExtend(data_source_input,dim1,dim2_weight,dim3_height,dim4,out_dim2_weight,out_dim3_height): 72 | data_source_input_tmp=np.zeros((200,148,148,1)) 73 | for iidx,img in enumerate(data_source_input): 74 | img=img[:,:,0] 75 | img=np.concatenate((img,np.zeros((145,3)).reshape(145,3)),axis=1) 76 | img=np.concatenate((img,np.zeros((3,148)).reshape(3,148)),axis=0) 77 | data_source_input_tmp[iidx,:,:,0]=img 78 | data_source_input=data_source_input_tmp 79 | return data_source_input 80 | 81 | data_source_input_extend = Img4DExtend(data_source_input,200,145,145,1,148,148) 82 | 83 | autoencoder_model.fit(data_source_input_extend, data_source_input_extend, 84 | nb_epoch=200, 85 | batch_size=256, 86 | shuffle=True, 87 | validation_split=0.3) 88 | """ 89 | data_source_input_extend_decoded:经过编码解码后,得到的与原来数据集同大小的数据 90 | """ 91 | data_source_input_extend_decoded = autoencoder_model.predict(data_source_input_extend) 92 | 93 | #%% 94 | #将数据从(200,148,148,1)变为(200,145,145,1) 95 | def Img4DCut(data_source_input): 96 | data_source_input_tmp=np.zeros((200,145,145,1)) 97 | for iidx,img in enumerate(data_source_input): 98 | img=img[0:145,0:145,0] 99 | data_source_input_tmp[iidx,:,:,0]=img 100 | data_source_input=data_source_input_tmp 101 | return data_source_input 102 | data_source_input_cut=Img4DCut(data_source_input_extend_decoded) 103 | data_source_input_cut_X=np.array([img.reshape(21025) for img in data_source_input]).transpose() 104 | 105 | 106 | 107 | #%% 多层模型 108 | """ 109 | 三层,每层一个节点,迭代3000次,训练集0.92.验证集0.74 110 | """ 111 | def eachLayer1Node_3Layer(input_shape,classify_output_num): 112 | x=Dense(128, activation='relu')(input_shape)#30,0.33;50,0.85,100,0.36,500,0.96(收敛) 113 | x=Dense(64, activation='relu')(x)#50,0.03,100,0.70 114 | # x=Dropout(0.3)(x) 115 | x=Dense(32,activation='relu')(x)#50,0.71,;50,0.49;50,0.86;50.0.88;50,0.32,;100(未收敛),500(收敛) 116 | # x=Dropout(0.3)(x) 117 | output=Dense(classify_output_num,activation='softmax')(x) 118 | return output 119 | 120 | 121 | def datasetSplit(data,lab,idx_of_data,num_calss): 122 | ssp = StratifiedShuffleSplit(lab,n_iter=1,test_size=0.90) 123 | for trainlab,testlab in ssp: 124 | print("train:\n%s\ntest:\n%s" % (trainlab,testlab)) 125 | X_train=data[trainlab] 126 | X_test=data[testlab] 127 | Y_train=np_utils.to_categorical(lab[trainlab],num_calss) 128 | Y_test=np_utils.to_categorical(lab[testlab],num_calss) 129 | idx_train=idx_of_data[trainlab] 130 | idx_test=idx_of_data[testlab] 131 | return X_train,X_test,Y_train,Y_test,idx_train,idx_test 132 | 133 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 134 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(data_source_input_cut_X,Y_data,idx_data,num_calss=16) 135 | 136 | 137 | print ("start mlp classification...") 138 | oneLayerOneNode_output=eachLayer1Node_3Layer(eachLayer1Node_3Layer_input_shape,classify_output_num) 139 | eachLayer3Node_3Layer_model=Model(eachLayer1Node_3Layer_input_shape,output=oneLayerOneNode_output) 140 | eachLayer3Node_3Layer_model.compile(optimizer='adadelta',metrics=['accuracy'],loss='categorical_crossentropy') 141 | early_stopping = EarlyStopping(monitor='val_loss', patience=100,verbose=0) 142 | eachLayer3Node_3Layer_model.fit(X_train,Y_train, nb_epoch=nb_epoch,validation_data=(X_test,Y_test),callbacks=[early_stopping]) 143 | 144 | 145 | 146 | 147 | 148 | -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/layerwise_AE.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Mar 2 15:59:40 2017 4 | 逐层初始化 5 | @author: Shenjunling 6 | """ 7 | #%% (1.加载数据) 8 | from HSIDataLoad import * 9 | import numpy as np 10 | 11 | #dataset2 12 | rootPath = r'G:/data/HSI' 13 | X_data,Y_data,data_source,idx_data = datasetLoad2(rootPath) 14 | idx = np.arange(X_data.shape[0]) 15 | np.random.shuffle(idx) 16 | X_data = X_data[idx] 17 | Y_data = Y_data[idx] 18 | 19 | 20 | #%% args 21 | nb_epoch1 = 2000 22 | nb_epoch2 = 2000 23 | input_dim = 200 24 | input_dim2 = 50 25 | test_size = 0.5 26 | batch_size1 = 256 27 | classify_output_num = 16 28 | encoding_dim = 64 29 | 30 | from keras.callbacks import EarlyStopping 31 | 32 | 33 | 34 | #%% (2)自编码器 35 | from keras.layers import Input, Dense 36 | from keras.models import Model 37 | from keras.utils import np_utils 38 | from keras.callbacks import ReduceLROnPlateau 39 | """ 40 | ‘valid’:image_shape - filter_shape + 1.即滤波器在图像内部滑动 41 | ‘full’ shape: image_shape + filter_shape - 1.允许滤波器超过图像边界 42 | """ 43 | def layerwise_model(input_layer, output_layer, X_data): 44 | 45 | model = Model(input = input_layer, output = output_layer) 46 | model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics = ['mse']) 47 | 48 | reduce_lr = ReduceLROnPlateau(monitor="val_loss",patience=30) 49 | early_stopping = EarlyStopping(monitor='val_loss', patience=100,verbose=1) 50 | model.fit(X_data, X_data, 51 | nb_epoch = nb_epoch1, 52 | batch_size = batch_size1, 53 | validation_split=0.3,callbacks = [early_stopping, reduce_lr]) 54 | 55 | return model 56 | 57 | input_layer = Input(shape=(200,)) 58 | encoded_layer = Dense(128,activation="relu")(input_layer) 59 | output_layer = Dense(200,activation="relu")(encoded_layer) 60 | input_data = X_data 61 | model = layerwise_model(input_layer, output_layer,input_data) 62 | 63 | -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/mlp_HSI.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Dec 06 14:06:47 2016 4 | 5 | @author: Administrator 6 | """ 7 | 8 | import numpy as np 9 | from HSIDataLoad import * 10 | 11 | n_components = 3 12 | #X_train,Y_train,idx_train,X_test,Y_test,idx_test,data_source=dataLoad3(r"D:\OneDrive\codes\python\RandomForest\data")#另一数据集 13 | #X_train = X_train.transpose() 14 | #X_test = X_test.transpose() 15 | 16 | #from keras.utils import np_utils 17 | test_size = 0.9 18 | X_data,Y_data,data_source,idx_data=datasetLoad2("D:/data/HSI") 19 | #X_data,data_source = PCA_data_Source(data_source,idx_data,n_components=n_components) 20 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 21 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(X_data,Y_data,idx_data,num_calss=16,test_size=test_size) 22 | Y_train = np_utils.categorical_probas_to_classes(Y_train)+1 23 | Y_test = np_utils.categorical_probas_to_classes(Y_test)+1 24 | 25 | 26 | #%% 模型初始化 27 | """ 28 | alpha:L2 regularzation 29 | beta:only used in ADAM 30 | learning_rate : {‘constant’, ‘invscaling’, ‘adaptive’}, 学习率,在sgd方法中改变 31 | momentum:动量,在sgd方法中使用 32 | activation : {‘identity’, ‘logistic’, ‘tanh’, ‘relu’}, default ‘relu’ 33 | """ 34 | from sklearn.neural_network import MLPClassifier 35 | from sklearn.metrics import classification_report 36 | from sklearn.metrics import accuracy_score 37 | clf=MLPClassifier(solver="lbfgs",alpha=1E-5,hidden_layer_sizes=(200,64,16),max_iter=5000) 38 | clf.fit(X_train,Y_train) 39 | 40 | Y_pred=clf.predict(X_test) 41 | report =classification_report(Y_pred,Y_test) 42 | acu=accuracy_score(Y_pred,Y_test) 43 | -------------------------------------------------------------------------------- /HSI Classification/AE_HSI/nn_HSI.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Dec 06 14:06:47 2016 4 | 5 | optimizer='adadelta',ada自适应的 6 | metrics='accuracy', 7 | loss='categorical_crossentropy'多类对数损失函数 8 | 9 | @author: Administrator 10 | """ 11 | 12 | from keras.models import Sequential,Model 13 | from keras.layers import Dense,Input,merge,Dropout 14 | from keras.activations import softmax,relu 15 | from keras.utils import np_utils 16 | from keras.callbacks import EarlyStopping 17 | import numpy as np 18 | from sklearn.cross_validation import StratifiedShuffleSplit 19 | 20 | #%% 数据读取 21 | """ 22 | X_data,Y_data,data_source,idx_data=datasetLoad2()#未划分训练集测试集的数据(不包括背景点) 23 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 24 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(X_data,Y_data,idx_data,num_calss=16) 25 | """ 26 | 27 | def datasetSplit(data,lab,idx_of_data,num_calss): 28 | ssp = StratifiedShuffleSplit(lab,n_iter=1,test_size=0.90) 29 | for trainlab,testlab in ssp: 30 | print("train:\n%s\ntest:\n%s" % (trainlab,testlab)) 31 | X_train=data[trainlab] 32 | X_test=data[testlab] 33 | Y_train=np_utils.to_categorical(lab[trainlab],num_calss) 34 | Y_test=np_utils.to_categorical(lab[testlab],num_calss) 35 | idx_train=idx_of_data[trainlab] 36 | idx_test=idx_of_data[testlab] 37 | return X_train,X_test,Y_train,Y_test,idx_train,idx_test 38 | 39 | 40 | def datasetLoad2(): 41 | rootPath = r'D:/data/HSI' 42 | Xpath=rootPath+'/labeled_data.1.27.txt' 43 | Ypath=rootPath+'/data_label.1.27.txt' 44 | imgPath=rootPath+'/data_source.1.27.txt' 45 | idxPath=rootPath+'/labeled_idx.1.27.txt' 46 | 47 | X_data = np.loadtxt(open(Xpath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 48 | X_data=X_data.transpose() 49 | Y_data = np.loadtxt(open(Ypath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 50 | Y_data=np_utils.to_categorical(Y_data-1,16) 51 | data_source=np.loadtxt(open(imgPath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 52 | idx_data=np.loadtxt(open(idxPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 53 | idx_data=idx_data-1 54 | 55 | return X_data,Y_data,data_source,idx_data 56 | 57 | #%% 模型评估 58 | from keras.utils.np_utils import categorical_probas_to_classes 59 | from sklearn.metrics import classification_report 60 | from sklearn.metrics import accuracy_score 61 | 62 | #判断f1和总的准确率,输入的Y_test是概率形式的 63 | def modelMetrics(model_fitted,X_test,Y_test): 64 | Y_predict_prob=model_fitted.predict(X_test) 65 | Y_predict_ctg=categorical_probas_to_classes(Y_predict_prob) 66 | report =classification_report(Y_predict_ctg,categorical_probas_to_classes(Y_test))#各个类的f1score 67 | accuracy = accuracy_score(Y_predict_ctg,categorical_probas_to_classes(Y_test))#总的准确度 68 | return report,accuracy 69 | 70 | #%% args 71 | X_data,Y_data,data_source,idx_data=datasetLoad2()#未划分训练集测试集的数据(不包括背景点) 72 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 73 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(X_data,Y_data,idx_data,num_calss=16) 74 | 75 | nb_epoch = 500 76 | input_shape = (200,)#输入特征维度4 77 | classify_output_num=16 78 | 79 | """ 80 | monitor='val_loss',需要监视的量 81 | patience=2,监视两相比上一次迭代没有下降,经过几次epoch之后 82 | 在min模式下,如果检测值停止下降则中止训练。在max模式下,当检测值不再上升则停止训练。 83 | """ 84 | early_stopping = EarlyStopping(monitor='val_loss', patience=50,verbose=1) 85 | 86 | #%% 多层模型 87 | """ 88 | 三层最典型多层感知机网络,每层单输入单输出,迭代3000次,训练集0.92.验证集0.74,新数据集训练集0.97,测试集0.74 89 | """ 90 | def basic_mlp_model(input_shape,classify_output_num): 91 | input_layer = Input(input_shape) 92 | x=Dense(128, activation='relu')(input_layer) 93 | x=Dropout(0.3)(x) 94 | x=Dense(64, activation='relu')(x) 95 | x=Dropout(0.3)(x) 96 | x=Dense(32,activation='relu')(x) 97 | x=Dropout(0.3)(x) 98 | output=Dense(classify_output_num,activation='softmax')(x) 99 | 100 | model = Model(input = input_layer,output=output) 101 | model.compile(optimizer='adadelta',metrics=['accuracy'],loss='categorical_crossentropy') 102 | return model 103 | 104 | basic_mlp_model = basic_mlp_model(input_shape,classify_output_num) 105 | 106 | #============================================================================== 107 | #basic_mlp_model.fit(X_train,Y_train, nb_epoch=nb_epoch,validation_split=0.1,callbacks=[early_stopping]) 108 | basic_mlp_model.fit(X_train,Y_train, nb_epoch=nb_epoch,validation_data=(X_test,Y_test),callbacks=[early_stopping]) 109 | basic_mlp_model_report,basic_mlp_model_accuracy = modelMetrics(basic_mlp_model,X_test,Y_test) 110 | #============================================================================== 111 | #%% 一层多个节点 112 | """ 113 | 一层三个节点,迭代971次,准确率在71~75%,验证集低约6个百分点,验证集低约5个百分点 114 | """ 115 | def eachLayer3Node_2Layer(input_shape,classify_output_num): 116 | dense1=Dense(64) 117 | x1=dense1(input_shape) 118 | x2=dense1(input_shape) 119 | x3=dense1(input_shape) 120 | assert dense1.get_output_at(0) == x1,"dense1层的第一个节点的输出不是x1" 121 | assert dense1.get_output_at(1) == x2,"dense1层的第二个节点的输出不是x2" 122 | assert dense1.get_output_at(2) == x3,"dense1层的第三个节点的输出不是x3" 123 | merged=merge([x1,x2,x3],mode='sum') 124 | output=Dense(16,activation='softmax')(merged) 125 | return output 126 | 127 | """ 128 | 三层,前两层三个节点 129 | """ 130 | def eachLayer3Node_3Layer(input_shape,classify_output_num): 131 | dense1=Dense(64) 132 | x1=dense1(input_shape) 133 | x2=dense1(input_shape) 134 | x3=dense1(input_shape) 135 | assert dense1.get_output_at(0) == x1,"dense1层的第一个节点的输出不是x1" 136 | assert dense1.get_output_at(1) == x2,"dense1层的第二个节点的输出不是x2" 137 | assert dense1.get_output_at(2) == x3,"dense1层的第三个节点的输出不是x3" 138 | dense2=Dense(32) 139 | x11=dense2(x1) 140 | x21=dense2(x2) 141 | x31=dense2(x3) 142 | assert dense2.get_output_at(0) == x11,"dense2层的第一个节点的输出不是x1" 143 | assert dense2.get_output_at(1) == x21,"dense2层的第二个节点的输出不是x2" 144 | assert dense2.get_output_at(2) == x31,"dense2层的第三个节点的输出不是x3" 145 | merged=merge([x11,x21,x31],mode='sum') 146 | output=Dense(16,activation='softmax')(merged) 147 | return output 148 | 149 | oneLayer3Node_output=eachLayer3Node_3Layer(input_shape,classify_output_num) 150 | 151 | 152 | 153 | #%% 模型初始化 154 | eachLayer3Node_3Layer_model=Model(input_shape,output=oneLayer3Node_output) 155 | 156 | eachLayer3Node_3Layer_model.compile(optimizer='adadelta',metrics=['accuracy'],loss='categorical_crossentropy') 157 | 158 | 159 | 160 | #============================================================================== 161 | # eachLayer3Node_3Layer_model.fit(X_train,Y_train, nb_epoch=nb_epoch,validation_data=(X_test,Y_test),callbacks=[early_stopping]) 162 | # eachLayer3Node_3Layer_model_report,eachLayer3Node_3Layer_model_accuracy = modelMetrics( 163 | # eachLayer3Node_3Layer_model,X_test,Y_test) 164 | 165 | #============================================================================== 166 | 167 | 168 | 169 | -------------------------------------------------------------------------------- /HSI Classification/CNN1d_neighbour_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 26 16:04:20 2017 4 | @author: Administrator 5 | """ 6 | #logBasePath = "D:/data/mylog/KerasDL/" 7 | #rootPath = r'D:/data/HSI' 8 | 9 | rootPath = "G:/data/HSI" 10 | logBasePath = "G:/data/mylog/KerasDL/CNN1d_neighbour_model" 11 | 12 | test_size = 0.9 13 | nb_epoch = 2000 14 | nb_classes = 16 15 | batch_size = 200 16 | 17 | block_size = 7 18 | input_shape = (200,block_size*block_size) 19 | 20 | #%% 21 | from HSIDatasetLoad import * 22 | from keras.utils import np_utils 23 | import numpy as np 24 | #数据规范化 25 | def data_standard(X_data): 26 | import numpy as np 27 | sample,block_size,block_size,band = X_data.shape 28 | new_X_data = np.zeros((sample,band,block_size*block_size)) 29 | for i in range(sample): 30 | for row in range(block_size): 31 | for col in range(block_size): 32 | new_X_data[i,:,row*block_size+col] = X_data[i,row,col,:] 33 | return new_X_data 34 | 35 | HSI = HSIData(rootPath) 36 | X_data = HSI.X_data 37 | Y_data = HSI.Y_data 38 | data_source = HSI.data_source 39 | idx_data = HSI.idx_data 40 | 41 | X_data = HSI.getNeighborData(data_source=data_source,idx_data=idx_data,block_size=block_size) 42 | X_data = data_standard(X_data) 43 | 44 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 45 | X_train,X_test,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data,Y_data,idx_data,16,test_size = test_size) 46 | 47 | 48 | #%% 49 | from keras.layers import Input,merge,Dense,Dropout,Flatten,Convolution1D,MaxPooling1D 50 | from keras.layers.normalization import BatchNormalization 51 | from keras.models import Model 52 | from keras.regularizers import l2 53 | import tensorflow as tf 54 | 55 | """ 56 | 2——D guass distribution 57 | """ 58 | def guass_dist(mu1, mu2, sig1, sig2, rho, window): 59 | sig1_2 = sig1**2 60 | sig2_2 = sig2**2 61 | filters = np.zeros(window**2) 62 | for i in range(window): 63 | for j in range(window): 64 | e = (i-mu1)**2/sig1_2+(j-mu2)**2/sig2_2-2*rho*(i-mu1)*(j-mu2)/sig1*sig2 65 | f = 1/(2*3.14*sig1*sig2*np.sqrt(1-rho**2))*np.exp(-1/(2*(1-rho**2))*e) 66 | filters[i*window+j] = f 67 | return filters 68 | 69 | """ 70 | 手动1改gaussian_filter的窗口 71 | """ 72 | def gauss_filter(shape, name=None, dim_ordering='th'): 73 | assert len(shape)==4,"guass filter shape error, shape %d,%d,%d,%d" %(shape[0],shape[1],shape[2],shape[3]) 74 | # assert shape[1]==1,"size one filter" 75 | filters = np.zeros(shape) 76 | for i in range(shape[-1]): 77 | filters[0,0,:,i] = guass_dist(1,1,1,1,0,7) 78 | return tf.Variable(filters, dtype=tf.float32, name=name) 79 | 80 | 81 | """ 82 | batch200,gauss_filter,conv3,block3,adadelta. 83 | 1036/1036 [==============================] - 4s - loss: 0.1366 - acc: 0.9923 - val_loss: 0.4995 - val_acc: 0.8825 84 | batch200,gauss_filter,conv4,block3,adadelta. 85 | 1000,1036/1036 [==============================] - 6s - loss: 0.2877 - acc: 0.9305 - val_loss: 0.4829 - val_acc: 0.8765 86 | batch200,gauss_filter,conv4,block3,adadelta. 87 | Epoch 701/2000 88 | 1036/1036 [==============================] - 8s - loss: 0.4024 - acc: 0.8996 - val_loss: 0.4835 - val_acc: 0.8757 89 | batch200,gauss_filter,conv5,block3,adadelta. 90 | Epoch 666/2000 91 | 1036/1036 [==============================] - 8s - loss: 0.3767 - acc: 0.8938 - val_loss: 0.4977 - val_acc: 0.8593 92 | """ 93 | 94 | 95 | def get_CNN1d_model(input_shape, classify_output_num, my_optimizer): 96 | input_layer = Input(input_shape) 97 | conv1 = Convolution1D(1, 1, subsample_length=1,init=gauss_filter, 98 | border_mode = "valid")(input_layer) 99 | # conv2 = Convolution1D(12, 5, subsample_length=1, 100 | # border_mode = "valid")(conv1) 101 | # conv3 = Convolution1D(24, 4, subsample_length=1, 102 | # border_mode = "valid")(conv2) 103 | # conv4 = Convolution1D(36, 5, subsample_length=1, 104 | # border_mode = "valid")(conv3) 105 | # conv5 = Convolution1D(48, 5, subsample_length=1, 106 | # border_mode = "valid")(conv4) 107 | flat1 = Flatten()(conv1) 108 | 109 | 110 | x = Dense(512, activation='relu', W_regularizer=l2(0.1))(flat1) 111 | x = BatchNormalization()(x) 112 | x = Dropout(0.3)(x) 113 | x = Dense(512, activation='relu', W_regularizer=l2(0.1))(x) 114 | x = BatchNormalization()(x) 115 | x = Dropout(0.3)(x) 116 | output = Dense(classify_output_num, activation='softmax')(x) 117 | 118 | model = Model(input = input_layer,output=output) 119 | model.compile(optimizer = my_optimizer,metrics=['accuracy'],loss='categorical_crossentropy') 120 | return model 121 | 122 | basic_model = get_CNN1d_model(input_shape, nb_classes, 'adadelta') 123 | 124 | #%% callbacks 125 | from keras.utils.visualize_util import plot 126 | from keras.callbacks import EarlyStopping,ReduceLROnPlateau,TensorBoard,CSVLogger,ModelCheckpoint 127 | 128 | plot(basic_model,to_file=logBasePath+"/CNN1d_neighbour_model.png",show_shapes=True) 129 | 130 | 131 | csvLogger = CSVLogger(filename = logBasePath+"/CNN1d_neighbour_model_csv.log") 132 | reduce_lr = ReduceLROnPlateau(patience = 40, factor = 0.1, verbose = 1) 133 | tensor_board = TensorBoard(log_dir = logBasePath, histogram_freq=0, write_graph=True, write_images=True) 134 | ealystop = EarlyStopping(monitor='val_loss', patience = 300) 135 | checkPoint = ModelCheckpoint(filepath=logBasePath+"/CNN1d_neighbour_model_check", monitor = "val_acc", mode = "max", save_best_only=True) 136 | 137 | 138 | #%% fit model 139 | basic_model.fit(X_train,Y_train,nb_epoch=nb_epoch,batch_size=30,verbose=1, 140 | validation_data=[X_test,Y_test],callbacks=[ealystop, tensor_board, csvLogger, reduce_lr, checkPoint]) 141 | 142 | #%% fit model 143 | #from keras.wrappers.scikit_learn import KerasClassifier 144 | #keras_model = KerasClassifier(get_CNN1d_model, input_shape = input_shape, classify_output_num = nb_classes)#keras model 145 | # 146 | #from sklearn.grid_search import GridSearchCV 147 | # 148 | #param_grid = dict( 149 | # my_optimizer = ['Adadelta','RMSprop'], 150 | # batch_size = [10,20,30,50], 151 | # nb_epoch = [nb_epoch], 152 | # validation_data=[[X_test,Y_test]], 153 | # callbacks=[[ealystop, reduce_lr]], 154 | # verbose=[1] 155 | # ) 156 | # 157 | #grid_model = GridSearchCV(estimator = keras_model, param_grid = param_grid, n_jobs=1)#scoring=make_scorer(mean_squared_error) 158 | #grid_model.fit(X_train, Y_train) 159 | 160 | 161 | #%% 评估结果 162 | #print("Best: %f using %s" % (grid_model.best_score_, grid_model.best_params_)) 163 | ##Best: 0.892892 using {'batch_size': 20, 'my_optimizer': 'Adadelta', } 164 | #all_result = grid_model.grid_scores_ 165 | # 166 | #best_model = grid_model.best_estimator_.model 167 | #metric_names = best_model.metrics_names 168 | #metric_values = best_model.evaluate(X_test, Y_test) 169 | #print('Test score: %f , Test accuracy: %f' % (metric_values[0], metric_values[1])) 170 | ##Test score: 0.232483 , Test accuracy: 0.932617 171 | 172 | 173 | 174 | 175 | -------------------------------------------------------------------------------- /HSI Classification/CNN2d_pca_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 26 16:04:20 2017 4 | @author: Administrator 5 | """ 6 | #logBasePath = "D:/data/mylog/KerasDL/" 7 | #rootPath = r'D:/data/HSI' 8 | 9 | rootPath = "G:/data/HSI" 10 | logBasePath = "G:/data/mylog/KerasDL/CNN2d_pca_model" 11 | 12 | block_size = 11 13 | test_size = 0.9 14 | #validate_size = 0.8 15 | nb_epoch = 100 16 | nb_classes = 16 17 | batch_size = 30 18 | 19 | #是否使用pca 20 | use_pca = True 21 | n_components = 15 22 | if use_pca ==True: 23 | input_shape = (block_size,block_size,n_components) 24 | else: 25 | input_shape = (block_size,block_size,200) 26 | 27 | #%% 28 | from HSIDatasetLoad import * 29 | from keras.utils import np_utils 30 | 31 | HSI = HSIData(rootPath) 32 | X_data = HSI.X_data 33 | Y_data = HSI.Y_data 34 | data_source = HSI.data_source 35 | idx_data = HSI.idx_data 36 | 37 | l2_lr = 0.1 38 | #是否使用PCA降维 39 | if use_pca==True: 40 | data_source = HSI.PCA_data_Source(data_source,n_components=n_components) 41 | 42 | X_data_nei = HSI.getNeighborData(data_source,idx_data,block_size) 43 | 44 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 45 | X_train_nei,X_test_nei,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data_nei,Y_data,idx_data,16,test_size = test_size) 46 | 47 | 48 | #%% 49 | from keras.layers import MaxPooling2D,Input,Dense,Dropout,Flatten,Convolution2D 50 | from keras.layers.normalization import BatchNormalization 51 | from keras.models import Model 52 | from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping 53 | from keras.utils.visualize_util import plot 54 | from keras.regularizers import l2 55 | 56 | border_name = "same" 57 | def CNN2d_model(input_shape, nb_classes): 58 | input_layer = Input(input_shape) 59 | 60 | conv0 = Convolution2D(64,1,1, border_mode=border_name)(input_layer) 61 | 62 | conv1 = Convolution2D(64,3,3, 63 | border_mode=border_name)(conv0) 64 | bn1 = BatchNormalization()(conv1) 65 | 66 | conv2 = Convolution2D(64,3,3, 67 | border_mode=border_name)(bn1) 68 | bn2 = BatchNormalization()(conv2) 69 | 70 | conv3 = Convolution2D(256,3,3, 71 | border_mode=border_name)(bn2) 72 | bn3 = BatchNormalization(axis=-1)(conv3) 73 | 74 | conv4 = Convolution2D(256,3,3,border_mode=border_name)(bn3) 75 | bn4 = BatchNormalization(axis=-1)(conv4) 76 | pool4 = MaxPooling2D(pool_size=(2,2))(bn4) 77 | 78 | 79 | flat1 = Flatten()(pool4) 80 | dense1 = Dense(512,activation="relu",W_regularizer=l2(l2_lr))(flat1) 81 | bn5 = BatchNormalization()(dense1) 82 | drop5 = Dropout(0.3)(bn5) 83 | 84 | dense2 = Dense(512,activation="relu",W_regularizer=l2(l2_lr))(drop5) 85 | bn6 = BatchNormalization()(dense2) 86 | drop6 = Dropout(0.3)(bn6) 87 | 88 | dense3 = Dense(nb_classes,activation="softmax")(drop6) 89 | 90 | model = Model(input = input_layer,output = dense3) 91 | model.compile(loss='categorical_crossentropy', 92 | optimizer="adadelta", 93 | metrics=['accuracy']) 94 | return model 95 | 96 | CNN2d_model = CNN2d_model(input_shape,nb_classes) 97 | 98 | 99 | #%% fit model 100 | """ 101 | categorical_crossentropy,adadelta,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 30 102 | Epoch 00413: reducing learning rate to 9.999999310821295e-05. 103 | 1036/1036 [==============================] - 3s - loss: 0.0028 - acc: 1.0000 - val_loss: 0.2989 - val_acc: 0.9199 104 | categorical_crossentropy,adadelta,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 30, conv1,1在最前面 105 | Epoch 00539: reducing learning rate to 9.999999310821295e-05. 106 | 1036/1036 [==============================] - 3s - loss: 0.0024 - acc: 1.0000 - val_loss: 0.2316 - val_acc: 0.9342 107 | """ 108 | plot(CNN2d_model,to_file=logBasePath+"/CNN2d_pca_model.png",show_shapes=True) 109 | reduce_lr = ReduceLROnPlateau(patience = 50, verbose =1) 110 | ealystop = EarlyStopping(monitor='val_loss',patience = 100) 111 | #myLogger = MyProgbarLogger(to_file=logBasePath+"/CNN2d_pca_model.log") 112 | csvLog = CSVLogger(logBasePath+"/CNN2d_pca_model.log") 113 | CNN2d_model.fit(X_train_nei,Y_train,nb_epoch=nb_epoch,batch_size=batch_size,verbose=1, 114 | validation_data=[X_test_nei,Y_test],callbacks=[csvLog,reduce_lr,ealystop]) 115 | 116 | 117 | 118 | -------------------------------------------------------------------------------- /HSI Classification/CNN3d_pca_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 26 16:04:20 2017 4 | @author: Administrator 5 | """ 6 | #logBasePath = "D:/data/mylog/KerasDL/" 7 | #rootPath = r'D:/data/HSI' 8 | 9 | import tensorflow as tf 10 | from keras.backend.tensorflow_backend import set_session 11 | config = tf.ConfigProto() 12 | #指定了每个GPU进程中使用显存的上限,但它只能均匀作用于所有GPU,无法对不同GPU设置不同的上限 13 | #以上的显存限制仅仅为了在跑小数据集时避免对显存的浪费而已 14 | #但在实际运行中如果达到了这个阈值,程序有需要的话还是会突破这个阈值。 15 | config.gpu_options.per_process_gpu_memory_fraction = 0.3 16 | set_session(tf.Session(config=config)) 17 | 18 | rootPath = "G:/data/HSI" 19 | logBasePath = "G:/data/mylog/KerasDL/" 20 | 21 | block_size = 7 22 | test_size = 0.9 23 | validate_size = 0.8 24 | nb_epoch = 500 25 | nb_classes = 16 26 | batch_size = 32 27 | 28 | #是否使用pca 29 | use_pca = False 30 | if use_pca ==True: 31 | n_components = 30 32 | else: 33 | n_components = 200 34 | input_shape = (block_size,block_size,n_components,1) 35 | 36 | #%% 37 | from HSIDatasetLoad import * 38 | from keras.utils import np_utils 39 | 40 | HSI = HSIData(rootPath) 41 | X_data = HSI.X_data 42 | Y_data = HSI.Y_data 43 | data_source = HSI.data_source 44 | idx_data = HSI.idx_data 45 | 46 | l2_lr = 0.1 47 | 48 | #是否使用PCA降维 49 | if use_pca==True: 50 | data_source = HSI.PCA_data_Source(data_source,n_components=n_components) 51 | 52 | X_data_nei = HSI.getNeighborData(data_source,idx_data,block_size) 53 | #reshape(none,7,7,30,1) 54 | X_data_nei = np.array([x.reshape(block_size,block_size,n_components,1) for x in X_data_nei]) 55 | 56 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 57 | X_train_nei,X_test_nei,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data_nei,Y_data,idx_data,16,test_size = test_size) 58 | 59 | 60 | #%% 61 | from keras.layers import MaxPooling3D,Input,Dense,Dropout,Flatten,Convolution3D 62 | from keras.layers.normalization import BatchNormalization 63 | from keras.models import Model 64 | from mykeras.callbacks import MyProgbarLogger 65 | from keras.callbacks import ReduceLROnPlateau 66 | from keras.utils.visualize_util import plot 67 | from keras.optimizers import adadelta 68 | from keras.regularizers import l2 69 | from random import randint 70 | 71 | border_name = "same" 72 | def CNN3d_model(input_shape, nb_classes): 73 | input_layer = Input(input_shape) 74 | 75 | conv1 = Convolution3D(16,3,3,1,activation="relu", 76 | border_mode=border_name,W_regularizer=l2(l2_lr))(input_layer) 77 | bn1 = BatchNormalization(axis=-1)(conv1) 78 | 79 | conv2 = Convolution3D(32,3,3,1,W_regularizer=l2(l2_lr), 80 | activation="relu",border_mode=border_name)(bn1) 81 | bn2 = BatchNormalization(axis=-1)(conv2) 82 | # pool2 = MaxPooling2D(pool_size=(2,2))(bn2) 83 | drop2 = Dropout(0.3)(bn2) 84 | 85 | conv3 = Convolution3D(32,3,3,1,W_regularizer=l2(l2_lr), 86 | activation="relu",border_mode=border_name)(drop2) 87 | bn3 = BatchNormalization(axis=-1)(conv3) 88 | pool3 = MaxPooling3D(pool_size=(3,3,1))(bn3) 89 | drop3 = Dropout(0.3)(pool3) 90 | 91 | conv4 = Convolution3D(64,3,3,1,W_regularizer=l2(l2_lr), 92 | activation="relu",border_mode=border_name)(drop3) 93 | bn4 = BatchNormalization(axis=-1)(conv4) 94 | pool4 = MaxPooling3D(pool_size=(2,2,1))(bn4) 95 | drop4 = Dropout(0.3)(pool4) 96 | 97 | conv5 = Convolution3D(8,1,1,1, 98 | activation="relu",border_mode=border_name)(drop4) 99 | bn5 = BatchNormalization(axis=-1)(conv5) 100 | 101 | flat1 = Flatten()(conv5) 102 | dense1 = Dense(256,activation="relu",W_regularizer=l2(l2_lr))(flat1) 103 | bn5 = BatchNormalization()(dense1) 104 | drop5 = Dropout(0.3)(bn5) 105 | 106 | dense2 = Dense(256,activation="relu",W_regularizer=l2(l2_lr))(drop5) 107 | bn6 = BatchNormalization()(dense2) 108 | drop6 = Dropout(0.3)(bn6) 109 | 110 | dense3 = Dense(nb_classes,activation="softmax")(drop6) 111 | 112 | model = Model(input = input_layer,output = dense3) 113 | model.compile(loss='categorical_crossentropy', 114 | optimizer="adam", 115 | metrics=['accuracy']) 116 | return model 117 | 118 | 119 | def generate_batch_data_random(x, y, batch_size): 120 | """逐步提取batch数据到显存,降低对显存的占用""" 121 | ylen = len(y) 122 | loopcount = ylen//batch_size#向下取整 123 | while (True): 124 | i = randint(0,loopcount) 125 | yield x[i * batch_size:(i + 1) * batch_size], y[i * batch_size:(i + 1) * batch_size] 126 | 127 | CNN3d_model = CNN3d_model(input_shape,nb_classes) 128 | 129 | 130 | #%% fit model 131 | plot(CNN3d_model,to_file=logBasePath+"CNN3d_pca_preserve_band_model_V2.png",show_shapes=True) 132 | reduce_lr = ReduceLROnPlateau(patience=40) 133 | myLogger = MyProgbarLogger(to_file=logBasePath+"CNN3d_pca_preserv_band_model_V2.log") 134 | #CNN2d_model.fit(X_train_nei,Y_train,nb_epoch=nb_epoch,batch_size=batch_size,verbose=1, 135 | # validation_data=[X_test_nei,Y_test],callbacks=[myLogger,reduce_lr]) 136 | train_gene = generate_batch_data_random(X_train_nei,Y_train,batch_size) 137 | test_gene = generate_batch_data_random(X_test_nei,Y_test,batch_size) 138 | CNN3d_model.fit_generator(train_gene,nb_epoch=nb_epoch,verbose=1, validation_data=test_gene, 139 | nb_val_samples=(len(Y_test)//batch_size*batch_size), 140 | samples_per_epoch=len(Y_train)//batch_size*batch_size, callbacks=[myLogger,reduce_lr]) 141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /HSI Classification/HSI_LSTM.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Jun 19 11:26:53 2017 4 | 5 | @author: Shenjunling 6 | """ 7 | #logBasePath = "D:/data/mylog/KerasDL/" 8 | #rootPath = r'D:/data/HSI' 9 | 10 | rootPath = "G:/data/HSI" 11 | logBasePath = "G:/data/mylog/KerasDL/HSI_LSTM_model" 12 | 13 | test_size = 0.9 14 | nb_epoch = 2000 15 | nb_classes = 16 16 | batch_size = 200 17 | 18 | block_size = 1 19 | input_shape = (200,block_size*block_size) 20 | 21 | #%% 22 | from HSIDatasetLoad import * 23 | from keras.utils import np_utils 24 | import numpy as np 25 | #数据规范化 26 | def data_standard(X_data): 27 | sample,block_size,block_size,band = X_data.shape 28 | new_X_data = np.zeros((sample,band,block_size*block_size)) 29 | for i in range(sample): 30 | for row in range(block_size): 31 | for col in range(block_size): 32 | new_X_data[i,:,row*block_size+col] = X_data[i,row,col,:] 33 | return new_X_data 34 | 35 | HSI = HSIData(rootPath) 36 | X_data = HSI.X_data 37 | Y_data = HSI.Y_data 38 | data_source = HSI.data_source 39 | idx_data = HSI.idx_data 40 | 41 | X_data = HSI.getNeighborData(data_source=data_source,idx_data=idx_data,block_size=block_size) 42 | X_data = data_standard(X_data) 43 | 44 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 45 | X_train,X_test,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data,Y_data,idx_data,16,test_size = test_size) 46 | 47 | 48 | #%% 49 | from keras.layers import Input,merge,Dense,Dropout,Flatten,Convolution1D,MaxPooling1D 50 | from keras.layers.normalization import BatchNormalization 51 | from keras.models import Model 52 | from keras.regularizers import l2 53 | import tensorflow as tf 54 | from keras.layers import LSTM 55 | 56 | 57 | 58 | def get_model(input_shape, classify_output_num, my_optimizer): 59 | input_layer = Input(input_shape) 60 | lstm = LSTM(100,activation="relu")(input_layer) 61 | 62 | 63 | x = Dense(256, activation='relu', W_regularizer=l2(0.1))(lstm) 64 | x = BatchNormalization()(x) 65 | x = Dropout(0.3)(x) 66 | x = Dense(256, activation='relu', W_regularizer=l2(0.1))(x) 67 | x = BatchNormalization()(x) 68 | x = Dropout(0.3)(x) 69 | output = Dense(classify_output_num, activation='softmax')(x) 70 | 71 | model = Model(input = input_layer,output=output) 72 | model.compile(optimizer = my_optimizer,metrics=['accuracy'],loss='categorical_crossentropy') 73 | return model 74 | 75 | basic_model = get_model(input_shape, nb_classes, 'adadelta') 76 | 77 | #%% callbacks 78 | from keras.utils.visualize_util import plot 79 | from keras.callbacks import EarlyStopping,ReduceLROnPlateau,TensorBoard,CSVLogger,ModelCheckpoint 80 | 81 | plot(basic_model,to_file=logBasePath+"/HSI_LSTM_model.png",show_shapes=True) 82 | 83 | 84 | csvLogger = CSVLogger(filename = logBasePath+"/HSI_LSTM_model.log") 85 | reduce_lr = ReduceLROnPlateau(patience = 40, factor = 0.1, verbose = 1) 86 | tensor_board = TensorBoard(log_dir = logBasePath, histogram_freq=0, write_graph=True, write_images=True) 87 | ealystop = EarlyStopping(monitor='val_loss', patience = 300) 88 | checkPoint = ModelCheckpoint(filepath=logBasePath+"/model_check", monitor = "val_acc", mode = "max", save_best_only=True) 89 | 90 | 91 | #%% fit model 92 | basic_model.fit(X_train,Y_train,nb_epoch=nb_epoch,batch_size=30,verbose=1, 93 | validation_data=[X_test,Y_test],callbacks=[ealystop, tensor_board, csvLogger, reduce_lr, checkPoint]) 94 | 95 | #%% fit model 96 | #from keras.wrappers.scikit_learn import KerasClassifier 97 | #keras_model = KerasClassifier(get_CNN1d_model, input_shape = input_shape, classify_output_num = nb_classes)#keras model 98 | # 99 | #from sklearn.grid_search import GridSearchCV 100 | # 101 | #param_grid = dict( 102 | # my_optimizer = ['Adadelta','RMSprop'], 103 | # batch_size = [10,20,30,50], 104 | # nb_epoch = [nb_epoch], 105 | # validation_data=[[X_test,Y_test]], 106 | # callbacks=[[ealystop, reduce_lr]], 107 | # verbose=[1] 108 | # ) 109 | # 110 | #grid_model = GridSearchCV(estimator = keras_model, param_grid = param_grid, n_jobs=1)#scoring=make_scorer(mean_squared_error) 111 | #grid_model.fit(X_train, Y_train) 112 | 113 | 114 | 115 | 116 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 26 16:04:20 2017 4 | @author: Administrator 5 | """ 6 | #logBasePath = "D:/data/mylog/KerasDL/" 7 | #rootPath = r'D:/data/HSI' 8 | import time 9 | start = time.clock() 10 | 11 | rootPath = "G:/data/HSI" 12 | logBasePath = "G:/data/mylog/KerasDL/HSI_resNet" 13 | 14 | block_size = 13 15 | test_size = 0.9 16 | #validate_size = 0.8 17 | nb_epoch = 600 18 | nb_classes = 16 19 | batch_size = 25 20 | 21 | def parse_args(): 22 | import argparse 23 | parser = argparse.ArgumentParser() 24 | parser.add_argument("-block_size",help="HSI block window") 25 | parser.add_argument("-batch_size",help="trainnig batch") 26 | parser.add_argument("-test_size") 27 | parser.set_defaults(test_size=0.9) 28 | args = parser.parse_args() 29 | return args 30 | args = parse_args() 31 | print(args) 32 | block_size = int(args.block_size) 33 | batch_size = int(args.batch_size) 34 | test_size = float(args.test_size) 35 | 36 | #是否使用pca 37 | use_pca = True 38 | n_components = 30 39 | if use_pca ==True: 40 | input_shape = (block_size,block_size,n_components) 41 | else: 42 | input_shape = (block_size,block_size,200) 43 | 44 | 45 | #%% 46 | from HSIDatasetLoad import * 47 | from keras.utils import np_utils 48 | 49 | HSI = HSIData(rootPath) 50 | X_data = HSI.X_data 51 | Y_data = HSI.Y_data 52 | data_source = HSI.data_source 53 | idx_data = HSI.idx_data 54 | 55 | l2_lr = 0.1 56 | #是否使用PCA降维 57 | if use_pca==True: 58 | data_source = HSI.PCA_data_Source(data_source,n_components=n_components) 59 | 60 | X_data_nei = HSI.getNeighborData(data_source,idx_data,block_size) 61 | 62 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 63 | X_train_nei,X_test_nei,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data_nei,Y_data,idx_data,16,test_size = test_size) 64 | 65 | 66 | #%% 67 | from keras.layers import MaxPooling2D,Input,Dense,Dropout,Flatten,Convolution2D,Activation,merge 68 | from keras.layers.normalization import BatchNormalization 69 | from keras.models import Model 70 | from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping 71 | from keras.utils.visualize_util import plot 72 | from keras.optimizers import adadelta 73 | from keras.regularizers import l2 74 | from keras.initializations import glorot_normal 75 | 76 | def identity_block(x,nb_filter,kernel_size=3): 77 | k1,k2,k3 = nb_filter 78 | out = Convolution2D(k1,1,1)(x) 79 | out = BatchNormalization(axis= -1)(out) 80 | out = Activation('relu')(out) 81 | 82 | out = Convolution2D(k2,kernel_size,kernel_size,border_mode='same')(out) 83 | out = BatchNormalization(axis=-1)(out) 84 | out = Activation('relu')(out) 85 | 86 | out = Convolution2D(k3,1,1)(out) 87 | out = BatchNormalization(axis=-1)(out) 88 | 89 | 90 | out = merge([out,x],mode='sum') 91 | out = Activation('relu')(out) 92 | return out 93 | 94 | def conv_block(x,nb_filter,kernel_size=3): 95 | k1,k2,k3 = nb_filter 96 | 97 | out = Convolution2D(k1,1,1)(x) 98 | out = BatchNormalization(axis= -1)(out) 99 | out = Activation('relu')(out) 100 | 101 | out = Convolution2D(k2,kernel_size,kernel_size,border_mode='same')(x) 102 | out = BatchNormalization(axis= -1)(out) 103 | out = Activation('relu')(out) 104 | 105 | out = Convolution2D(k3,1,1)(out) 106 | out = BatchNormalization(axis= -1)(out) 107 | out = MaxPooling2D()(out) 108 | 109 | x = Convolution2D(k3,1,1)(x) 110 | x = BatchNormalization(axis= -1)(x) 111 | x = MaxPooling2D()(x) 112 | 113 | out = merge([out,x],mode='sum') 114 | out = Activation('relu')(out) 115 | return out 116 | 117 | border_name = "same" 118 | def get_CNN2d_model(input_shape, classify_output_num): 119 | input_tensor = Input(input_shape) 120 | 121 | res1 = conv_block(input_tensor,[64,64,256],3) 122 | # res2 = identity_block(res1,[64,64,256],3) 123 | # res3 = identity_block(res2,[128,128,256],3) 124 | # res4 = identity_block(res3,[128,128,256],3) 125 | 126 | flat1 = Flatten()(res1) 127 | dense1 = Dense(1024,activation="relu",W_regularizer=l2(l2_lr))(flat1) 128 | bn5 = BatchNormalization()(dense1) 129 | drop5 = Dropout(0.3)(bn5) 130 | 131 | dense2 = Dense(1024,activation="relu",W_regularizer=l2(l2_lr))(drop5) 132 | bn6 = BatchNormalization()(dense2) 133 | drop6 = Dropout(0.3)(bn6) 134 | 135 | dense3 = Dense(nb_classes,activation="softmax")(drop6) 136 | 137 | model = Model(input = input_tensor,output = dense3) 138 | model.compile(loss='categorical_crossentropy',#categorical_crossentropy 139 | optimizer="RMSprop", 140 | metrics=['accuracy']) 141 | return model 142 | 143 | 144 | 145 | 146 | #%% fit model 147 | """ 148 | PCA,3*3领域,批规范化,l2范数 149 | categorical_crossentropy,adadelta,pca,block9,test0.9,l2_lr = 0.1,batch_size = 32,res3 150 | Epoch 1000/1000 151 | 1036/1036 [==============================] - 7s - loss: 0.0021 - acc: 1.0000 - val_loss: 0.3779 - val_acc: 0.9109 152 | categorical_crossentropy,adadelta,pca,block11,test0.9,l2_lr = 0.1,batch_size = 32,res4 153 | Epoch 1000/1000 154 | 1036/1036 [==============================] - 10s - loss: 0.0046 - acc: 0.9990 - val_loss: 0.2558 - val_acc: 0.9347 155 | categorical_crossentropy,adadelta,pca,block7,test0.9,l2_lr = 0.1,batch_size = 32,res4 156 | Epoch 400/1000 157 | 1036/1036 [==============================] - 6s - loss: 0.0067 - acc: 1.0000 - val_loss: 0.6330 - val_acc: 0.8607 158 | categorical_crossentropy,adadelta,pca,block5,test0.9,l2_lr = 0.1,batch_size = 32,res4 159 | Epoch 1000/1000 160 | 1036/1036 [==============================] - 5s - loss: 0.0055 - acc: 1.0000 - val_loss: 0.8763 - val_acc: 0.8356 161 | categorical_crossentropy,adadelta,nopca,block5,test0.9,l2_lr = 0.1,batch_size = 32,res4 162 | Epoch 276/1000 163 | 1036/1036 [==============================] - 11s - loss: 0.0051 - acc: 1.0000 - val_loss: 0.3141 - val_acc: 0.9149 164 | categorical_crossentropy,adadelta,nopca,block13,test0.9,l2_lr = 0.1,batch_size = 32,res4 165 | och 1000/1000 166 | 1036/1036 [==============================] - 11s - loss: 0.0026 - acc: 1.0000 - val_loss: 0.2811 - val_acc: 0.9285 167 | 168 | RMSprop: 169 | ############# batchsize ################ 170 | categorical_crossentropy,RMSprop:,pca,block11,test0.9,l2_lr = 0.1,batch_size = 25,res4 171 | 0.9885 172 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.1,batch_size = 25,res4 173 | 0.9901 174 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.1,batch_size = 25,res4 175 | 0.9846 176 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.1,batch_size = 35,res4 177 | 0.9856 178 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.1,batch_size = 40,res4 179 | 0.9851 180 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.1,batch_size = 45,res4 181 | 0.9847 182 | 183 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.1,batch_size = 25,res2 184 | 9896 185 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.1,batch_size = 25,res1 186 | 9851 187 | categorical_crossentropy,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res2 188 | 9880,500次迭代9870,后面缓慢上升,有触到9889 189 | categorical_crossentropy,残差特征图128而不是256,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res2 190 | 9864, 191 | categorical_crossentropy,RMSprop,pca15,block21,test0.9,l2_lr = 0.1,batch_size = 25,res1 192 | 9838 193 | categorical_crossentropy,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res1 194 | 9833 195 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling)1763.8 196 | 9885,9815,9841,9877,9893 197 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,残差层后跟卷积层)2527s 198 | 9862 199 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,64,64,64)2527s 200 | 9812 201 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,64,64,128)2527s 202 | 9796 203 | categorical_crossentropy,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling)1815s 204 | 9810 205 | categorical_crossentropy,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,残差层后跟卷积层)2527s 206 | 9889 207 | categorical_crossentropy,RMSprop,pca15,block7,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,)1639s 208 | 9759 209 | categorical_crossentropy,RMSprop,pca15,block9,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,)1639s 210 | 9832 211 | 212 | ############# l2 ################ 213 | categorical_crossentropy,RMSprop,pca30,block11,test0.9,l2_lr = 0.5,batch_size = 40,res4 214 | Epoch 594/600 215 | 1036/1036 [==============================] - 7s - loss: 0.0040 - acc: 1.0000 - val_loss: 0.2569 - val_acc: 0.9401 216 | categorical_crossentropy,RMSprop,pca,block11,test0.9,l2_lr = 1,batch_size = 40,res4 217 | Epoch 584/600 218 | 1036/1036 [==============================] - 7s - loss: 0.0068 - acc: 1.0000 - val_loss: 0.2215 - val_acc: 0.9460 219 | categorical_crossentropy,RMSprop,pca,block11,test0.9,l2_lr = 0.01,batch_size = 40,res4 220 | Epoch 578/600 221 | 1036/1036 [==============================] - 7s - loss: 2.3365e-04 - acc: 1.0000 - val_loss: 0.2480 - val_acc: 0.9490 222 | categorical_crossentropy,RMSprop,pca,block11,test0.9,l2_lr = 0.01,batch_size = 40,res4,filter_size:3,3,5,5 223 | Epoch 484/600 224 | 1036/1036 [==============================] - 7s - loss: 0.0014 - acc: 1.0000 - val_loss: 0.2540 - val_acc: 0.9507 225 | categorical_crossentropy,RMSprop,pca,block11,test0.9,l2_lr = 0.01,batch_size = 40,res4,filter_size:3,3,3,5 226 | Epoch 518/600 227 | 1036/1036 [==============================] - 7s - loss: 0.0015 - acc: 1.0000 - val_loss: 0.2327 - val_acc: 0.9484 228 | """ 229 | #plot(CNN2d_model,to_file=logBasePath+"/CNN2d_pca_model.png",show_shapes=True) 230 | 231 | #myLogger = MyProgbarLogger(to_file=logBasePath+"/CNN2d_pca_model.log") 232 | reduce_lr = ReduceLROnPlateau(patience = 50, verbose =1) 233 | ealystop = EarlyStopping(monitor='val_loss',patience =100) 234 | CNN2d_model = get_CNN2d_model(input_shape,nb_classes) 235 | csvLog = CSVLogger(logBasePath+"/"+str(batch_size)+".log") 236 | CNN2d_model.fit(X_train_nei,Y_train,nb_epoch=nb_epoch,batch_size=batch_size,verbose=1, 237 | validation_data=[X_test_nei,Y_test],callbacks=[csvLog,reduce_lr,ealystop]) 238 | 239 | end = time.clock() 240 | print ("read: %f s" % (end - start)) 241 | 242 | #%% fit model 243 | #from keras.wrappers.scikit_learn import KerasClassifier 244 | #keras_model = KerasClassifier(get_CNN2d_model, input_shape = input_shape, classify_output_num = nb_classes)#keras model 245 | # 246 | #from sklearn.grid_search import GridSearchCV 247 | #from sklearn.cross_validation import StratifiedShuffleSplit 248 | # 249 | #param_grid = dict( 250 | # batch_size = [20,25,30,35,40,45], 251 | # nb_epoch = [nb_epoch], 252 | ## validation_split=[0.001], 253 | # validation_data=[[X_test_nei,Y_test]], 254 | # callbacks=[[ealystop, reduce_lr]], 255 | # ) 256 | # 257 | #ssp = StratifiedShuffleSplit(Y_data,n_iter=1,test_size=test_size) 258 | #grid_model = GridSearchCV(estimator = keras_model, param_grid = param_grid, n_jobs=1,cv=ssp,refit=False)#scoring=make_scorer(mean_squared_error) 259 | #Y_train = np_utils.to_categorical(Y_train, nb_classes) 260 | #grid_model.fit(X_data_nei, Y_data) 261 | 262 | 263 | #%% 写入详细分类结果 264 | from util import cateAccuracy 265 | def sample_count(Y): 266 | sample_count_train = {} 267 | if len(Y.shape)!=1: 268 | #转换onehot编码 269 | Y=np_utils.categorical_probas_to_classes(Y) 270 | for i in set(Y): 271 | sample_count_train[i] = list(Y).count(i) 272 | return sample_count_train 273 | 274 | total_accu,accu = cateAccuracy(CNN2d_model,X_test_nei,Y_test) 275 | train_count = sample_count(Y_train) 276 | test_count = sample_count(Y_test) 277 | 278 | 279 | f = open("result17_res1_30_99.txt","a") 280 | f.writelines(str(total_accu)+"\n") 281 | for i in range(len(accu)): 282 | line = str(accu[i])+","+str(train_count[i])+","+str(test_count[i])+"\n" 283 | f.writelines(line) 284 | 285 | f.write("=========================================================\n") 286 | f.close() 287 | 288 | #pred = CNN2d_model.predict(X_test_nei,batch_size=1) 289 | #from sklearn.metrics import classification_report,accuracy_score 290 | #accu = accuracy_score(np_utils.categorical_probas_to_classes(pred),np_utils.categorical_probas_to_classes(Y_test)) 291 | #from visualize import plot_result 292 | #plot_result(Y_data,idx_data)#graoud_truth 293 | #plot_train(Y_data,idx_data,idx_train)#train 294 | #plot_result(np_utils.categorical_probas_to_classes(pred),idx_test)#test_result 295 | # 296 | #np.savetxt("Y_data.txt",Y_data) 297 | #np.savetxt("idx_data.txt",idx_data) 298 | #np.savetxt("idx_test.txt",idx_test) 299 | #np.savetxt("idx_train.txt",idx_train) 300 | #np.savetxt("pred.txt",pred) 301 | 302 | 303 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/HSIDataLoad.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import numpy as np 3 | from keras.utils import np_utils 4 | from sklearn.cross_validation import StratifiedShuffleSplit 5 | 6 | #%% 7 | """ 8 | X_train,Y_train,idx_train,X_test,Y_test,idx_test,data_source=dataLoad3(rootPath) 9 | data_source: 21025*200, 有训练集和测试集的index编号。 10 | 训练集和测试集直接从文件中读取 11 | """ 12 | 13 | def dataLoad3(rootPath): 14 | # rootPath = r'G:\OneDrive\codes\python\RandomForest\data' 15 | # rootPath = r'D:\OneDrive\codes\python\RandomForest\data' 16 | # trainPath = rootPath+r'\train_binal' 17 | # testPath = rootPath+r'\test_binal' 18 | # imgPath = rootPath+r'\img_binal' 19 | trainPath = rootPath+r'\train' 20 | testPath = rootPath+r'\test' 21 | 22 | trainlabPath = rootPath+r'\trainlab' 23 | trainidxPath = rootPath+r'\trainidx' 24 | testlabPath = rootPath+r'\testlab' 25 | testidxPath = rootPath+r'\testidx' 26 | imgPath = rootPath+r'\img' 27 | 28 | X_train = np.loadtxt(open(trainPath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 29 | Y_train = np.loadtxt(open(trainlabPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 30 | idx_train = np.loadtxt(open(trainidxPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 31 | X_test = np.loadtxt(open(testPath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 32 | Y_test = np.loadtxt(open(testlabPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 33 | idx_test = np.loadtxt(open(testidxPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 34 | img = np.loadtxt(open(imgPath,"rb"),delimiter=',',skiprows=0,dtype=np.float) 35 | data_source = img.transpose() 36 | 37 | return X_train,Y_train,idx_train,X_test,Y_test,idx_test,data_source 38 | 39 | #%% dataset2 40 | """ 41 | X_data,Y_data,data_source,idx_data=datasetLoad2(rootPath)#未划分训练集测试集的数据(不包括背景点) 42 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 43 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(X_data,Y_data,idx_data,num_calss=16,test_size=tes_size) 44 | data_source: 21025*200, 有训练集和测试集的index编号 45 | 手动分割训练集和测试集 46 | """ 47 | 48 | def datasetSplit(data,lab,idx_of_data,num_calss,test_size): 49 | ssp = StratifiedShuffleSplit(lab,n_iter=1,test_size=test_size) 50 | for trainlab,testlab in ssp: 51 | print("train:\n%s\ntest:\n%s" % (trainlab,testlab)) 52 | X_train=data[trainlab] 53 | X_test=data[testlab] 54 | Y_train=np_utils.to_categorical(lab[trainlab],num_calss) 55 | Y_test=np_utils.to_categorical(lab[testlab],num_calss) 56 | idx_train=idx_of_data[trainlab] 57 | idx_test=idx_of_data[testlab] 58 | return X_train,X_test,Y_train,Y_test,idx_train,idx_test 59 | 60 | 61 | def datasetLoad2(rootPath): 62 | # rootPath = r'D:/data/HSI' 63 | Xpath=rootPath+'/labeled_data.2.28.txt' 64 | Ypath=rootPath+'/data_label.2.28.txt' 65 | imgPath=rootPath+'/data_source.2.28.txt' 66 | idxPath=rootPath+'/labeled_idx.2.28.txt' 67 | 68 | X_data = np.loadtxt(open(Xpath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 69 | Y_data = np.loadtxt(open(Ypath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 70 | Y_data=np_utils.to_categorical(Y_data-1,16) 71 | data_source=np.loadtxt(open(imgPath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 72 | idx_data=np.loadtxt(open(idxPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 73 | idx_data=idx_data-1 74 | 75 | return X_data,Y_data,data_source,idx_data 76 | 77 | #%% 将数据由1D索引变为2D 78 | 79 | #%% 80 | """ 81 | # X_data,Y_data,data_source,idx_data=datasetLoad1(rootPath,block_size=7) 82 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 83 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(X_data,Y_data,idx_data,num_calss=16,test_size=0.9) 84 | # data_source: 21025*200, 有训练集和测试集的index编号 85 | # 返回:n*200*3*3 的高光谱数据 和标签 86 | """ 87 | 88 | #1d索引转换为2d 89 | def indexTransform2D(index_1d): 90 | xidx = index_1d/145 91 | yidx = index_1d%145 92 | return int(xidx),int(yidx) 93 | 94 | #取数据的领域范围,若超出边界,以边界点来代替 95 | def neighbourhood(idx,block_size): 96 | xidx,yidx = indexTransform2D(idx) 97 | x_neighbourhood = [] #x的领域范围 98 | y_neighbourhood = [] 99 | x_border_left = xidx-int(block_size/2) 100 | x_border_right = xidx+int(block_size/2) 101 | y_border_left = yidx-int(block_size/2) 102 | y_border_right = yidx+int(block_size/2) 103 | if x_border_left<0: 104 | x_neighbourhood.extend(abs(x_border_left)*[0])#超出边界,以边界点来代替 105 | x_neighbourhood.extend(range(0,xidx)) 106 | else: 107 | x_neighbourhood.extend(range(x_border_left,xidx)) 108 | 109 | if x_border_right>144: 110 | x_neighbourhood.extend(range(xidx,145)) 111 | x_neighbourhood.extend(abs(x_border_right-144)*[144]) 112 | else: 113 | x_neighbourhood.extend(range(xidx,x_border_right+1)) 114 | 115 | if y_border_left<0: 116 | y_neighbourhood.extend(abs(y_border_left)*[0])#超出边界,以边界点来代替 117 | y_neighbourhood.extend(range(0,yidx)) 118 | else: 119 | y_neighbourhood.extend(range(y_border_left,yidx)) 120 | 121 | if y_border_right>144: 122 | y_neighbourhood.extend(range(yidx,145)) 123 | y_neighbourhood.extend(abs(y_border_right-144)*[144]) 124 | else: 125 | y_neighbourhood.extend(range(yidx,y_border_right+1)) 126 | return x_neighbourhood,y_neighbourhood 127 | 128 | 129 | """ 130 | idx_data:有标签的数据的1D索引 131 | data_source : 3D数据块 132 | block_size: 选取像素点周围邻域,组成一个更大的数据块,block_size取奇数 133 | """ 134 | def blockTansform(idx_data,data_source,block_size): 135 | samples = len(idx_data) 136 | X_data = np.zeros((samples,block_size,block_size,200)) 137 | for ii,idx in enumerate(idx_data): 138 | x_range,y_range = neighbourhood(idx,block_size)#求邻域数据的行列范围 139 | for iidx,i in enumerate(x_range): 140 | for jidx,j in enumerate(y_range): 141 | X_data[ii,iidx,jidx,:] = data_source[i,j,:] 142 | return X_data 143 | 144 | def datasetLoad1(rootPath,block_size): 145 | # rootPath = r'D:/data/HSI' 146 | Ypath=rootPath+'/data_label.2.28.txt' 147 | imgPath=rootPath+'/data_source.2.28.txt' 148 | idxPath=rootPath+'/labeled_idx.2.28.txt' 149 | 150 | 151 | Y_data = np.loadtxt(open(Ypath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 152 | Y_data=np_utils.to_categorical(Y_data-1,16) 153 | 154 | data_source=np.loadtxt(open(imgPath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 155 | new_data_source = np.zeros((145,145,200)) 156 | for i in range(200): 157 | new_data_source[:,:,i] = data_source[:,i].reshape(145,145) 158 | 159 | idx_data=np.loadtxt(open(idxPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 160 | idx_data=idx_data-1 161 | 162 | X_data = blockTansform(idx_data,new_data_source,block_size) 163 | 164 | return X_data,Y_data,new_data_source,idx_data 165 | 166 | #%% PCA提取 167 | def PCA_data_Source(data_source, idx_data, n_components): 168 | from sklearn.decomposition import PCA 169 | pca = PCA(n_components=n_components,svd_solver="full") 170 | new_data_source = pca.fit(data_source).transform(data_source) 171 | X_data = new_data_source[idx_data,:] 172 | 173 | return X_data 174 | 175 | 176 | #%% 图像增大数据量 177 | def shift(X_sample, padding): 178 | a = np.zeros((padding))#两边补0的长度 179 | tmp = np.concatenate((a,X_sample)) 180 | tmp = np.concatenate((tmp,a)) 181 | start_window = np.random.randint(padding*2) 182 | return tmp[start_window:start_window+200] 183 | 184 | def messedup(X_sample, mess_window): 185 | import copy 186 | tmp = copy.copy(X_sample) 187 | dim = X_sample.size 188 | start_window = np.random.randint((dim-mess_window)) 189 | tmp[start_window:start_window+10] = 0 190 | return tmp 191 | 192 | def data_Augmentation(X_data,Y_data,padding,mess_window): 193 | sample,dim = X_data.shape 194 | sample,lab = Y_data.shape 195 | new_X_data = np.zeros((sample*2,dim)) 196 | new_Y_data = np.zeros((sample*2,lab)) 197 | 198 | for i in range(sample): 199 | new_X_data[2*i,:] = shift(X_data[i,:], padding) 200 | new_X_data[2*i+1,:] = messedup(X_data[i,:], mess_window) 201 | new_Y_data[2*i,:] = Y_data[i,:] 202 | new_Y_data[2*i+1,:] = Y_data[i,:] 203 | 204 | return new_X_data,new_Y_data 205 | 206 | 207 | 208 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/HSIDatasetLoad.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Mar 2 21:29:07 2017 4 | 5 | @author: Administrator 6 | """ 7 | import numpy as np 8 | from keras.utils import np_utils 9 | from sklearn.cross_validation import StratifiedShuffleSplit 10 | from HSI_util import load_mat 11 | 12 | 13 | """ 14 | HSI = HSIData(rootPath,"Indian") 15 | X_data = HSI.X_data #(10336,200) 16 | Y_data = HSI.Y_data #(10336,16) 17 | data_source = HSI.data_source #(21025,200) 18 | idx_data = HSI.idx_data #(10336,) 19 | """ 20 | class HSIData: 21 | # def __init__(self,rootPath): 22 | # Xpath=rootPath+'/labeled_data.2.28.txt' 23 | # Ypath=rootPath+'/data_label.2.28.txt' 24 | # imgPath=rootPath+'/data_source.2.28.txt' 25 | # idxPath=rootPath+'/labeled_idx.2.28.txt' 26 | # 27 | # self.X_data = np.loadtxt(open(Xpath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 28 | # Y_data = np.loadtxt(open(Ypath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 29 | # self.Y_data=np_utils.to_categorical(Y_data-1,16) 30 | # self.data_source=np.loadtxt(open(imgPath,"rb"),delimiter=",",skiprows=0,dtype=np.float) 31 | # idx_data=np.loadtxt(open(idxPath,"rb"),delimiter=",",skiprows=0,dtype=np.int) 32 | # self.idx_data=idx_data-1 33 | # self.rows =145 34 | # self.cols =145 35 | # self.ctgs=16 36 | """ 37 | 两个数据集加载 38 | """ 39 | def __init__(self, rootPath, flag="Indian"): 40 | img,labeled_data,data_label,labeled_idx,rows,cols,bands,ctgs = load_mat(rootPath, flag) 41 | # img,labeled_data,data_label,labeled_idx,rows,cols,bands,ctgs = load_mat(rootPath, flag="Pavia") 42 | self.X_data=labeled_data 43 | self.Y_data=np_utils.to_categorical(data_label-1,ctgs) 44 | self.idx_data=labeled_idx 45 | self.data_source=img 46 | self.rows = rows 47 | self.cols=cols 48 | self.bands=bands 49 | self.ctgs=ctgs 50 | 51 | 52 | """ 53 | X_data,Y_data,data_source,idx_data=datasetLoad2(rootPath)#未划分训练集测试集的数据(不包括背景点) 54 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 55 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(X_data,Y_data,idx_data,num_calss=16,test_size=tes_size) 56 | data_source: 21025*200, 有训练集和测试集的index编号 57 | 手动分割训练集和测试集 58 | """ 59 | def datasetSplit(self,data,lab,idx_of_data,num_calss,test_size): 60 | ssp = StratifiedShuffleSplit(lab,n_iter=1,test_size=test_size) 61 | for trainlab,testlab in ssp: 62 | print("train:\n%s\ntest:\n%s" % (trainlab,testlab)) 63 | X_train=data[trainlab] 64 | X_test=data[testlab] 65 | Y_train=np_utils.to_categorical(lab[trainlab],num_calss) 66 | Y_test=np_utils.to_categorical(lab[testlab],num_calss) 67 | idx_train=idx_of_data[trainlab] 68 | idx_test=idx_of_data[testlab] 69 | return X_train,X_test,Y_train,Y_test,idx_train,idx_test 70 | 71 | #%% PCA提取主成分 72 | def PCA_data_Source(self,data_source, n_components): 73 | from sklearn.decomposition import PCA 74 | pca = PCA(n_components = n_components,svd_solver="full") 75 | new_data_source = pca.fit(data_source).transform(data_source) 76 | 77 | return new_data_source 78 | 79 | 80 | #%% 图像增大数据量 81 | def shift(self,X_sample, padding): 82 | a = np.zeros((padding))#两边补0的长度 83 | tmp = np.concatenate((a,X_sample)) 84 | tmp = np.concatenate((tmp,a)) 85 | start_window = np.random.randint(padding*2) 86 | return tmp[start_window:start_window+200] 87 | 88 | def messedup(self,X_sample, mess_window): 89 | import copy 90 | tmp = copy.copy(X_sample) 91 | dim = X_sample.size 92 | start_window = np.random.randint((dim-mess_window)) 93 | tmp[start_window:start_window+10] = 0 94 | return tmp 95 | 96 | def data_Augmentation(self,X_data,Y_data,padding,mess_window): 97 | sample,dim = X_data.shape 98 | sample,lab = Y_data.shape 99 | new_X_data = np.zeros((sample*2,dim)) 100 | new_Y_data = np.zeros((sample*2,lab)) 101 | 102 | for i in range(self,sample): 103 | new_X_data[2*i,:] = self.shift(X_data[i,:], padding) 104 | new_X_data[2*i+1,:] = self.messedup(X_data[i,:], mess_window) 105 | new_Y_data[2*i,:] = Y_data[i,:] 106 | new_Y_data[2*i+1,:] = Y_data[i,:] 107 | return new_X_data,new_Y_data 108 | #%% 加载邻域数据 109 | """ 110 | X_data = HSI.getNeighborData(data_source=data_source,idx_data=idx_data,block_size=block_size) 111 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 112 | X_train,X_test,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data,Y_data,idx_data,16,test_size = test_size) 113 | # data_source: 21025*200, 有训练集和测试集的index编号 114 | # 返回:n*200*k*k 的高光谱数据 和标签 115 | """ 116 | 117 | #1d索引转换为2d 118 | def indexTransform2D(self,index_1d): 119 | xidx = index_1d/self.cols 120 | if(xidx==0 & index_1d>self.cols): 121 | xidx=self.rows 122 | yidx = index_1d%self.cols 123 | return int(xidx),int(yidx) 124 | 125 | #取数据的领域范围,若超出边界,以边界点来代替 126 | def neighbourhood(self,idx,block_size): 127 | xidx,yidx = self.indexTransform2D(idx) 128 | x_neighbourhood = [] #x的领域范围 129 | y_neighbourhood = [] 130 | x_border_left = xidx-int(block_size/2) 131 | x_border_right = xidx+int(block_size/2) 132 | y_border_left = yidx-int(block_size/2) 133 | y_border_right = yidx+int(block_size/2) 134 | if x_border_left<0: 135 | x_neighbourhood.extend(abs(x_border_left)*[0])#超出边界,以边界点来代替 136 | x_neighbourhood.extend(range(0,xidx)) 137 | else: 138 | x_neighbourhood.extend(range(x_border_left,xidx)) 139 | 140 | if x_border_right>self.rows-1: 141 | x_neighbourhood.extend(range(xidx,self.rows)) 142 | x_neighbourhood.extend(abs(x_border_right-self.rows+1)*[self.rows-1]) 143 | else: 144 | x_neighbourhood.extend(range(xidx,x_border_right+1)) 145 | 146 | if y_border_left<0: 147 | y_neighbourhood.extend(abs(y_border_left)*[0])#超出边界,以边界点来代替 148 | y_neighbourhood.extend(range(0,yidx)) 149 | else: 150 | y_neighbourhood.extend(range(y_border_left,yidx)) 151 | 152 | if y_border_right>self.cols-1: 153 | y_neighbourhood.extend(range(yidx,self.cols)) 154 | y_neighbourhood.extend(abs(y_border_right-self.cols+1)*[self.cols-1]) 155 | else: 156 | y_neighbourhood.extend(range(yidx,y_border_right+1)) 157 | return x_neighbourhood,y_neighbourhood 158 | 159 | 160 | """ 161 | idx_data:有标签的数据的1D索引 162 | data_source : 3D数据块 163 | block_size: 选取像素点周围邻域,组成一个更大的数据块,block_size取奇数 164 | """ 165 | def blockTansform(self,idx_data,data_source,block_size): 166 | samples = len(idx_data) 167 | bands = data_source.shape[2] 168 | X_data = np.zeros((samples,block_size,block_size,bands)) 169 | for ii,idx in enumerate(idx_data): 170 | x_range,y_range = self.neighbourhood(idx,block_size)#求邻域数据的行列范围 171 | # print(y_range) 172 | for iidx,i in enumerate(x_range): 173 | for jidx,j in enumerate(y_range): 174 | X_data[ii,iidx,jidx,:] = data_source[i,j,:] 175 | return X_data 176 | 177 | 178 | def getNeighborData(self,data_source,idx_data,block_size): 179 | bands = data_source.shape[1] 180 | new_data_source = np.zeros((self.rows,self.cols,bands)) 181 | for i in range(bands): 182 | new_data_source[:,:,i] = data_source[:,i].reshape(self.rows,self.cols) 183 | 184 | X_data = self.blockTansform(idx_data,new_data_source,block_size) 185 | return X_data -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/HSIDatasetLoad.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/HSI_ResNet/DataLoad/HSIDatasetLoad.pyc -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/HSI_util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Jul 15 17:29:43 2017 4 | 5 | @author: Administrator 6 | """ 7 | import numpy as np 8 | import scipy.io as sio 9 | 10 | rootPath = "G:/data/HSI" 11 | 12 | 13 | def reshape_2dat(dat): 14 | rows,cols,bands = dat.shape 15 | img=np.zeros((bands,rows*cols)) 16 | for i in range(bands): 17 | x=dat[:,:,i] 18 | x=np.reshape(x,(1,rows*cols)) 19 | img[i,:]=x 20 | return img,rows,cols,bands 21 | 22 | def linear(X,a,b): 23 | X=X.flatten() 24 | x=np.sort(X) 25 | L=x.shape[0] 26 | lmin=x[max(int(np.ceil(L*a)),1)] 27 | lmax=x[min(int(np.floor(L*b)),L-1)] 28 | return lmax,lmin 29 | 30 | def line_dat(dat,rdown,rup): 31 | img=dat 32 | lmax,lmin = linear(dat, rdown, rup) 33 | img[datlmax] = lmax#去掉顶端和末端 35 | img = (img-lmin) / lmax#归一化 36 | return img 37 | 38 | """ 39 | #img,labeled_data,data_label,labeled_idx,rows,cols,bands,ctgs = load_mat(rootPath, flag="Indian") 40 | #img,labeled_data,data_label,labeled_idx,rows,cols,bands,ctgs = load_mat(rootPath, flag="Pavia") 41 | """ 42 | def load_mat(rootPath, flag="Indian"): 43 | if flag=="Indian": 44 | data=sio.loadmat(rootPath+"/Indian_pines_corrected.mat") 45 | label=sio.loadmat(rootPath+"/Indian_gt.mat") 46 | dat=data['indian_pines_corrected'] 47 | lab=label['indian_pines_gt'] 48 | img,rows,cols,bands=reshape_2dat(dat) 49 | img = np.transpose(img) 50 | elif flag=="Pavia": 51 | data=sio.loadmat(rootPath+"/PaviaU.mat") 52 | label=sio.loadmat(rootPath+"/PaviaU_gt.mat") 53 | dat=data['paviaU'] 54 | lab=label['paviaU_gt'] 55 | img,rows,cols,bands=reshape_2dat(dat) 56 | img = np.transpose(img) 57 | rdown = 0.001 58 | rup = 0.999 59 | img = line_dat(img, rdown, rup) 60 | img_gt = lab.flatten() 61 | ctgs=np.max(lab) 62 | 63 | labeled_idx = [] 64 | labeled_data = [] 65 | data_label = [] 66 | for idx,i in enumerate(img_gt): 67 | if i!=0: 68 | labeled_idx.append(idx) 69 | labeled_data.append(img[int(idx),:]) 70 | data_label.append(i) 71 | labeled_data = np.array(labeled_data) 72 | data_label = np.array(data_label) 73 | labeled_idx = np.array(labeled_idx) 74 | 75 | return img,labeled_data,data_label,labeled_idx,rows,cols,bands,ctgs 76 | 77 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDataLoad.cpython-35-USER-20160223KN-2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDataLoad.cpython-35-USER-20160223KN-2.pyc -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDataLoad.cpython-35-USER-20160223KN.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDataLoad.cpython-35-USER-20160223KN.pyc -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDataLoad.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDataLoad.cpython-35.pyc -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDatasetLoad.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSIDatasetLoad.cpython-35.pyc -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSI_util.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/HSI_ResNet/DataLoad/__pycache__/HSI_util.cpython-35.pyc -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/DataLoad/__pycache__/util.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/HSI_ResNet/DataLoad/__pycache__/util.cpython-35.pyc -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/HSI_ResNet.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 26 16:04:20 2017 4 | @author: Administrator 5 | """ 6 | logBasePath = "D:/data/mylog/KerasDL/" 7 | rootPath = r'D:/data/HSI' 8 | import time 9 | start = time.clock() 10 | 11 | #rootPath = "G:/data/HSI" 12 | #logBasePath = "G:/data/mylog/KerasDL/HSI_resNet" 13 | 14 | block_size = 13 15 | test_size = 0.9 16 | #validate_size = 0.8 17 | nb_epoch = 600 18 | nb_classes = 16 19 | batch_size = 25 20 | 21 | #def parse_args(): 22 | # import argparse 23 | # parser = argparse.ArgumentParser() 24 | # parser.add_argument("-block_size",help="HSI block window") 25 | # parser.add_argument("-batch_size",help="trainnig batch") 26 | # parser.add_argument("-test_size") 27 | # parser.set_defaults(test_size=0.9) 28 | # args = parser.parse_args() 29 | # return args 30 | #args = parse_args() 31 | #print(args) 32 | #block_size = int(args.block_size) 33 | #batch_size = int(args.batch_size) 34 | #test_size = float(args.test_size) 35 | 36 | #是否使用pca 37 | use_pca = True 38 | n_components = 30 39 | if use_pca ==True: 40 | input_shape = (block_size,block_size,n_components) 41 | else: 42 | input_shape = (block_size,block_size,200) 43 | 44 | 45 | #%% 46 | from HSIDatasetLoad import * 47 | from keras.utils import np_utils 48 | 49 | HSI = HSIData(rootPath,"Pavia") 50 | X_data = HSI.X_data 51 | Y_data = HSI.Y_data 52 | data_source = HSI.data_source 53 | idx_data = HSI.idx_data 54 | 55 | l2_lr = 0.1 56 | #是否使用PCA降维 57 | if use_pca==True: 58 | data_source = HSI.PCA_data_Source(data_source,n_components=n_components) 59 | 60 | X_data_nei = HSI.getNeighborData(data_source,idx_data,block_size) 61 | 62 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 63 | X_train_nei,X_test_nei,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data_nei,Y_data,idx_data,16,test_size = test_size) 64 | 65 | 66 | #%% 67 | from keras.layers import MaxPooling2D,Input,Dense,Dropout,Flatten,Convolution2D,Activation,merge 68 | from keras.layers.normalization import BatchNormalization 69 | from keras.models import Model 70 | from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping 71 | from keras.utils.visualize_util import plot 72 | from keras.optimizers import adadelta 73 | from keras.regularizers import l2 74 | from keras.initializations import glorot_normal 75 | 76 | def identity_block(x,nb_filter,kernel_size=3): 77 | k1,k2,k3 = nb_filter 78 | out = Convolution2D(k1,1,1)(x) 79 | out = BatchNormalization(axis= -1)(out) 80 | out = Activation('relu')(out) 81 | 82 | out = Convolution2D(k2,kernel_size,kernel_size,border_mode='same')(out) 83 | out = BatchNormalization(axis=-1)(out) 84 | out = Activation('relu')(out) 85 | 86 | out = Convolution2D(k3,1,1)(out) 87 | out = BatchNormalization(axis=-1)(out) 88 | 89 | 90 | out = merge([out,x],mode='sum') 91 | out = Activation('relu')(out) 92 | return out 93 | 94 | def conv_block(x,nb_filter,kernel_size=3): 95 | k1,k2,k3 = nb_filter 96 | 97 | out = Convolution2D(k1,1,1)(x) 98 | out = BatchNormalization(axis= -1)(out) 99 | out = Activation('relu')(out) 100 | 101 | out = Convolution2D(k2,kernel_size,kernel_size,border_mode='same')(x) 102 | out = BatchNormalization(axis= -1)(out) 103 | out = Activation('relu')(out) 104 | 105 | out = Convolution2D(k3,1,1)(out) 106 | out = BatchNormalization(axis= -1)(out) 107 | out = MaxPooling2D()(out) 108 | 109 | x = Convolution2D(k3,1,1)(x) 110 | x = BatchNormalization(axis= -1)(x) 111 | x = MaxPooling2D()(x) 112 | 113 | out = merge([out,x],mode='sum') 114 | out = Activation('relu')(out) 115 | return out 116 | 117 | border_name = "same" 118 | def get_CNN2d_model(input_shape, classify_output_num): 119 | input_tensor = Input(input_shape) 120 | 121 | res1 = conv_block(input_tensor,[64,64,256],3) 122 | # res2 = identity_block(res1,[64,64,256],3) 123 | # res3 = identity_block(res2,[128,128,256],3) 124 | # res4 = identity_block(res3,[128,128,256],3) 125 | 126 | flat1 = Flatten()(res1) 127 | dense1 = Dense(1024,activation="relu",W_regularizer=l2(l2_lr))(flat1) 128 | bn5 = BatchNormalization()(dense1) 129 | drop5 = Dropout(0.3)(bn5) 130 | 131 | dense2 = Dense(1024,activation="relu",W_regularizer=l2(l2_lr))(drop5) 132 | bn6 = BatchNormalization()(dense2) 133 | drop6 = Dropout(0.3)(bn6) 134 | 135 | dense3 = Dense(nb_classes,activation="softmax")(drop6) 136 | 137 | model = Model(input = input_tensor,output = dense3) 138 | model.compile(loss='categorical_crossentropy',#categorical_crossentropy 139 | optimizer="RMSprop", 140 | metrics=['accuracy']) 141 | # model.compile(loss='categorical_crossentropy',#categorical_crossentropy 142 | # optimizer="adadelta", 143 | # metrics=['accuracy']) 144 | return model 145 | 146 | 147 | 148 | 149 | #%% fit model 150 | """ 151 | RMSprop: 152 | 153 | 9880,500次迭代9870,后面缓慢上升,有触到9889 154 | categorical_crossentropy,残差特征图128而不是256,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res2 155 | 9864, 156 | categorical_crossentropy,RMSprop,pca15,block21,test0.9,l2_lr = 0.1,batch_size = 25,res1 157 | 9838 158 | categorical_crossentropy,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res1 159 | 9833 160 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling)1763.8 161 | 9885,9815,9841,9877,9893 162 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,残差层后跟卷积层)2527s 163 | 9862 164 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,64,64,64)2527s 165 | 9812 166 | categorical_crossentropy,RMSprop,pca15,block13,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,64,64,128)2527s 167 | 9796 168 | categorical_crossentropy,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling)1815s 169 | 9810 170 | categorical_crossentropy,RMSprop,pca15,block11,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,残差层后跟卷积层)2527s 171 | 9889 172 | categorical_crossentropy,RMSprop,pca15,block7,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,)1639s 173 | 9759 174 | categorical_crossentropy,RMSprop,pca15,block9,test0.9,l2_lr = 0.1,batch_size = 25,res1(残差模块pooling,)1639s 175 | 9832 176 | """ 177 | #plot(CNN2d_model,to_file=logBasePath+"/CNN2d_pca_model.png",show_shapes=True) 178 | 179 | #myLogger = MyProgbarLogger(to_file=logBasePath+"/CNN2d_pca_model.log") 180 | reduce_lr = ReduceLROnPlateau(patience = 50, verbose =1) 181 | ealystop = EarlyStopping(monitor='val_loss',patience =100) 182 | CNN2d_model = get_CNN2d_model(input_shape,nb_classes) 183 | csvLog = CSVLogger(logBasePath+"/"+str(batch_size)+".log") 184 | #CNN2d_model.fit(X_train_nei,Y_train,nb_epoch=nb_epoch,batch_size=batch_size,verbose=1, 185 | # validation_data=[X_test_nei,Y_test],callbacks=[csvLog,reduce_lr,ealystop]) 186 | CNN2d_model.fit(X_train_nei,Y_train,nb_epoch=nb_epoch,batch_size=batch_size,verbose=1, 187 | validation_split=0.1,callbacks=[csvLog,reduce_lr,ealystop]) 188 | 189 | end = time.clock() 190 | print ("read: %f s" % (end - start)) 191 | 192 | #%% fit model 193 | #from keras.wrappers.scikit_learn import KerasClassifier 194 | #keras_model = KerasClassifier(get_CNN2d_model, input_shape = input_shape, classify_output_num = nb_classes)#keras model 195 | # 196 | #from sklearn.grid_search import GridSearchCV 197 | #from sklearn.cross_validation import StratifiedShuffleSplit 198 | # 199 | #param_grid = dict( 200 | # batch_size = [20,25,30,35,40,45], 201 | # nb_epoch = [nb_epoch], 202 | ## validation_split=[0.001], 203 | # validation_data=[[X_test_nei,Y_test]], 204 | # callbacks=[[ealystop, reduce_lr]], 205 | # ) 206 | # 207 | #ssp = StratifiedShuffleSplit(Y_data,n_iter=1,test_size=test_size) 208 | #grid_model = GridSearchCV(estimator = keras_model, param_grid = param_grid, n_jobs=1,cv=ssp,refit=False)#scoring=make_scorer(mean_squared_error) 209 | #Y_train = np_utils.to_categorical(Y_train, nb_classes) 210 | #grid_model.fit(X_data_nei, Y_data) 211 | 212 | 213 | #%% 写入详细分类结果 214 | from util import cateAccuracy 215 | def sample_count(Y): 216 | sample_count_train = {} 217 | if len(Y.shape)!=1: 218 | #转换onehot编码 219 | Y=np_utils.categorical_probas_to_classes(Y) 220 | for i in set(Y): 221 | sample_count_train[i] = list(Y).count(i) 222 | return sample_count_train 223 | 224 | total_accu,accu = cateAccuracy(CNN2d_model,X_test_nei,Y_test) 225 | train_count = sample_count(Y_train) 226 | test_count = sample_count(Y_test) 227 | 228 | 229 | f = open("pavia_result13_30_rmsprop_99.txt","a") 230 | f.writelines(str(total_accu)+"\n") 231 | for i in range(len(accu)): 232 | line = str(accu[i])+","+str(train_count[i])+","+str(test_count[i])+"\n" 233 | f.writelines(line) 234 | 235 | f.write("=========================================================\n") 236 | f.close() 237 | 238 | #pred = CNN2d_model.predict(X_test_nei,batch_size=1) 239 | #from sklearn.metrics import classification_report,accuracy_score 240 | #accu = accuracy_score(np_utils.categorical_probas_to_classes(pred),np_utils.categorical_probas_to_classes(Y_test)) 241 | #from visualize import plot_result 242 | #plot_result(Y_data,idx_data)#graoud_truth 243 | #plot_train(Y_data,idx_data,idx_train)#train 244 | #plot_result(np_utils.categorical_probas_to_classes(pred),idx_test)#test_result 245 | # 246 | #np.savetxt("Y_data.txt",Y_data) 247 | #np.savetxt("idx_data.txt",idx_data) 248 | #np.savetxt("idx_test.txt",idx_test) 249 | #np.savetxt("idx_train.txt",idx_train) 250 | #np.savetxt("pred.txt",pred) 251 | 252 | 253 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/HSI_ResNet_Multi.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 26 16:04:20 2017 4 | @author: Administrator 5 | """ 6 | #logBasePath = "D:/data/mylog/KerasDL/" 7 | #rootPath = r'D:/data/HSI' 8 | 9 | rootPath = "G:/data/HSI" 10 | logBasePath = "G:/data/mylog/KerasDL/CNN_multi_model" 11 | 12 | block_size = 13 13 | test_size = 0.9 14 | #validate_size = 0.8 15 | nb_epoch = 1000 16 | nb_classes = 16 17 | batch_size = 32 18 | 19 | #是否使用pca 20 | use_pca = True 21 | n_components = 30 22 | if use_pca ==True: 23 | input_shape = (block_size,block_size,n_components) 24 | else: 25 | input_shape = (block_size,block_size,200) 26 | 27 | #%% 28 | from HSIDatasetLoad import * 29 | from keras.utils import np_utils 30 | 31 | HSI = HSIData(rootPath) 32 | X_data = HSI.X_data 33 | Y_data = HSI.Y_data 34 | data_source = HSI.data_source 35 | idx_data = HSI.idx_data 36 | 37 | l2_lr = 0.1 38 | #是否使用PCA降维 39 | if use_pca==True: 40 | data_source = HSI.PCA_data_Source(data_source,n_components=n_components) 41 | 42 | X_data_nei = HSI.getNeighborData(data_source,idx_data,block_size) 43 | 44 | Y_data = np_utils.categorical_probas_to_classes(Y_data) 45 | X_train_nei,X_test_nei,Y_train,Y_test,idx_train,idx_test = HSI.datasetSplit(X_data_nei,Y_data,idx_data,16,test_size = test_size) 46 | 47 | #原始谱信息 48 | data_source1 = HSI.data_source 49 | X_train = data_source1[idx_train] 50 | X_train = X_train.reshape(len(X_train),200,1) 51 | X_test = data_source1[idx_test] 52 | X_test = X_test.reshape(len(X_test),200,1) 53 | #每类去一个样本 54 | """ 55 | 迁移学习,多路学习 56 | """ 57 | 58 | 59 | #%% 60 | from keras.layers import MaxPooling2D,Input,Dense,Dropout,Flatten,Convolution2D,Activation,merge 61 | from keras.layers.normalization import BatchNormalization 62 | from keras.models import Model 63 | from keras.callbacks import ReduceLROnPlateau, CSVLogger 64 | from keras.utils.visualize_util import plot 65 | from keras.optimizers import adadelta 66 | from keras.regularizers import l2 67 | 68 | def identity_block(x,nb_filter,kernel_size=3): 69 | k1,k2,k3 = nb_filter 70 | out = Convolution2D(k1,1,1)(x) 71 | out = BatchNormalization(axis= -1)(out) 72 | out = Activation('relu')(out) 73 | 74 | out = Convolution2D(k2,kernel_size,kernel_size,border_mode='same')(out) 75 | out = BatchNormalization(axis=-1)(out) 76 | out = Activation('relu')(out) 77 | 78 | out = Convolution2D(k3,1,1)(out) 79 | out = BatchNormalization(axis=-1)(out) 80 | 81 | 82 | out = merge([out,x],mode='sum') 83 | out = Activation('relu')(out) 84 | return out 85 | 86 | def conv_block(x,nb_filter,kernel_size=3): 87 | k1,k2,k3 = nb_filter 88 | 89 | out = Convolution2D(k1,1,1)(x) 90 | out = BatchNormalization(axis= -1)(out) 91 | out = Activation('relu')(out) 92 | 93 | out = Convolution2D(k2,kernel_size,kernel_size,border_mode='same')(out) 94 | out = BatchNormalization(axis= -1)(out) 95 | out = Activation('relu')(out) 96 | 97 | out = Convolution2D(k3,1,1)(out) 98 | out = BatchNormalization(axis= -1)(out) 99 | 100 | x = Convolution2D(k3,1,1)(x) 101 | x = BatchNormalization(axis= -1)(x) 102 | 103 | out = merge([out,x],mode='sum') 104 | out = Activation('relu')(out) 105 | return out 106 | 107 | 108 | from keras.layers import Convolution1D,MaxPooling1D 109 | def get_CNN1d(input_layer): 110 | 111 | conv1 = Convolution1D(64,3,activation = "relu", 112 | border_mode = "same")(input_layer) 113 | bn1 = BatchNormalization(axis = -1)(conv1) 114 | 115 | conv2 = Convolution1D(128,3,activation = "relu", 116 | border_mode = "same")(bn1) 117 | bn2 = BatchNormalization(axis = -1)(conv2) 118 | drop2 = Dropout(0.3)(bn2) 119 | 120 | conv3 = Convolution1D(128,5,activation = "relu", 121 | border_mode = "same")(drop2) 122 | bn3 = BatchNormalization(axis = -1)(conv3) 123 | 124 | 125 | conv4 = Convolution1D(128,5,activation="relu", 126 | border_mode = "same")(bn3) 127 | bn4 = BatchNormalization(axis = -1)(conv4) 128 | drop4 = Dropout(0.3)(bn4) 129 | 130 | 131 | flat = Flatten()(drop4) 132 | 133 | return flat 134 | 135 | 136 | def CNN2d_model(input_shape, nb_classes): 137 | input_tensor = Input(input_shape) 138 | input_layer = Input((200,1)) 139 | res1 = conv_block(input_tensor,[64,64,256],3) 140 | 141 | 142 | flat1 = Flatten()(res1) 143 | flat2 = get_CNN1d(input_layer) 144 | flat = merge((flat1,flat2),mode="concat") 145 | # res2 = identity_block(res1,[64,64,256],3) 146 | # res3 = identity_block(res2,[128,128,256],3) 147 | # res4 = identity_block(res3,[128,128,256],3) 148 | 149 | 150 | dense1 = Dense(1024,activation="relu",W_regularizer=l2(l2_lr))(flat) 151 | bn5 = BatchNormalization()(dense1) 152 | drop5 = Dropout(0.3)(bn5) 153 | 154 | dense2 = Dense(1024,activation="relu",W_regularizer=l2(l2_lr))(drop5) 155 | bn6 = BatchNormalization()(dense2) 156 | drop6 = Dropout(0.3)(bn6) 157 | 158 | dense3 = Dense(nb_classes,activation="softmax")(drop6) 159 | 160 | model = Model(input = [input_tensor,input_layer],output = dense3) 161 | model.compile(loss='categorical_crossentropy',#categorical_crossentropy 162 | optimizer="adadelta", 163 | metrics=['accuracy']) 164 | return model 165 | 166 | CNN2d_model = CNN2d_model(input_shape,nb_classes) 167 | 168 | 169 | #%% fit model 170 | """ 171 | PCA,3*3领域,批规范化,l2范数 172 | categorical_crossentropy,adadelta,pca,block9,test0.9,l2_lr = 0.1,batch_size = 32,res3 173 | Epoch 1000/1000 174 | 1036/1036 [==============================] - 7s - loss: 0.0021 - acc: 1.0000 - val_loss: 0.3779 - val_acc: 0.9109 175 | categorical_crossentropy,adadelta,pca,block11,test0.9,l2_lr = 0.1,batch_size = 32,res4 176 | Epoch 1000/1000 177 | 1036/1036 [==============================] - 10s - loss: 0.0046 - acc: 0.9990 - val_loss: 0.2558 - val_acc: 0.9347 178 | categorical_crossentropy,adadelta,pca,block7,test0.9,l2_lr = 0.1,batch_size = 32,res4 179 | Epoch 400/1000 180 | 1036/1036 [==============================] - 6s - loss: 0.0067 - acc: 1.0000 - val_loss: 0.6330 - val_acc: 0.8607 181 | categorical_crossentropy,adadelta,pca,block5,test0.9,l2_lr = 0.1,batch_size = 32,res4 182 | Epoch 1000/1000 183 | 1036/1036 [==============================] - 5s - loss: 0.0055 - acc: 1.0000 - val_loss: 0.8763 - val_acc: 0.8356 184 | categorical_crossentropy,adadelta,nopca,block5,test0.9,l2_lr = 0.1,batch_size = 32,res4 185 | Epoch 276/1000 186 | 1036/1036 [==============================] - 11s - loss: 0.0051 - acc: 1.0000 - val_loss: 0.3141 - val_acc: 0.9149 187 | categorical_crossentropy,adadelta,nopca,block13,test0.9,l2_lr = 0.1,batch_size = 32,res4 188 | och 1000/1000 189 | 1036/1036 [==============================] - 11s - loss: 0.0026 - acc: 1.0000 - val_loss: 0.2811 - val_acc: 0.9285 190 | """ 191 | plot(CNN2d_model,to_file=logBasePath+"/CNN_multi_model.png",show_shapes=True) 192 | reduce_lr = ReduceLROnPlateau(patience=40) 193 | #myLogger = MyProgbarLogger(to_file=logBasePath+"/CNN2d_pca_model.log") 194 | csvLog = CSVLogger(logBasePath+"/CNN_multi_model.log") 195 | CNN2d_model.fit([X_train_nei,X_train],Y_train,nb_epoch=nb_epoch,batch_size=batch_size,verbose=1, 196 | validation_data=[[X_test_nei,X_test],Y_test],callbacks=[csvLog,reduce_lr]) 197 | 198 | 199 | 200 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/accu.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Jul 11 08:54:18 2017 4 | 5 | @author: fullmetal 6 | """ 7 | import numpy as np 8 | 9 | def get_accu(f): 10 | result = [] 11 | flag =True 12 | for line in f: 13 | if(flag==True): 14 | result.append(float(line)) 15 | flag=False 16 | if(line.startswith("=")): 17 | flag=True 18 | return result 19 | 20 | base_path="result/pca30/" 21 | block_size=[3,5,7,9,11,13,15,17] 22 | block_accu = [] 23 | 24 | for size in block_size: 25 | f = open(base_path+"result"+str(size)+"_res1_30.txt") 26 | result = get_accu(f) 27 | block_accu.append(np.mean(result)) 28 | print(np.mean(result)) 29 | 30 | result = get_accu(open(base_path+"result17_res1_30_85.txt")) 31 | print(np.mean(result)) 32 | 33 | 34 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/excute.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Jun 25 15:56:31 2017 4 | 5 | @author: Shenjunling 6 | """ 7 | 8 | import os 9 | import logging 10 | import subprocess 11 | import sys 12 | sys.path.append("G:/OneDrive/codes/python/DataLoad") 13 | 14 | log = logging.getLogger("Core.Analysis.Processing") 15 | 16 | INTERPRETER = "F:/Anaconda2/envs/py3/python.exe" 17 | 18 | 19 | if not os.path.exists(INTERPRETER): 20 | log.error("Cannot find INTERPRETER at path \"%s\"." % INTERPRETER) 21 | 22 | processor = "HSI_ResNet.py" 23 | batch_size = [25] 24 | block_size = [13] 25 | repeat = 10 26 | 27 | for i in batch_size: 28 | for j in block_size: 29 | for re in range(repeat): 30 | pargs = [INTERPRETER, processor] 31 | pargs.append("-batch_size") 32 | pargs.append(str(i)) 33 | pargs.append("-block_size") 34 | pargs.append(str(j)) 35 | pargs.append("-test_size") 36 | pargs.append(str(0.99)) 37 | print("===============================================================") 38 | print(pargs) 39 | p = subprocess.Popen(pargs) 40 | p.wait() -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/plot.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Jul 6 15:18:59 2017 4 | 5 | @author: Administrator 6 | """ 7 | 8 | from matplotlib import pyplot as plt 9 | from matplotlib import colors 10 | import numpy as np 11 | 12 | base_path = "result/13 98.6/" 13 | 14 | def plot_result(lab,index,imname): 15 | im = np.zeros((145,145))+16 16 | for idx,i in enumerate(index): 17 | row = int(np.floor(i/145)) 18 | col = int(i%145) 19 | im[row][col] = lab[idx] 20 | 21 | fig = plt.figure(figsize=(6,6)) 22 | # cmap = plt.get_cmap('tab20', 20) 23 | color=["red","green","lightcoral","red","chocolate","yellow","orange","slategray","indigo", 24 | "blue","teal","skyblue","darkorchid","gold","silver","darkgreen","black"] 25 | cmap=colors.ListedColormap(color) 26 | sc = plt.imshow(im, cmap = cmap) 27 | # plt.colorbar(sc, ticks = np.arange(0,17)) 28 | axes=plt.subplot(111) 29 | axes.set_xticks([]) 30 | axes.set_yticks([]) 31 | plt.savefig(base_path+imname+".jpg") 32 | # plt.imsave(base_path+imname+".jpg",im, cmap = cmap) 33 | 34 | def plot_train(total_lab,total_index,train_index, imname): 35 | im = np.zeros((145,145))+16 36 | for idx,i in enumerate(total_index): 37 | row = int(np.floor(i/145)) 38 | col = int(i%145) 39 | im[row][col] = total_lab[idx] 40 | for idx,i in enumerate(train_index): 41 | row = int(np.floor(i/145)) 42 | col = int(i%145) 43 | im[row][col] = 16 44 | 45 | fig = plt.figure(figsize=(6,6)) 46 | color=["red","green","lightcoral","red","chocolate","yellow","orange","slategray","indigo", 47 | "blue","teal","skyblue","darkorchid","gold","silver","darkgreen","black"] 48 | cmap=colors.ListedColormap(color) 49 | sc = plt.imshow(im, cmap = cmap) 50 | plt.colorbar(sc, ticks = np.arange(0,17),fraction=0.046, pad=0.04) 51 | axes=plt.subplot(111) 52 | axes.set_xticks([]) 53 | axes.set_yticks([]) 54 | plt.savefig(base_path+imname+".jpg") 55 | 56 | 57 | 58 | Y_data = np.loadtxt(base_path+"Y_data.txt") 59 | idx_train = np.loadtxt(base_path+"idx_train.txt") 60 | idx_test = np.loadtxt(base_path+"idx_test.txt") 61 | idx_data = np.loadtxt(base_path+"idx_data.txt") 62 | pred = np.loadtxt(base_path+"pred.txt") 63 | train_lab=np.loadtxt(base_path+"train_lab.txt") 64 | 65 | pred1 = np.zeros(pred.shape[0])+16 66 | for i in range(pred.shape[0]): 67 | pred1[i] = np.argmax(pred[i]) 68 | 69 | idx_data_list = list(idx_data) 70 | accu_count={} 71 | accu_total = {} 72 | for i in range(16): 73 | accu_count[i]=0 74 | accu_total[i]=0 75 | 76 | for idx,test in enumerate(idx_test): 77 | place = idx_data_list.index(test) 78 | accu_total[Y_data[place]]=accu_total[Y_data[place]]+1 79 | if(Y_data[place]==pred1[idx]): 80 | accu_count[Y_data[place]] = accu_count[Y_data[place]]+1 81 | 82 | accu = []#class-wise accu 83 | for i in range(16): 84 | accu.append(accu_count[i]/accu_total[i]) 85 | total_accu = sum(accu_count.values())/sum(accu_total.values())#accuracy 86 | 87 | 88 | #plot_result(Y_data, idx_data, "groud_truth")# 89 | #plot_train(Y_data,idx_data,idx_train,"train_sample") 90 | lab_pred = np.concatenate((pred1,train_lab)) 91 | idx_pred = np.concatenate((idx_test,idx_train)) 92 | plot_result(lab_pred, idx_pred,"test_sample")# 93 | 94 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/result13_res1_30.txt: -------------------------------------------------------------------------------- 1 | 0.982529474812433 2 | 1.0,5,49 3 | 0.9852827265685515,143,1291 4 | 1.0,83,751 5 | 1.0,23,211 6 | 0.9731543624161074,50,447 7 | 0.9925595238095238,75,672 8 | 1.0,3,23 9 | 1.0,49,440 10 | 0.8888888888888888,2,18 11 | 0.9678530424799081,97,871 12 | 0.9869428185502026,247,2221 13 | 0.891500904159132,61,553 14 | 0.9947643979057592,21,191 15 | 0.9982832618025751,129,1165 16 | 0.9970760233918129,38,342 17 | 0.9529411764705882,10,85 18 | ========================================================= 19 | 0.9889603429796355 20 | 0.9387755102040817,5,49 21 | 0.9899302865995352,143,1291 22 | 0.9920106524633822,83,751 23 | 0.985781990521327,23,211 24 | 0.970917225950783,50,447 25 | 0.9970238095238095,75,672 26 | 0.9130434782608695,3,23 27 | 0.9886363636363636,49,440 28 | 1.0,2,18 29 | 0.9850746268656716,97,871 30 | 0.9891940567312022,247,2221 31 | 0.9819168173598554,61,553 32 | 1.0,21,191 33 | 1.0,129,1165 34 | 0.9941520467836257,38,342 35 | 0.9176470588235294,10,85 36 | ========================================================= 37 | 0.9836012861736334 38 | 0.9795918367346939,5,49 39 | 0.9767621998450813,143,1291 40 | 0.9853528628495339,83,751 41 | 0.981042654028436,23,211 42 | 0.9932885906040269,50,447 43 | 0.9910714285714286,75,672 44 | 0.9565217391304348,3,23 45 | 1.0,49,440 46 | 0.4444444444444444,2,18 47 | 0.9609644087256027,97,871 48 | 0.9941467807294012,247,2221 49 | 0.9421338155515371,61,553 50 | 0.9947643979057592,21,191 51 | 0.9965665236051502,129,1165 52 | 0.9941520467836257,38,342 53 | 0.9882352941176471,10,85 54 | ========================================================= 55 | 0.9851018220793141 56 | 0.8979591836734694,5,49 57 | 0.9899302865995352,143,1291 58 | 0.996005326231691,83,751 59 | 0.9478672985781991,23,211 60 | 0.9440715883668904,50,447 61 | 0.9895833333333334,75,672 62 | 0.9130434782608695,3,23 63 | 1.0,49,440 64 | 0.8333333333333334,2,18 65 | 0.9598163030998852,97,871 66 | 0.9963980189104007,247,2221 67 | 0.9566003616636528,61,553 68 | 0.9895287958115183,21,191 69 | 1.0,129,1165 70 | 1.0,38,342 71 | 0.9882352941176471,10,85 72 | ========================================================= 73 | 0.9808145766345123 74 | 0.7755102040816326,5,49 75 | 0.981409759876065,143,1291 76 | 0.9680426098535286,83,751 77 | 1.0,23,211 78 | 0.9261744966442953,50,447 79 | 0.9970238095238095,75,672 80 | 1.0,3,23 81 | 1.0,49,440 82 | 0.6666666666666666,2,18 83 | 0.9758897818599311,97,871 84 | 0.9927960378208014,247,2221 85 | 0.9656419529837251,61,553 86 | 0.9842931937172775,21,191 87 | 0.9939914163090129,129,1165 88 | 0.9766081871345029,38,342 89 | 0.9411764705882353,10,85 90 | ========================================================= 91 | 0.9879957127545552 92 | 0.8367346938775511,5,49 93 | 0.9821843532145623,143,1291 94 | 0.9946737683089214,83,751 95 | 0.985781990521327,23,211 96 | 0.9843400447427293,50,447 97 | 0.9940476190476191,75,672 98 | 1.0,3,23 99 | 1.0,49,440 100 | 0.8888888888888888,2,18 101 | 0.9724454649827784,97,871 102 | 0.9977487618190004,247,2221 103 | 0.976491862567812,61,553 104 | 0.9528795811518325,21,191 105 | 0.9991416309012876,129,1165 106 | 0.97953216374269,38,342 107 | 0.9764705882352941,10,85 108 | ========================================================= 109 | 0.9872454448017149 110 | 0.9183673469387755,5,49 111 | 0.9860573199070488,143,1291 112 | 0.9893475366178429,83,751 113 | 0.990521327014218,23,211 114 | 0.9910514541387024,50,447 115 | 1.0,75,672 116 | 1.0,3,23 117 | 1.0,49,440 118 | 0.6666666666666666,2,18 119 | 0.9770378874856487,97,871 120 | 0.9828905898244035,247,2221 121 | 0.9783001808318263,61,553 122 | 1.0,21,191 123 | 0.9991416309012876,129,1165 124 | 0.9883040935672515,38,342 125 | 0.9764705882352941,10,85 126 | ========================================================= 127 | 0.984994640943194 128 | 0.8979591836734694,5,49 129 | 0.9860573199070488,143,1291 130 | 0.9920106524633822,83,751 131 | 1.0,23,211 132 | 0.9753914988814317,50,447 133 | 0.9970238095238095,75,672 134 | 0.5217391304347826,3,23 135 | 1.0,49,440 136 | 1.0,2,18 137 | 0.9850746268656716,97,871 138 | 0.9860423232778028,247,2221 139 | 0.9620253164556962,61,553 140 | 0.9947643979057592,21,191 141 | 1.0,129,1165 142 | 0.9766081871345029,38,342 143 | 0.8470588235294118,10,85 144 | ========================================================= 145 | 0.9920685959271168 146 | 0.9387755102040817,5,49 147 | 0.9860573199070488,143,1291 148 | 1.0,83,751 149 | 0.981042654028436,23,211 150 | 0.9977628635346756,50,447 151 | 0.9955357142857143,75,672 152 | 0.8260869565217391,3,23 153 | 0.9977272727272727,49,440 154 | 0.8333333333333334,2,18 155 | 0.9896670493685419,97,871 156 | 0.9941467807294012,247,2221 157 | 0.9855334538878843,61,553 158 | 0.9895287958115183,21,191 159 | 1.0,129,1165 160 | 0.9912280701754386,38,342 161 | 0.9764705882352941,10,85 162 | ========================================================= 163 | 0.9845659163987138 164 | 0.9591836734693877,5,49 165 | 0.975987606506584,143,1291 166 | 0.9773635153129161,83,751 167 | 0.995260663507109,23,211 168 | 0.9686800894854586,50,447 169 | 0.9925595238095238,75,672 170 | 0.8260869565217391,3,23 171 | 0.9954545454545455,49,440 172 | 0.7222222222222222,2,18 173 | 1.0,97,871 174 | 0.9864925709140027,247,2221 175 | 0.9891500904159132,61,553 176 | 0.9790575916230366,21,191 177 | 1.0,129,1165 178 | 0.9736842105263158,38,342 179 | 0.8352941176470589,10,85 180 | ========================================================= 181 | -------------------------------------------------------------------------------- /HSI Classification/HSI_ResNet/util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Jun 25 17:27:39 2017 4 | 5 | @author: Shenjunling 6 | """ 7 | 8 | from keras.utils.np_utils import categorical_probas_to_classes 9 | from sklearn.metrics import classification_report 10 | from sklearn.metrics import accuracy_score 11 | 12 | """ 13 | 返回:list, 每一个类的准确度 14 | """ 15 | def cateAccuracy(model_fitted,X_test,Y_test): 16 | Y_test = categorical_probas_to_classes(Y_test) 17 | Y_predict=model_fitted.predict(X_test) 18 | if len(Y_predict.shape)!=1: 19 | #转换onehot编码 20 | Y_predict=categorical_probas_to_classes(Y_predict) 21 | 22 | accu_count={} 23 | accu_total = {} 24 | for cat in set(Y_test): 25 | total = list(Y_test).count(cat) 26 | accu_total[cat] = total 27 | accu_count[cat] = 0 28 | 29 | for iidx,cat in enumerate(Y_test): 30 | if cat == Y_predict[iidx]: 31 | accu_count[cat] = accu_count[cat]+1 32 | sum1 = 0 33 | sum2 = 0 34 | for i in range(len(set(Y_test))): 35 | sum1 = sum1+accu_total[i] 36 | sum2 = sum2+accu_count[i] 37 | print(sum2/float(sum1)) 38 | return (sum2/float(sum1),[accu_count[i]/float(accu_total[i]) for i in range(len(set(Y_test)))]) 39 | 40 | 41 | -------------------------------------------------------------------------------- /HSI Classification/__pycache__/util.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/__pycache__/util.cpython-35.pyc -------------------------------------------------------------------------------- /HSI Classification/__pycache__/visualize.cpython-35-DESKTOP-RS5NO3H.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/__pycache__/visualize.cpython-35-DESKTOP-RS5NO3H.pyc -------------------------------------------------------------------------------- /HSI Classification/__pycache__/visualize.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drivenow/HSI-Classification/e2b98d08849a2b0ab681d1ecb893cfecabb08e9a/HSI Classification/__pycache__/visualize.cpython-35.pyc -------------------------------------------------------------------------------- /HSI Classification/basic_augmentation_model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Feb 26 16:04:20 2017 4 | @author: Administrator 5 | """ 6 | 7 | logBasePath = "D:/data/mylog/KerasDL/" 8 | rootPath = r'D:/data/HSI' 9 | 10 | #rootPath = "G:/data/HSI" 11 | #logBasePath = "G:/data/mylog/KerasDL/" 12 | 13 | test_size = 0.7 14 | validate_size = 0.8 15 | input_shape = (200,1) 16 | nb_epoch = 2000 17 | nb_classes = 16 18 | batch_size = 32 19 | 20 | #data_Augmentation 21 | padding = 10 22 | mess_window = 10 23 | 24 | #%% 25 | from HSIDataLoad import * 26 | 27 | X_data,Y_data,data_source,idx_data=datasetLoad2(rootPath) 28 | Y_data=np_utils.categorical_probas_to_classes(Y_data) 29 | X_train,X_test,Y_train,Y_test,idx_train,idx_test=datasetSplit(X_data,Y_data,idx_data,num_calss=16,test_size=test_size) 30 | ##数据增强 31 | #X_train_add, Y_train_add = data_Augmentation(X_train, Y_train, padding,mess_window) 32 | ##数据集合并 33 | #X_train = np.concatenate((X_train,X_train_add),axis = 0) 34 | #Y_train = np.concatenate((Y_train,Y_train_add),axis = 0) 35 | 36 | #数据规范化 37 | X_train = np.array([x.reshape(200,1) for x in X_train]) 38 | X_test = np.array([x.reshape(200,1) for x in X_test]) 39 | 40 | #划分验证集 41 | Y_test = np_utils.categorical_probas_to_classes(Y_test) 42 | X_validate,X_test,Y_validate,Y_test,idx_validate,idx_test=datasetSplit(X_test,Y_test,idx_test,num_calss=16,test_size=validate_size) 43 | 44 | 45 | #%% 46 | """ 47 | batch normalization: relu之后 48 | """ 49 | from keras.layers import Input,merge,Dense,Dropout,Flatten,Convolution1D,AveragePooling1D,up 50 | from keras.layers.normalization import BatchNormalization 51 | from keras.models import Model 52 | from mykeras.callbacks import MyProgbarLogger 53 | from keras.utils.visualize_util import plot 54 | from keras.callbacks import EarlyStopping,ReduceLROnPlateau,TensorBoard,CSVLogger,ModelCheckpoint 55 | 56 | 57 | def get_basic_model(input_shape, classify_output_num): 58 | input_layer = Input(input_shape) 59 | conv1 = Convolution1D(nb_filter=4,filter_length=3,activation = "relu", 60 | border_mode = "same")(input_layer) 61 | pool1 = AveragePooling1D()(conv1) 62 | bn1 = BatchNormalization(axis = -1)(pool1) 63 | 64 | conv2 = Convolution1D(nb_filter=8,filter_length=5,activation = "relu", 65 | border_mode = "same")(bn1) 66 | pool2 = AveragePooling1D()(conv2) 67 | bn2 = BatchNormalization(axis = -1)(pool2) 68 | 69 | conv3 = Convolution1D(nb_filter=16,filter_length=5,activation = "relu", 70 | border_mode = "same")(bn2) 71 | pool3 = AveragePooling1D()(conv3) 72 | drop3 = Dropout(0.3)(pool3) 73 | bn3 = BatchNormalization(axis = -1)(drop3) 74 | 75 | conv4 = Convolution1D(nb_filter = 16, filter_length=5,activation="relu", 76 | border_mode = "same")(bn3) 77 | bn4 = BatchNormalization(axis = -1)(conv4) 78 | 79 | 80 | y=Dense(512, activation='relu')(conv4) 81 | y=Dropout(0.3)(y) 82 | y=Dense(256, activation='relu')(y) 83 | y=Dropout(0.3)(y) 84 | y=Dense(512,activation='relu')(y) 85 | 86 | 87 | conv5 = Convolution1D(nb_filter=16,filter_length=5,activation = "relu", 88 | border_mode = "same")(input_layer) 89 | pool5 = AveragePooling1D()(conv5) 90 | bn5 = BatchNormalization(axis = -1)(pool5) 91 | 92 | conv6 = Convolution1D(nb_filter=8,filter_length=5,activation = "relu", 93 | border_mode = "same")(bn5) 94 | pool6 = AveragePooling1D()(conv6) 95 | bn6 = BatchNormalization(axis = -1)(pool6) 96 | 97 | conv7 = Convolution1D(nb_filter=16,filter_length=5,activation = "relu", 98 | border_mode = "same")(bn6) 99 | pool7 = AveragePooling1D()(conv7) 100 | drop7 = Dropout(0.3)(pool7) 101 | bn7 = BatchNormalization(axis = -1)(drop7) 102 | 103 | conv8 = Convolution1D(nb_filter = 16, filter_length=5,activation="relu", 104 | border_mode = "same")(bn7) 105 | bn4 = BatchNormalization(axis = -1)(conv8) 106 | 107 | 108 | flat1 = Flatten()(bn4) 109 | flat2 = Flatten()(input_layer) 110 | 111 | merge1 = merge([flat1,flat2], mode = "concat") 112 | 113 | x=Dense(512, activation='relu')(merge1) 114 | x=Dropout(0.3)(x) 115 | x=Dense(256, activation='relu')(x) 116 | x=Dropout(0.3)(x) 117 | x=Dense(128,activation='relu')(x) 118 | x=Dropout(0.3)(x) 119 | output=Dense(classify_output_num,activation='softmax')(x) 120 | 121 | model = Model(input = input_layer,output=output) 122 | model.compile(optimizer='adadelta',metrics=['accuracy'],loss='categorical_crossentropy') 123 | return model 124 | 125 | basic_model = get_basic_model(input_shape,nb_classes) 126 | 127 | #%% fit model 128 | plot(basic_model,to_file=logBasePath+"basic_augmentation_model.png",show_shapes=True) 129 | myLogger = MyProgbarLogger(to_file=logBasePath+"basic_model2.log") 130 | csvLogger = CSVLogger(filename=logBasePath+"basic_model2_csv.log") 131 | reduce_lr = ReduceLROnPlateau(patience=50,factor = 0.3, verbose =1) 132 | tensor_board = TensorBoard(log_dir = logBasePath+"board") 133 | ealystop = EarlyStopping(monitor='val_loss',patience=100) 134 | checkPoint = ModelCheckpoint(filepath=logBasePath+"basic_model2_check",save_best_only=True) 135 | 136 | basic_model.fit(X_train,Y_train,nb_epoch=nb_epoch,batch_size=batch_size,verbose=1, 137 | validation_data=[X_validate,Y_validate],callbacks=[ 138 | myLogger, ealystop, tensor_board, csvLogger, reduce_lr,checkPoint]) 139 | 140 | #%% 中间层输出 141 | input_layer = basic_model.input 142 | layer_name = 'convolution1d_44' 143 | #layer_name = 'averagepooling1d_16' 144 | conv4_layer = basic_model.get_layer(name=layer_name).output 145 | conv4_layer_model = Model(input_layer,conv4_layer) 146 | 147 | conv_output = conv4_layer_model.predict(X_train) 148 | 149 | #%% 150 | import matplotlib.pyplot as plt 151 | import matplotlib 152 | matplotlib.rcParams['figure.figsize'] = (20.0, 10.0) 153 | sample,length,band = conv_output.shape 154 | 155 | #%matplotlib qt5 156 | 157 | for sam in range(2): 158 | sam = sam+200 159 | plt.figure() 160 | sub_row=6#子图的行数 161 | sub_col=6#子图的列数 162 | sub_i=0#子图的编号 163 | for ba in range(band): 164 | sub_i = sub_i+1 165 | plt.subplot(sub_row,sub_col,sub_i) 166 | plt.plot(conv_output[sam,:,ba]) 167 | plt.show() 168 | -------------------------------------------------------------------------------- /HSI Classification/tensorflow/HSI_RN_temp.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Fri May 26 20:03:33 2017 4 | @author: Shenjunling 5 | """ 6 | 7 | #logBasePath = "D:/data/mylog/KerasDL/" 8 | #rootPath = r'D:/data/HSI' 9 | 10 | rootPath = "G:/data/HSI" 11 | logBasePath = "G:/data/mylog/KerasDL/CNN2d_pca_model" 12 | 13 | block_size = 11 14 | test_size = 0.9 15 | #validate_size = 0.8 16 | nb_epoch = 600 17 | epoch = 1 18 | nb_classes = 16 19 | batch_size = 32 20 | l2_lr = 0.1 21 | 22 | 23 | 24 | #是否使用pca 25 | use_pca = True 26 | n_components = 30 27 | if use_pca ==True: 28 | input_shape = (block_size,block_size,n_components) 29 | else: 30 | input_shape = (block_size,block_size,200) 31 | 32 | #%% 33 | from HSIDatasetLoad import * 34 | from keras.utils import np_utils 35 | 36 | def next_batch(num, data, labels): 37 | idx = np.arange(0, len(data)) 38 | np.random.shuffle(idx) 39 | 40 | count=0 41 | while count