├── CVA.py ├── IRMAD.py ├── MAD.py ├── PCA.py ├── SFA.py ├── __init__.py ├── deeplearning ├── Loss │ ├── BalanceBCELoss.py │ ├── CustomLoss.py │ ├── DiceLoss.py │ └── __init__.py ├── MxNet.py ├── NestUnet.py ├── RemoteImageDataset.py ├── Siam_Cat_Unet.py ├── Siam_Diff_Unet.py ├── UNet_Plus.py ├── UpdateNestUnet.py ├── accuracy_cal.py ├── test.py ├── test2.py ├── train.py └── valid.py ├── readme └── spectral.py /CVA.py: -------------------------------------------------------------------------------- 1 | #变化检测CVA方法 2 | class CVA: 3 | def __init__(self,after,before): 4 | self.after = after 5 | self.before = before 6 | def process(self): 7 | print("start") 8 | -------------------------------------------------------------------------------- /IRMAD.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.linalg import inv, sqrtm, eig 3 | import matplotlib.pyplot as pyplot 4 | from sklearn.cluster import KMeans 5 | class IRMAD: 6 | def __init__(self,after,before): 7 | self.after = after 8 | self.before = before 9 | def process(self): 10 | after = self.after 11 | before = self.before 12 | (rows, cols, bands) = after.shape 13 | 14 | 15 | 16 | after = np.transpose(np.reshape(after, (rows * cols, bands)), (1, 0)) 17 | before = np.transpose(np.reshape(before, (rows * cols, bands)), (1, 0)) 18 | 19 | after_mean = np.mean(after, axis=1) 20 | after_var = np.std(after, axis=1) 21 | before_mean = np.mean(before, axis=1) 22 | before_var = np.std(before, axis=1) 23 | 24 | # for i in range(bands): 25 | # #test = after[:, i] - after_mean[i] 26 | # after[i,:] = (after[i,:]-after_mean[i])/after_var[i] 27 | # before[i,:] = (before[i,:]-before_mean[i])/before_var[i] 28 | 29 | cov_aa_mari = np.cov(after) 30 | cov_aa_mat_i = np.linalg.inv(cov_aa_mari) 31 | con_cov = np.cov(after, before) 32 | cov_xx = con_cov[0:bands, 0:bands] 33 | cov_xy = con_cov[0:bands, bands:] 34 | cov_yx = con_cov[bands:, 0:bands] 35 | cov_yy = con_cov[bands:, bands:] 36 | # yy_cov = np.cov(before) 37 | A = inv(cov_xx) @ cov_xy @ inv(cov_yy) @ cov_yx 38 | B = inv(cov_yy) @ cov_yx @ inv(cov_xx) @ cov_xy # 与A特征值相同,但特征向量不同 39 | 40 | # A的特征值与特征向量 av 特征值, ad 特征向量 41 | [av, ad] = eig(A) 42 | 43 | # 对特征值从小到大排列 与 CCA相反 44 | swap_av_index = np.argsort(av) 45 | swap_av = av[swap_av_index[:av.size:1]] 46 | swap_ad = ad[swap_av_index[:av.size:1], :] 47 | 48 | # 满足st 条件 49 | ma = inv(sqrtm(swap_ad.T @ cov_xx @ swap_ad)) # 条件一 50 | 51 | swap_ad = swap_ad @ ma 52 | 53 | # 对应b的值 54 | [bv, bd] = eig(B) 55 | swap_bv = bv[swap_av_index[:bv.size:1]] 56 | swap_bd = bd[swap_av_index[:bd.size:1]] 57 | mb = inv(sqrtm(swap_bd.T @ cov_yy @ swap_bd)) # 条件二 58 | 59 | swap_bd = swap_bd @ mb 60 | # ab = np.linalg.inv(cov_yy) @ cov_yx @ swap_ad 61 | # bb = np.linalg.inv() 62 | 63 | MAD = swap_ad.T @ after - (swap_bd.T @ before) 64 | [i, j] = MAD.shape 65 | var_mad = np.zeros(i) 66 | for k in range(i): 67 | var_mad[k] = np.var(MAD[k]) 68 | var_mad = np.transpose(np.matlib.repmat(var_mad, j, 1), (1, 0)) 69 | res = MAD * MAD / var_mad 70 | T = res.sum(axis=0) 71 | # T = np.zeros(j) 72 | # #for row in range(j): 73 | # sum = 0. 74 | # for col in range(i): 75 | # sum = np.sum(np.square(MAD[col,:] / np.var(MAD[col]))) 76 | # T[i] = sum 77 | # Kmeans 聚类 78 | 79 | 80 | re = np.reshape(T, (j, 1)) 81 | kmeans = KMeans(n_clusters=2, random_state=0).fit(re) 82 | img = np.reshape(kmeans.labels_, (rows, cols,)) 83 | center = kmeans.cluster_centers_ 84 | pyplot.imshow(np.uint8(img)) 85 | pyplot.show() 86 | # scipy.misc.imsave('c.jpg', img) 87 | print(center) 88 | import cv2 89 | if __name__ == "__main__": 90 | 91 | after = cv2.imread("../../data/abudhabi_8_after.png") 92 | before = cv2.imread("../../data/abudhabi_8_before.png") 93 | irmad = IRMAD(after,before) 94 | irmad.process() -------------------------------------------------------------------------------- /MAD.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.linalg import inv, sqrtm, eig 3 | import matplotlib.pyplot as pyplot 4 | import numpy.matlib as nplib 5 | class MAD: 6 | def __init__(self,after,before): 7 | print(print(np.__version__)) 8 | self.after = after 9 | self.before = before 10 | def propess(self): 11 | #进行相应的处理计算 12 | # dataset_after = gdal.Open( 13 | # r"F:\deeplearndata\rssrai2019_change_detection\train\train\img_2017\image_2017_960_960_8.tif") 14 | # im_width = dataset_after.RasterXSize # 栅格矩阵的列数 15 | # im_height = dataset_after.RasterYSize # 栅格矩阵的行数 16 | # im_bands = dataset_after.RasterCount # 波段数 17 | # after = np.transpose(dataset_after.ReadAsArray(0, 0, im_width, im_height), (1, 2, 0)) # 获取数据 18 | # 19 | # dataset_before = gdal.Open( 20 | # r"F:\deeplearndata\rssrai2019_change_detection\train\train\img_2018\image_2018_960_960_8.tif") 21 | # im_width = dataset_before.RasterXSize # 栅格矩阵的列数 22 | # im_height = dataset_before.RasterYSize # 栅格矩阵的行数 23 | # im_bands = dataset_before.RasterCount # 波段数 24 | # before = np.transpose(dataset_before.ReadAsArray(0, 0, im_width, im_height), (1, 2, 0)) # 获取数据 25 | after = self.after 26 | before = self.before 27 | (rows, cols, bands) = after.shape 28 | 29 | # 归一化处理 30 | 31 | after = np.transpose(np.reshape(after, (rows * cols, bands)), (1, 0)) 32 | before = np.transpose(np.reshape(before, (rows * cols, bands)), (1, 0)) 33 | 34 | after_mean = np.mean(after, axis=1) 35 | after_var = np.std(after, axis=1) 36 | before_mean = np.mean(before, axis=1) 37 | before_var = np.std(before, axis=1) 38 | 39 | for i in range(bands): 40 | #test = after[:, i] - after_mean[i] 41 | after[i,:] = (after[i,:]-after_mean[i])/after_var[i] 42 | before[i,:] = (before[i,:]-before_mean[i])/before_var[i] 43 | 44 | cov_aa_mari = np.cov(after) 45 | cov_aa_mat_i = np.linalg.inv(cov_aa_mari) 46 | con_cov = np.cov(after, before) 47 | cov_xx = con_cov[0:bands, 0:bands] 48 | cov_xy = con_cov[0:bands, bands:] 49 | cov_yx = con_cov[bands:, 0:bands] 50 | cov_yy = con_cov[bands:, bands:] 51 | # yy_cov = np.cov(before) 52 | A = inv(cov_xx) @ cov_xy @ inv(cov_yy) @ cov_yx 53 | B = inv(cov_yy) @ cov_yx @ inv(cov_xx) @ cov_xy # 与A特征值相同,但特征向量不同 54 | 55 | # A的特征值与特征向量 av 特征值, ad 特征向量 56 | [av, ad] = eig(A) 57 | 58 | # 对特征值从小到大排列 与 CCA相反 59 | swap_av_index = np.argsort(av) 60 | swap_av = av[swap_av_index[:av.size:1]] 61 | swap_ad = ad[swap_av_index[:av.size:1], :] 62 | 63 | # 满足st 条件 64 | ma = inv(sqrtm(swap_ad.T @ cov_xx @ swap_ad)) # 条件一 65 | 66 | swap_ad = swap_ad @ ma 67 | 68 | # 对应b的值 69 | [bv, bd] = eig(B) 70 | swap_bv = bv[swap_av_index[:bv.size:1]] 71 | swap_bd = bd[swap_av_index[:bd.size:1]] 72 | mb = inv(sqrtm(swap_bd.T @ cov_yy @ swap_bd)) # 条件二 73 | 74 | swap_bd = swap_bd @ mb 75 | # ab = np.linalg.inv(cov_yy) @ cov_yx @ swap_ad 76 | # bb = np.linalg.inv() 77 | 78 | MAD = swap_ad.T @ after - (swap_bd.T @ before) 79 | [i, j] = MAD.shape 80 | var_mad = np.zeros(i) 81 | for k in range(i): 82 | var_mad[k] = np.var(MAD[k]) 83 | 84 | var_mad = np.transpose(nplib.repmat(var_mad, j, 1), (1, 0)) 85 | res = MAD * MAD / var_mad 86 | T = res.sum(axis=0) 87 | # T = np.zeros(j) 88 | # #for row in range(j): 89 | # sum = 0. 90 | # for col in range(i): 91 | # sum = np.sum(np.square(MAD[col,:] / np.var(MAD[col]))) 92 | # T[i] = sum 93 | # Kmeans 聚类 94 | from sklearn.cluster import KMeans 95 | 96 | re = np.reshape(T, (j, 1)) 97 | kmeans = KMeans(n_clusters=2, random_state=0).fit(re) 98 | img = np.reshape(kmeans.labels_, (rows, cols,)) 99 | center = kmeans.cluster_centers_ 100 | pyplot.imshow(np.uint8(img)) 101 | pyplot.show() 102 | # scipy.misc.imsave('c.jpg', img) 103 | print(center) 104 | import gdal 105 | if __name__=="__main__": 106 | dataset_after = gdal.Open(r"F:\变化检测数据\A1_clip.tif") 107 | im_width = dataset_after.RasterXSize # 栅格矩阵的列数 108 | im_height = dataset_after.RasterYSize # 栅格矩阵的行数 109 | im_bands = dataset_after.RasterCount # 波段数 110 | after = np.transpose(dataset_after.ReadAsArray(0, 0, im_width, im_height), (1, 2, 0))[0:5558, 0:5314] # 获取数据 111 | 112 | dataset_before = gdal.Open(r"F:\变化检测数据\B1_clip.tif") 113 | im_width = dataset_before.RasterXSize # 栅格矩阵的列数 114 | im_height = dataset_before.RasterYSize # 栅格矩阵的行数 115 | im_bands = dataset_before.RasterCount # 波段数 116 | before = np.transpose(dataset_before.ReadAsArray(0, 0, im_width, im_height), (1, 2, 0))[0:5558, 0:5314] # 获取数据 117 | mad = MAD(after,before) 118 | mad.propess() -------------------------------------------------------------------------------- /PCA.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.decomposition import PCA 3 | from sklearn.cluster import KMeans 4 | import cv2 5 | import matplotlib.pyplot as pyplot 6 | class PCA_CD: 7 | def __init__(self, after, before): 8 | self.after = after 9 | self.before = before 10 | def process(self): 11 | diff = self.after - self.before 12 | [w,h,d] = diff.shape 13 | diff = diff.reshape((w*h,d)) 14 | print(diff) 15 | pca = PCA(n_components=0.75) 16 | data = pca.fit_transform(diff) 17 | k_data = KMeans(n_clusters=2, random_state=9).fit_predict(data) 18 | print(k_data) 19 | print(data) 20 | img = np.reshape(k_data, (w, h,)) 21 | 22 | pyplot.imshow(np.uint8(img)) 23 | pyplot.show() 24 | # scipy.misc.imsave('c.jpg', img) 25 | 26 | 27 | 28 | import gdal 29 | if __name__ == "__main__": 30 | # after = cv2.imread("../../data/abudhabi_8_after.png") 31 | # before = cv2.imread("../../data/abudhabi_8_before.png") 32 | dataset_after = gdal.Open(r"F:\变化检测数据\A1_clip.tif") 33 | im_width = dataset_after.RasterXSize # 栅格矩阵的列数 34 | im_height = dataset_after.RasterYSize # 栅格矩阵的行数 35 | im_bands = dataset_after.RasterCount # 波段数 36 | after = np.transpose(dataset_after.ReadAsArray(0, 0, im_width, im_height),(1,2,0))[0:5558,0:5314] # 获取数据 37 | 38 | dataset_before = gdal.Open(r"F:\变化检测数据\B1_clip.tif") 39 | im_width = dataset_before.RasterXSize # 栅格矩阵的列数 40 | im_height = dataset_before.RasterYSize # 栅格矩阵的行数 41 | im_bands = dataset_before.RasterCount # 波段数 42 | before = np.transpose(dataset_before.ReadAsArray(0, 0, im_width, im_height),(1,2,0))[0:5558,0:5314] # 获取数据 43 | 44 | 45 | pca = PCA_CD(after, before) 46 | pca.process() 47 | 48 | 49 | -------------------------------------------------------------------------------- /SFA.py: -------------------------------------------------------------------------------- 1 | # 慢特征图像变化检测 2 | import numpy as np 3 | import cv2 4 | from scipy.linalg import inv, sqrtm, eig 5 | import matplotlib.pyplot as pyplot 6 | 7 | 8 | class SFA: 9 | def __init__(self, after, before): 10 | self.after = after 11 | self.before = before 12 | self.shape = None 13 | 14 | def process(self): 15 | after = self.after 16 | before = self.before 17 | 18 | self.shape = after.shape 19 | (rows, cols, bands) = self.shape 20 | 21 | # 归一化处理 22 | 23 | after = np.transpose(np.reshape(after, (rows * cols, bands)), (1, 0)) 24 | before = np.transpose(np.reshape(before, (rows * cols, bands)), (1, 0)) 25 | 26 | # 执行标准化程序 27 | after = self.standardization(after) 28 | before = self.standardization(before) 29 | e = np.cov(after - before) 30 | sum1 = np.sum((after[0] - before[0]) @ (after[0] - before[0]).T) / (rows * cols) 31 | sum2 = np.sum((after[1] - before[1]) @ (after[1] - before[1]).T) / (rows * cols) 32 | print(after) 33 | e_x = np.cov(after) 34 | e_y = np.cov(before) 35 | B = 1 / 2 * (e_x + e_y) 36 | # 特征值与特征向量 37 | print(B) 38 | 39 | (value, vector) = eig(np.linalg.inv(B) @ e) 40 | SFA = (vector @ after) - (vector @ before) 41 | print(SFA) 42 | tr_sfa = np.transpose(SFA, (1, 0)) 43 | from sklearn.cluster import KMeans 44 | 45 | # re = np.reshape(T, (j, 1)) 46 | kmeans = KMeans(n_clusters=2, random_state=0).fit(tr_sfa[:,1]) 47 | img = np.reshape(kmeans.labels_, (rows, cols,)) 48 | center = kmeans.cluster_centers_ 49 | #tr_sfa = np.reshape(tr_sfa, (rows,cols,3)) 50 | pyplot.imshow(img) 51 | pyplot.show() 52 | 53 | def standardization(self, data): 54 | (rows, cols, bands) = self.shape 55 | data_mean = np.mean(data, axis=1) 56 | data_var = np.var(data, axis=1) 57 | data_mean_repeat = np.tile(data_mean, (rows * cols, 1)).transpose(1, 0) 58 | data_var_repeat = np.tile(data_var, (rows * cols, 1)).transpose(1, 0) 59 | data = (data - data_mean_repeat) / data_var_repeat 60 | print(data) 61 | return data 62 | 63 | import gdal 64 | if __name__ == "__main__": 65 | after = cv2.imread("../../data/abudhabi_8_after.png") 66 | before = cv2.imread("../../data/abudhabi_8_before.png") 67 | 68 | # dataset_after = gdal.Open(r"F:\deeplearndata\rssrai2019_change_detection\train\train\img_2017\image_2017_960_960_1.tif") 69 | # im_width = dataset_after.RasterXSize # 栅格矩阵的列数 70 | # im_height = dataset_after.RasterYSize # 栅格矩阵的行数 71 | # im_bands = dataset_after.RasterCount # 波段数 72 | # after = np.transpose(dataset_after.ReadAsArray(0, 0, im_width, im_height),(1,2,0)) # 获取数据 73 | # 74 | # dataset_before = gdal.Open(r"F:\deeplearndata\rssrai2019_change_detection\train\train\img_2018\image_2018_960_960_1.tif") 75 | # im_width = dataset_before.RasterXSize # 栅格矩阵的列数 76 | # im_height = dataset_before.RasterYSize # 栅格矩阵的行数 77 | # im_bands = dataset_before.RasterCount # 波段数 78 | # before = np.transpose(dataset_before.ReadAsArray(0, 0, im_width, im_height),(1,2,0)) # 获取数据 79 | sfa = SFA(after, before) 80 | sfa.process() 81 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuxgis/changeDetection/96cc04c3e41f24cf2aa03ed98016687eea2c5e51/__init__.py -------------------------------------------------------------------------------- /deeplearning/Loss/BalanceBCELoss.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/2 21:21 5 | @description: 平衡BCE损失 6 | """ 7 | import torch.nn as nn 8 | import torch 9 | class BalanceBCELoss(nn.Module): 10 | def __init__(self): 11 | super(BalanceBCELoss, self).__init__() 12 | def forward(self,predict,target): 13 | #计算权重 14 | # 计算target 的变化与非变化比例 15 | pix_weight = torch.rand_like(target) 16 | for i, batch in enumerate(target): 17 | zero_count = torch.sum(batch == 0) 18 | one_count = torch.sum(batch == 1) 19 | zero_weight = zero_count.float() / (zero_count + one_count) 20 | one_weight = one_count.float() / (zero_count + one_count) 21 | pix_weight[i][batch == 0.] = one_weight 22 | pix_weight[i][batch == 1.] = zero_weight 23 | loss = nn.BCELoss(weight=pix_weight)(predict,target) 24 | return loss 25 | -------------------------------------------------------------------------------- /deeplearning/Loss/CustomLoss.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/6 20:19 5 | @description: 最终损失 6 | """ 7 | import torch 8 | import torch.nn as nn 9 | from Loss.DiceLoss import DiceLoss 10 | from Loss.BalanceBCELoss import BalanceBCELoss 11 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 12 | class CustomLoss(nn.Module): 13 | def __init__(self,nmn): 14 | super(CustomLoss, self).__init__() 15 | self.namna = nmn 16 | def forward(self,predict_array,target):#含有batch 17 | final_loss = torch.zeros([1]).to(device) 18 | layer_weight = [0.5, 0.5, 0.75, 0.5, 1.0] 19 | for i,predict in enumerate(predict_array): 20 | #分别计算损失 21 | predict = torch.sigmoid(predict) 22 | 23 | balance_loss = BalanceBCELoss()(predict,target) 24 | dice_loss = DiceLoss()(predict,target)*self.namna 25 | loss = (balance_loss+dice_loss)*layer_weight[i] 26 | 27 | final_loss += loss 28 | return final_loss 29 | 30 | 31 | -------------------------------------------------------------------------------- /deeplearning/Loss/DiceLoss.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/3 19:35 5 | @description: Dice Loss 损失 6 | """ 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | class DiceLoss(nn.Module): 11 | def __init__(self): 12 | super(DiceLoss, self).__init__() 13 | def forward(self,predict,target): 14 | num = predict.size(0) 15 | smooth = 1 16 | weight = 0.5 17 | probs = torch.sigmoid(predict) 18 | p = probs.view(num,-1) #flat 操作 19 | t = target.view(num,-1) 20 | intersection = p * t 21 | score = 2. * (intersection.sum(1)+smooth)/(p.sum(1)+weight*t.sum(1) + smooth) 22 | return score 23 | 24 | -------------------------------------------------------------------------------- /deeplearning/Loss/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/2 21:16 5 | @description: 损失函数集合 6 | """ 7 | -------------------------------------------------------------------------------- /deeplearning/MxNet.py: -------------------------------------------------------------------------------- 1 | # @Time : 2019/12/12 12:24 2 | # @Author : yux 3 | # @Content : 混合Unet模型 4 | 5 | import torch.nn as nn 6 | import torch 7 | import numpy as np 8 | from torch import autograd 9 | 10 | 11 | class DoubleConv(nn.Module): 12 | def __init__(self, in_ch, out_ch): 13 | super(DoubleConv, self).__init__() 14 | self.conv = nn.Sequential( 15 | nn.Conv2d(in_ch, out_ch, 3, padding=1), 16 | nn.BatchNorm2d(out_ch), 17 | #nn.Dropout2d(p=0.2), 18 | nn.ReLU(inplace=True), 19 | nn.Conv2d(out_ch, out_ch, 3, padding=1), 20 | nn.BatchNorm2d(out_ch), 21 | #nn.Dropout2d(p=0.2), 22 | nn.ReLU(inplace=True) 23 | ) 24 | 25 | def forward(self, input): 26 | return self.conv(input) 27 | 28 | 29 | class MxUnet(nn.Module): 30 | def __init__(self, in_ch, out_ch): 31 | super(MxUnet, self).__init__() 32 | 33 | self.conv1 = DoubleConv(in_ch, 64) 34 | 35 | self.catconv = DoubleConv(2, 64) 36 | self.pool1 = nn.MaxPool2d(2) 37 | self.conv2 = DoubleConv(64, 128) 38 | self.pool2 = nn.MaxPool2d(2) 39 | self.conv3 = DoubleConv(128, 256) 40 | self.pool3 = nn.MaxPool2d(2) 41 | self.conv4 = DoubleConv(256, 512) 42 | self.pool4 = nn.MaxPool2d(2) 43 | self.conv5 = DoubleConv(512, 1024) 44 | self.up6 = nn.ConvTranspose2d(1024, 512, 2, stride=2) 45 | self.conv6 = DoubleConv(1536, 512) 46 | self.up7 = nn.ConvTranspose2d(512, 256, 2, stride=2) 47 | self.conv7 = DoubleConv(256*3, 256) 48 | self.up8 = nn.ConvTranspose2d(256, 128, 2, stride=2) 49 | self.conv8 = DoubleConv(128*3, 128) 50 | self.up9 = nn.ConvTranspose2d(128, 64, 2, stride=2) 51 | self.conv9 = DoubleConv(64*3, 64) 52 | 53 | #self.conv10 = nn.Conv2d(64,out_ch, 1) 54 | 55 | self.allmask = nn.Sequential( 56 | nn.Conv2d(128,out_ch,1), 57 | nn.BatchNorm2d(out_ch), 58 | nn.Sigmoid() 59 | ) 60 | #self.allmask = nn.Conv2d(128,out_ch,1) 61 | 62 | def forward(self, x1, x2): 63 | # before 以b开头 64 | bconv1 = self.conv1(x1) 65 | bpool1 = nn.MaxPool2d(2)(bconv1) 66 | bconv2 = self.conv2(bpool1) 67 | bpool2 = nn.MaxPool2d(2)(bconv2) 68 | bconv3 = self.conv3(bpool2) 69 | bpool3 = nn.MaxPool2d(2)(bconv3) 70 | bconv4 = self.conv4(bpool3) 71 | bpool4 = nn.MaxPool2d(2)(bconv4) 72 | bconv5 = self.conv5(bpool4) 73 | bup1 = self.up6(bconv5) 74 | 75 | # after 以a开头 76 | aconv1 = self.conv1(x2) 77 | apool1 = nn.MaxPool2d(2)(aconv1) 78 | aconv2 = self.conv2(apool1) 79 | apool2 = nn.MaxPool2d(2)(aconv2) 80 | aconv3 = self.conv3(apool2) 81 | apool3 = nn.MaxPool2d(2)(aconv3) 82 | aconv4 = self.conv4(apool3) 83 | apool4 = nn.MaxPool2d(2)(aconv4) 84 | aconv5 = self.conv5(apool4) 85 | aup1 = self.up6(aconv5) 86 | 87 | # before 以b开头 88 | bconcat1 = torch.cat([bup1, bconv4, aconv4], dim=1) 89 | bconv6 = self.conv6(bconcat1) 90 | bup2 = self.up7(bconv6) 91 | bconcat2 = torch.cat([bup2,bconv3,aconv3],dim=1) 92 | bconv7 = self.conv7(bconcat2) 93 | bup3 = self.up8(bconv7) 94 | bconcat3 = torch.cat([bup3,bconv2,aconv2],dim=1) 95 | bconv8 = self.conv8(bconcat3) 96 | bup4 = self.up9(bconv8) 97 | bconcat4 = torch.cat([bup4,bconv1,aconv1],dim = 1) 98 | bconv9 = self.conv9(bconcat4) 99 | 100 | # after 以a开头 101 | aconcat1 = torch.cat([aup1, aconv4, bconv4], dim=1) 102 | aconv6 = self.conv6(aconcat1) 103 | aup2 = self.up7(aconv6) 104 | aconcat2 = torch.cat([aup2, aconv3, bconv3], dim=1) 105 | aconv7 = self.conv7(aconcat2) 106 | aup3 = self.up8(aconv7) 107 | aconcat3 = torch.cat([aup3, aconv2, bconv2], dim=1) 108 | aconv8 = self.conv8(aconcat3) 109 | aup4 = self.up9(aconv8) 110 | aconcat4 = torch.cat([aup4, aconv1, bconv1], dim=1) 111 | aconv9 = self.conv9(aconcat4) 112 | 113 | 114 | # before after 合并 115 | 116 | dcat1 = torch.cat([bconv9,aconv9],dim=1) 117 | result = self.allmask(dcat1) 118 | result = nn.Sigmoid()(result) 119 | #print(result.size()) 120 | return result 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | -------------------------------------------------------------------------------- /deeplearning/NestUnet.py: -------------------------------------------------------------------------------- 1 | # @Time : 2019/12/25 11:12 2 | # @Author : yux 3 | # @Content : 4 | 5 | import torch 6 | import torch.nn as nn 7 | #from Mish import Mish 8 | class conv_block_nested(nn.Module): 9 | 10 | def __init__(self, in_ch, mid_ch, out_ch): 11 | super(conv_block_nested, self).__init__() 12 | self.activation = nn.ReLU(inplace=True) 13 | #self.activation = Mish() 14 | self.conv1 = nn.Conv2d(in_ch, mid_ch, kernel_size=3, padding=1, bias=True) 15 | self.bn1 = nn.BatchNorm2d(mid_ch) 16 | self.conv2 = nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1, bias=True) 17 | self.bn2 = nn.BatchNorm2d(out_ch) 18 | 19 | def forward(self, x): 20 | x = self.conv1(x) 21 | x = self.bn1(x) 22 | x = self.activation(x) 23 | 24 | x = self.conv2(x) 25 | x = self.bn2(x) 26 | output = self.activation(x) 27 | 28 | return output 29 | 30 | class NestUnet(nn.Module): 31 | 32 | def __init__(self, in_ch=6, out_ch=1): 33 | super(NestUnet, self).__init__() 34 | 35 | n1 = 64 36 | filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] 37 | 38 | self.pool = nn.MaxPool2d(kernel_size=2, stride=2) 39 | self.Up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 40 | 41 | self.conv0_0 = conv_block_nested(in_ch, filters[0], filters[0]) 42 | self.conv1_0 = conv_block_nested(filters[0], filters[1], filters[1]) 43 | self.conv2_0 = conv_block_nested(filters[1], filters[2], filters[2]) 44 | self.conv3_0 = conv_block_nested(filters[2], filters[3], filters[3]) 45 | self.conv4_0 = conv_block_nested(filters[3], filters[4], filters[4]) 46 | 47 | self.conv0_1 = conv_block_nested(filters[0] + filters[1], filters[0], filters[0]) 48 | self.conv1_1 = conv_block_nested(filters[1] + filters[2], filters[1], filters[1]) 49 | self.conv2_1 = conv_block_nested(filters[2] + filters[3], filters[2], filters[2]) 50 | self.conv3_1 = conv_block_nested(filters[3] + filters[4], filters[3], filters[3]) 51 | 52 | self.conv0_2 = conv_block_nested(filters[0]*2 + filters[1], filters[0], filters[0]) 53 | self.conv1_2 = conv_block_nested(filters[1]*2 + filters[2], filters[1], filters[1]) 54 | self.conv2_2 = conv_block_nested(filters[2]*2 + filters[3], filters[2], filters[2]) 55 | 56 | self.conv0_3 = conv_block_nested(filters[0]*3 + filters[1], filters[0], filters[0]) 57 | self.conv1_3 = conv_block_nested(filters[1]*3 + filters[2], filters[1], filters[1]) 58 | 59 | self.conv0_4 = conv_block_nested(filters[0]*4 + filters[1], filters[0], filters[0]) 60 | 61 | self.final = nn.Sequential(nn.Conv2d(filters[0], out_ch, kernel_size=1),nn.Sigmoid()) 62 | 63 | def forward(self, x, y): 64 | 65 | 66 | x0_0 = self.conv0_0(x) 67 | x1_0 = self.conv1_0(self.pool(x0_0)) 68 | x0_1 = self.conv0_1(torch.cat([x0_0, self.Up(x1_0)], 1)) 69 | 70 | x2_0 = self.conv2_0(self.pool(x1_0)) 71 | x1_1 = self.conv1_1(torch.cat([x1_0, self.Up(x2_0)], 1)) 72 | x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.Up(x1_1)], 1)) 73 | 74 | x3_0 = self.conv3_0(self.pool(x2_0)) 75 | x2_1 = self.conv2_1(torch.cat([x2_0, self.Up(x3_0)], 1)) 76 | #x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.Up(x2_1)], 1)) 77 | #x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.Up(x1_2)], 1)) 78 | 79 | x4_0 = self.conv4_0(self.pool(x3_0)) 80 | #x3_1 = self.conv3_1(torch.cat([x3_0, self.Up(x4_0)], 1)) 81 | #x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.Up(x3_1)], 1)) 82 | #x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.Up(x2_2)], 1)) 83 | #x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.Up(x1_3)], 1)) 84 | 85 | 86 | 87 | 88 | y0_0 = self.conv0_0(y) 89 | y1_0 = self.conv1_0(self.pool(y0_0)) 90 | y0_1 = self.conv0_1(torch.cat([y0_0, self.Up(y1_0)], 1)) 91 | 92 | y2_0 = self.conv2_0(self.pool(y1_0)) 93 | y1_1 = self.conv1_1(torch.cat([y1_0, self.Up(y2_0)], 1)) 94 | y0_2 = self.conv0_2(torch.cat([y0_0, y0_1, self.Up(y1_1)], 1)) 95 | 96 | y3_0 = self.conv3_0(self.pool(y2_0)) 97 | y2_1 = self.conv2_1(torch.cat([y2_0, self.Up(y3_0)], 1)) 98 | #y1_2 = self.conv1_2(torch.cat([y1_0, y1_1, self.Up(y2_1)], 1)) 99 | #y0_3 = self.conv0_3(torch.cat([y0_0, y0_1, y0_2, self.Up(y1_2)], 1)) 100 | 101 | y4_0 = self.conv4_0(self.pool(y3_0)) 102 | #y3_1 = self.conv3_1(torch.cat([y3_0, self.Up(y4_0)], 1)) 103 | #y2_2 = self.conv2_2(torch.cat([y2_0, y2_1, self.Up(y3_1)], 1)) 104 | #y1_3 = self.conv1_3(torch.cat([y1_0, y1_1, y1_2, self.Up(y2_2)], 1)) 105 | #y0_4 = self.conv0_4(torch.cat([y0_0, y0_1, y0_2, y0_3, self.Up(y1_3)], 1)) 106 | 107 | 108 | 109 | #相互融合 110 | xy0_0 = torch.abs(x0_0-y0_0) 111 | xy1_0 = torch.abs(x1_0-y1_0) 112 | xy2_0 = torch.abs(x2_0-y2_0) 113 | xy3_0 = torch.abs(x3_0-y3_0) 114 | 115 | xy0_1 = torch.abs(x0_1-y0_1) 116 | xy0_2 = torch.abs(x0_2-y0_2) 117 | xy1_1 = torch.abs(x1_1-y1_1) 118 | xy2_1 = torch.abs(x2_1-y2_1) 119 | xy4_0 = torch.abs(x4_0-y4_0) 120 | 121 | xy1_2 = self.conv1_2(torch.cat([xy1_0, xy1_1, self.Up(xy2_1)], 1)) 122 | xy3_1 = self.conv3_1(torch.cat([xy3_0, self.Up(xy4_0)], 1)) 123 | xy0_3 = self.conv0_3(torch.cat([xy0_0, xy0_1, xy0_2, self.Up(xy1_2)], 1)) 124 | xy2_2 = self.conv2_2(torch.cat([xy2_0, xy2_1, self.Up(xy3_1)], 1)) 125 | xy1_3 = self.conv1_3(torch.cat([xy1_0, xy1_1, xy1_2, self.Up(xy2_2)], 1)) 126 | xy0_4 = self.conv0_4(torch.cat([xy0_0, xy0_1, xy0_2, xy0_3, self.Up(xy1_3)], 1)) 127 | 128 | 129 | 130 | 131 | 132 | 133 | output = self.final(xy0_4) 134 | return output -------------------------------------------------------------------------------- /deeplearning/RemoteImageDataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/2 14:25 5 | @description: 遥感影像数据加载 6 | """ 7 | 8 | from torch.utils.data import Dataset 9 | import glob 10 | import torch 11 | from PIL import Image 12 | 13 | from torchvision import transforms 14 | #收集文件名 15 | 16 | 17 | #读取GAN数据集 18 | class CollectFile(): 19 | def __init__(self,path): 20 | self.before_file_list = glob.glob(path+"/A/*.jpg") 21 | self.after_file_list = glob.glob(path+"/B/*.jpg") 22 | self.result_file_list = glob.glob(path+"/OUT/*.jpg") 23 | 24 | def __getitem__(self,index): 25 | return (self.before_file_list[index],self.after_file_list[index],self.result_file_list[index]) 26 | 27 | def __len__(self): 28 | return len(self.result_file_list) 29 | 30 | #OSCD数据集加载 31 | class OscdFile(): 32 | def __init__(self,root): 33 | self.before_file_list = glob.glob(root + r"/*before.png") 34 | self.after_file_list = glob.glob(root + r"/*after.png") 35 | self.mask_file_list = glob.glob(root + r"/*mask.png") 36 | 37 | def __getitem__(self, index): 38 | return (self.before_file_list[index], self.after_file_list[index], self.mask_file_list[index]) 39 | 40 | def __len__(self): 41 | return len(self.mask_file_list) 42 | 43 | class RemoteImageDataset(Dataset): 44 | def __init__(self,path): 45 | super(RemoteImageDataset).__init__() 46 | #self.file_list = CollectFile(path) 47 | self.file_list = OscdFile(path) 48 | def __len__(self): 49 | return len(self.file_list) 50 | def __getitem__(self,index): 51 | #print(index) 52 | image_arrays = torch.FloatTensor(1,3).zero_() 53 | 54 | 55 | #image_arrays[i] = Image.open(data[index]) 56 | #print(self.file_list[index]) 57 | before = transforms.ToTensor()(Image.open(self.file_list[index][0])) 58 | after = transforms.ToTensor()(Image.open(self.file_list[index][1])) 59 | change = transforms.ToTensor()(Image.open(self.file_list[index][2])) 60 | # n = before.numpy() 61 | # c = change.numpy() 62 | 63 | return before, after, change 64 | 65 | from torch.utils.data import DataLoader 66 | if __name__ == "__main__": 67 | loader = DataLoader(RemoteImageDataset(r"E:\迅雷下载\ChangeDetectionDataset\ChangeDetectionDataset\Real\subset\train"),batch_size=4) 68 | #print(iter(loader)) 69 | 70 | for a,b,c in loader: 71 | print(a) 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | -------------------------------------------------------------------------------- /deeplearning/Siam_Cat_Unet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.nn.modules.padding import ReplicationPad2d 5 | 6 | class SiamUnet_conc(nn.Module): 7 | """SiamUnet_conc segmentation network.""" 8 | 9 | def __init__(self, input_nbr, label_nbr): 10 | super(SiamUnet_conc, self).__init__() 11 | 12 | self.input_nbr = input_nbr 13 | 14 | self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1) 15 | self.bn11 = nn.BatchNorm2d(16) 16 | self.do11 = nn.Dropout2d(p=0.2) 17 | self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1) 18 | self.bn12 = nn.BatchNorm2d(16) 19 | self.do12 = nn.Dropout2d(p=0.2) 20 | 21 | self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1) 22 | self.bn21 = nn.BatchNorm2d(32) 23 | self.do21 = nn.Dropout2d(p=0.2) 24 | self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1) 25 | self.bn22 = nn.BatchNorm2d(32) 26 | self.do22 = nn.Dropout2d(p=0.2) 27 | 28 | self.conv31 = nn.Conv2d(32, 64, kernel_size=3, padding=1) 29 | self.bn31 = nn.BatchNorm2d(64) 30 | self.do31 = nn.Dropout2d(p=0.2) 31 | self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1) 32 | self.bn32 = nn.BatchNorm2d(64) 33 | self.do32 = nn.Dropout2d(p=0.2) 34 | self.conv33 = nn.Conv2d(64, 64, kernel_size=3, padding=1) 35 | self.bn33 = nn.BatchNorm2d(64) 36 | self.do33 = nn.Dropout2d(p=0.2) 37 | 38 | self.conv41 = nn.Conv2d(64, 128, kernel_size=3, padding=1) 39 | self.bn41 = nn.BatchNorm2d(128) 40 | self.do41 = nn.Dropout2d(p=0.2) 41 | self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1) 42 | self.bn42 = nn.BatchNorm2d(128) 43 | self.do42 = nn.Dropout2d(p=0.2) 44 | self.conv43 = nn.Conv2d(128, 128, kernel_size=3, padding=1) 45 | self.bn43 = nn.BatchNorm2d(128) 46 | self.do43 = nn.Dropout2d(p=0.2) 47 | 48 | self.upconv4 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1) 49 | 50 | self.conv43d = nn.ConvTranspose2d(384, 128, kernel_size=3, padding=1) 51 | self.bn43d = nn.BatchNorm2d(128) 52 | self.do43d = nn.Dropout2d(p=0.2) 53 | self.conv42d = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1) 54 | self.bn42d = nn.BatchNorm2d(128) 55 | self.do42d = nn.Dropout2d(p=0.2) 56 | self.conv41d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1) 57 | self.bn41d = nn.BatchNorm2d(64) 58 | self.do41d = nn.Dropout2d(p=0.2) 59 | 60 | self.upconv3 = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1) 61 | 62 | self.conv33d = nn.ConvTranspose2d(192, 64, kernel_size=3, padding=1) 63 | self.bn33d = nn.BatchNorm2d(64) 64 | self.do33d = nn.Dropout2d(p=0.2) 65 | self.conv32d = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1) 66 | self.bn32d = nn.BatchNorm2d(64) 67 | self.do32d = nn.Dropout2d(p=0.2) 68 | self.conv31d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1) 69 | self.bn31d = nn.BatchNorm2d(32) 70 | self.do31d = nn.Dropout2d(p=0.2) 71 | 72 | self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1) 73 | 74 | self.conv22d = nn.ConvTranspose2d(96, 32, kernel_size=3, padding=1) 75 | self.bn22d = nn.BatchNorm2d(32) 76 | self.do22d = nn.Dropout2d(p=0.2) 77 | self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1) 78 | self.bn21d = nn.BatchNorm2d(16) 79 | self.do21d = nn.Dropout2d(p=0.2) 80 | 81 | self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1) 82 | 83 | self.conv12d = nn.ConvTranspose2d(48, 16, kernel_size=3, padding=1) 84 | self.bn12d = nn.BatchNorm2d(16) 85 | self.do12d = nn.Dropout2d(p=0.2) 86 | self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1) 87 | 88 | self.sm = nn.Sigmoid() 89 | 90 | def forward(self, x1, x2): 91 | 92 | """Forward method.""" 93 | # Stage 1 94 | x11 = self.do11(F.relu(self.bn11(self.conv11(x1)))) 95 | x12_1 = self.do12(F.relu(self.bn12(self.conv12(x11)))) 96 | x1p = F.max_pool2d(x12_1, kernel_size=2, stride=2) 97 | 98 | 99 | # Stage 2 100 | x21 = self.do21(F.relu(self.bn21(self.conv21(x1p)))) 101 | x22_1 = self.do22(F.relu(self.bn22(self.conv22(x21)))) 102 | x2p = F.max_pool2d(x22_1, kernel_size=2, stride=2) 103 | 104 | # Stage 3 105 | x31 = self.do31(F.relu(self.bn31(self.conv31(x2p)))) 106 | x32 = self.do32(F.relu(self.bn32(self.conv32(x31)))) 107 | x33_1 = self.do33(F.relu(self.bn33(self.conv33(x32)))) 108 | x3p = F.max_pool2d(x33_1, kernel_size=2, stride=2) 109 | 110 | # Stage 4 111 | x41 = self.do41(F.relu(self.bn41(self.conv41(x3p)))) 112 | x42 = self.do42(F.relu(self.bn42(self.conv42(x41)))) 113 | x43_1 = self.do43(F.relu(self.bn43(self.conv43(x42)))) 114 | x4p = F.max_pool2d(x43_1, kernel_size=2, stride=2) 115 | 116 | 117 | #################################################### 118 | # Stage 1 119 | x11 = self.do11(F.relu(self.bn11(self.conv11(x2)))) 120 | x12_2 = self.do12(F.relu(self.bn12(self.conv12(x11)))) 121 | x1p = F.max_pool2d(x12_2, kernel_size=2, stride=2) 122 | 123 | # Stage 2 124 | x21 = self.do21(F.relu(self.bn21(self.conv21(x1p)))) 125 | x22_2 = self.do22(F.relu(self.bn22(self.conv22(x21)))) 126 | x2p = F.max_pool2d(x22_2, kernel_size=2, stride=2) 127 | 128 | # Stage 3 129 | x31 = self.do31(F.relu(self.bn31(self.conv31(x2p)))) 130 | x32 = self.do32(F.relu(self.bn32(self.conv32(x31)))) 131 | x33_2 = self.do33(F.relu(self.bn33(self.conv33(x32)))) 132 | x3p = F.max_pool2d(x33_2, kernel_size=2, stride=2) 133 | 134 | # Stage 4 135 | x41 = self.do41(F.relu(self.bn41(self.conv41(x3p)))) 136 | x42 = self.do42(F.relu(self.bn42(self.conv42(x41)))) 137 | x43_2 = self.do43(F.relu(self.bn43(self.conv43(x42)))) 138 | x4p = F.max_pool2d(x43_2, kernel_size=2, stride=2) 139 | 140 | 141 | #################################################### 142 | # Stage 4d 143 | x4d = self.upconv4(x4p) 144 | pad4 = ReplicationPad2d((0, x43_1.size(3) - x4d.size(3), 0, x43_1.size(2) - x4d.size(2))) 145 | x4d = torch.cat((pad4(x4d), x43_1, x43_2), 1) 146 | x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d)))) 147 | x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d)))) 148 | x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d)))) 149 | 150 | # Stage 3d 151 | x3d = self.upconv3(x41d) 152 | pad3 = ReplicationPad2d((0, x33_1.size(3) - x3d.size(3), 0, x33_1.size(2) - x3d.size(2))) 153 | x3d = torch.cat((pad3(x3d), x33_1, x33_2), 1) 154 | x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d)))) 155 | x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d)))) 156 | x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d)))) 157 | 158 | # Stage 2d 159 | x2d = self.upconv2(x31d) 160 | pad2 = ReplicationPad2d((0, x22_1.size(3) - x2d.size(3), 0, x22_1.size(2) - x2d.size(2))) 161 | x2d = torch.cat((pad2(x2d), x22_1, x22_2), 1) 162 | x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d)))) 163 | x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d)))) 164 | 165 | # Stage 1d 166 | x1d = self.upconv1(x21d) 167 | pad1 = ReplicationPad2d((0, x12_1.size(3) - x1d.size(3), 0, x12_1.size(2) - x1d.size(2))) 168 | x1d = torch.cat((pad1(x1d), x12_1, x12_2), 1) 169 | x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d)))) 170 | x11d = self.conv11d(x12d) 171 | 172 | return self.sm(x11d) -------------------------------------------------------------------------------- /deeplearning/Siam_Diff_Unet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.nn.modules.padding import ReplicationPad2d 5 | 6 | class SiamUnet_diff(nn.Module): 7 | """SiamUnet_diff segmentation network.""" 8 | 9 | def __init__(self, input_nbr, label_nbr): 10 | super(SiamUnet_diff, self).__init__() 11 | 12 | self.input_nbr = input_nbr 13 | 14 | self.conv11 = nn.Conv2d(input_nbr, 16, kernel_size=3, padding=1) 15 | self.bn11 = nn.BatchNorm2d(16) 16 | self.do11 = nn.Dropout2d(p=0.2) 17 | self.conv12 = nn.Conv2d(16, 16, kernel_size=3, padding=1) 18 | self.bn12 = nn.BatchNorm2d(16) 19 | self.do12 = nn.Dropout2d(p=0.2) 20 | 21 | self.conv21 = nn.Conv2d(16, 32, kernel_size=3, padding=1) 22 | self.bn21 = nn.BatchNorm2d(32) 23 | self.do21 = nn.Dropout2d(p=0.2) 24 | self.conv22 = nn.Conv2d(32, 32, kernel_size=3, padding=1) 25 | self.bn22 = nn.BatchNorm2d(32) 26 | self.do22 = nn.Dropout2d(p=0.2) 27 | 28 | self.conv31 = nn.Conv2d(32, 64, kernel_size=3, padding=1) 29 | self.bn31 = nn.BatchNorm2d(64) 30 | self.do31 = nn.Dropout2d(p=0.2) 31 | self.conv32 = nn.Conv2d(64, 64, kernel_size=3, padding=1) 32 | self.bn32 = nn.BatchNorm2d(64) 33 | self.do32 = nn.Dropout2d(p=0.2) 34 | self.conv33 = nn.Conv2d(64, 64, kernel_size=3, padding=1) 35 | self.bn33 = nn.BatchNorm2d(64) 36 | self.do33 = nn.Dropout2d(p=0.2) 37 | 38 | self.conv41 = nn.Conv2d(64, 128, kernel_size=3, padding=1) 39 | self.bn41 = nn.BatchNorm2d(128) 40 | self.do41 = nn.Dropout2d(p=0.2) 41 | self.conv42 = nn.Conv2d(128, 128, kernel_size=3, padding=1) 42 | self.bn42 = nn.BatchNorm2d(128) 43 | self.do42 = nn.Dropout2d(p=0.2) 44 | self.conv43 = nn.Conv2d(128, 128, kernel_size=3, padding=1) 45 | self.bn43 = nn.BatchNorm2d(128) 46 | self.do43 = nn.Dropout2d(p=0.2) 47 | 48 | self.upconv4 = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1, stride=2, output_padding=1) 49 | 50 | self.conv43d = nn.ConvTranspose2d(256, 128, kernel_size=3, padding=1) 51 | self.bn43d = nn.BatchNorm2d(128) 52 | self.do43d = nn.Dropout2d(p=0.2) 53 | self.conv42d = nn.ConvTranspose2d(128, 128, kernel_size=3, padding=1) 54 | self.bn42d = nn.BatchNorm2d(128) 55 | self.do42d = nn.Dropout2d(p=0.2) 56 | self.conv41d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1) 57 | self.bn41d = nn.BatchNorm2d(64) 58 | self.do41d = nn.Dropout2d(p=0.2) 59 | 60 | self.upconv3 = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1, stride=2, output_padding=1) 61 | 62 | self.conv33d = nn.ConvTranspose2d(128, 64, kernel_size=3, padding=1) 63 | self.bn33d = nn.BatchNorm2d(64) 64 | self.do33d = nn.Dropout2d(p=0.2) 65 | self.conv32d = nn.ConvTranspose2d(64, 64, kernel_size=3, padding=1) 66 | self.bn32d = nn.BatchNorm2d(64) 67 | self.do32d = nn.Dropout2d(p=0.2) 68 | self.conv31d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1) 69 | self.bn31d = nn.BatchNorm2d(32) 70 | self.do31d = nn.Dropout2d(p=0.2) 71 | 72 | self.upconv2 = nn.ConvTranspose2d(32, 32, kernel_size=3, padding=1, stride=2, output_padding=1) 73 | 74 | self.conv22d = nn.ConvTranspose2d(64, 32, kernel_size=3, padding=1) 75 | self.bn22d = nn.BatchNorm2d(32) 76 | self.do22d = nn.Dropout2d(p=0.2) 77 | self.conv21d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1) 78 | self.bn21d = nn.BatchNorm2d(16) 79 | self.do21d = nn.Dropout2d(p=0.2) 80 | 81 | self.upconv1 = nn.ConvTranspose2d(16, 16, kernel_size=3, padding=1, stride=2, output_padding=1) 82 | 83 | self.conv12d = nn.ConvTranspose2d(32, 16, kernel_size=3, padding=1) 84 | self.bn12d = nn.BatchNorm2d(16) 85 | self.do12d = nn.Dropout2d(p=0.2) 86 | self.conv11d = nn.ConvTranspose2d(16, label_nbr, kernel_size=3, padding=1) 87 | 88 | self.sm = nn.Sigmoid() 89 | 90 | def forward(self, x1, x2): 91 | 92 | 93 | """Forward method.""" 94 | # Stage 1 95 | x11 = self.do11(F.relu(self.bn11(self.conv11(x1)))) 96 | x12_1 = self.do12(F.relu(self.bn12(self.conv12(x11)))) 97 | x1p = F.max_pool2d(x12_1, kernel_size=2, stride=2) 98 | 99 | 100 | # Stage 2 101 | x21 = self.do21(F.relu(self.bn21(self.conv21(x1p)))) 102 | x22_1 = self.do22(F.relu(self.bn22(self.conv22(x21)))) 103 | x2p = F.max_pool2d(x22_1, kernel_size=2, stride=2) 104 | 105 | # Stage 3 106 | x31 = self.do31(F.relu(self.bn31(self.conv31(x2p)))) 107 | x32 = self.do32(F.relu(self.bn32(self.conv32(x31)))) 108 | x33_1 = self.do33(F.relu(self.bn33(self.conv33(x32)))) 109 | x3p = F.max_pool2d(x33_1, kernel_size=2, stride=2) 110 | 111 | # Stage 4 112 | x41 = self.do41(F.relu(self.bn41(self.conv41(x3p)))) 113 | x42 = self.do42(F.relu(self.bn42(self.conv42(x41)))) 114 | x43_1 = self.do43(F.relu(self.bn43(self.conv43(x42)))) 115 | x4p = F.max_pool2d(x43_1, kernel_size=2, stride=2) 116 | 117 | #################################################### 118 | # Stage 1 119 | x11 = self.do11(F.relu(self.bn11(self.conv11(x2)))) 120 | x12_2 = self.do12(F.relu(self.bn12(self.conv12(x11)))) 121 | x1p = F.max_pool2d(x12_2, kernel_size=2, stride=2) 122 | 123 | 124 | # Stage 2 125 | x21 = self.do21(F.relu(self.bn21(self.conv21(x1p)))) 126 | x22_2 = self.do22(F.relu(self.bn22(self.conv22(x21)))) 127 | x2p = F.max_pool2d(x22_2, kernel_size=2, stride=2) 128 | 129 | # Stage 3 130 | x31 = self.do31(F.relu(self.bn31(self.conv31(x2p)))) 131 | x32 = self.do32(F.relu(self.bn32(self.conv32(x31)))) 132 | x33_2 = self.do33(F.relu(self.bn33(self.conv33(x32)))) 133 | x3p = F.max_pool2d(x33_2, kernel_size=2, stride=2) 134 | 135 | # Stage 4 136 | x41 = self.do41(F.relu(self.bn41(self.conv41(x3p)))) 137 | x42 = self.do42(F.relu(self.bn42(self.conv42(x41)))) 138 | x43_2 = self.do43(F.relu(self.bn43(self.conv43(x42)))) 139 | x4p = F.max_pool2d(x43_2, kernel_size=2, stride=2) 140 | 141 | 142 | 143 | # Stage 4d 144 | x4d = self.upconv4(x4p) 145 | pad4 = ReplicationPad2d((0, x43_1.size(3) - x4d.size(3), 0, x43_1.size(2) - x4d.size(2))) 146 | x4d = torch.cat((pad4(x4d), torch.abs(x43_1 - x43_2)), 1) 147 | x43d = self.do43d(F.relu(self.bn43d(self.conv43d(x4d)))) 148 | x42d = self.do42d(F.relu(self.bn42d(self.conv42d(x43d)))) 149 | x41d = self.do41d(F.relu(self.bn41d(self.conv41d(x42d)))) 150 | 151 | # Stage 3d 152 | x3d = self.upconv3(x41d) 153 | pad3 = ReplicationPad2d((0, x33_1.size(3) - x3d.size(3), 0, x33_1.size(2) - x3d.size(2))) 154 | x3d = torch.cat((pad3(x3d), torch.abs(x33_1 - x33_2)), 1) 155 | x33d = self.do33d(F.relu(self.bn33d(self.conv33d(x3d)))) 156 | x32d = self.do32d(F.relu(self.bn32d(self.conv32d(x33d)))) 157 | x31d = self.do31d(F.relu(self.bn31d(self.conv31d(x32d)))) 158 | 159 | # Stage 2d 160 | x2d = self.upconv2(x31d) 161 | pad2 = ReplicationPad2d((0, x22_1.size(3) - x2d.size(3), 0, x22_1.size(2) - x2d.size(2))) 162 | x2d = torch.cat((pad2(x2d), torch.abs(x22_1 - x22_2)), 1) 163 | x22d = self.do22d(F.relu(self.bn22d(self.conv22d(x2d)))) 164 | x21d = self.do21d(F.relu(self.bn21d(self.conv21d(x22d)))) 165 | 166 | # Stage 1d 167 | x1d = self.upconv1(x21d) 168 | pad1 = ReplicationPad2d((0, x12_1.size(3) - x1d.size(3), 0, x12_1.size(2) - x1d.size(2))) 169 | x1d = torch.cat((pad1(x1d), torch.abs(x12_1 - x12_2)), 1) 170 | x12d = self.do12d(F.relu(self.bn12d(self.conv12d(x1d)))) 171 | x11d = self.conv11d(x12d) 172 | 173 | return self.sm(x11d) -------------------------------------------------------------------------------- /deeplearning/UNet_Plus.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch.nn as nn 3 | import torch 4 | from torchvision import models 5 | import torchvision 6 | class Block(nn.Module): 7 | def __init__(self, in_channels, middle_channels, out_channels, act_func=nn.ReLU(inplace=True)): 8 | super(Block,self).__init__() 9 | self.act_func = act_func 10 | self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1) 11 | self.bn1 = nn.BatchNorm2d(middle_channels) 12 | self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1) 13 | self.bn2 = nn.BatchNorm2d(out_channels) 14 | def forward(self,x): 15 | out = self.conv1(x) 16 | out = self.bn1(out) 17 | out = self.act_func(out) 18 | 19 | out = self.conv2(out) 20 | out = self.bn2(out) 21 | out = self.act_func(out) 22 | 23 | return out 24 | class UNet_Plus(nn.Module): 25 | def __init__(self,input_channel=6): 26 | super(UNet_Plus,self).__init__() 27 | channels = [32, 64, 128, 256, 512] 28 | self.pool = nn.MaxPool2d(2,2) 29 | self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 30 | self.conv0_0 = Block(input_channel,channels[0],channels[0]) 31 | self.conv1_0 = Block(channels[0],channels[1],channels[1]) 32 | self.conv2_0 = Block(channels[1],channels[2],channels[2]) 33 | self.conv3_0 = Block(channels[2],channels[3],channels[3]) 34 | self.conv4_0 = Block(channels[3],channels[4],channels[4]) 35 | 36 | self.conv0_1 = Block(channels[0]+channels[1], channels[0], channels[0]) 37 | self.conv1_1 = Block(channels[1]+channels[2], channels[1], channels[1]) 38 | self.conv2_1 = Block(channels[2]+channels[3], channels[2],channels[2]) 39 | self.conv3_1 = Block(channels[3]+channels[4], channels[3], channels[3]) 40 | 41 | self.conv0_2 = Block(channels[0]*2+channels[1],channels[0],channels[0]) 42 | self.conv1_2 = Block(channels[1]*2+channels[2],channels[1],channels[1]) 43 | self.conv2_2 = Block(channels[2]*2+channels[3],channels[2],channels[2]) 44 | 45 | self.conv0_3 = Block(channels[0]*3+channels[1],channels[0],channels[0]) 46 | self.conv1_3 = Block(channels[1]*3+channels[2],channels[1],channels[1]) 47 | 48 | self.conv0_4 = Block(channels[0]*4+channels[1],channels[0],channels[0]) 49 | self.final = nn.Conv2d(channels[0],1,kernel_size=1) 50 | 51 | 52 | def forward(self,before,after): #输入不同时相的图像 53 | input = torch.cat([before,after],1) 54 | x0_0 = self.conv0_0(input) 55 | x1_0 = self.conv1_0(self.pool(x0_0)) 56 | x0_1 = self.conv0_1(torch.cat([x0_0, self.up(x1_0)], 1)) 57 | 58 | x2_0 = self.conv2_0(self.pool(x1_0)) 59 | x1_1 = self.conv1_1(torch.cat([x1_0, self.up(x2_0)], 1)) 60 | x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up(x1_1)], 1)) 61 | 62 | x3_0 = self.conv3_0(self.pool(x2_0)) 63 | x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], 1)) 64 | x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], 1)) 65 | x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], 1)) 66 | 67 | x4_0 = self.conv4_0(self.pool(x3_0)) 68 | x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1)) 69 | x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], 1)) 70 | x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], 1)) 71 | x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], 1)) 72 | 73 | 74 | output0_4 = self.final(x0_4) 75 | output0_3 = self.final(x0_3) 76 | output0_2 = self.final(x0_2) 77 | output0_1 = self.final(x0_1) 78 | output0_0 = self.final(x0_0) 79 | 80 | return output0_0, output0_1, output0_2, output0_3, output0_4 81 | 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /deeplearning/UpdateNestUnet.py: -------------------------------------------------------------------------------- 1 | # @Time : 2019/12/25 11:12 2 | # @Author : yux 3 | # @Content : 4 | 5 | import torch 6 | import torch.nn as nn 7 | #from Mish import Mish 8 | class conv_block_nested(nn.Module): 9 | 10 | def __init__(self, in_ch, mid_ch, out_ch): 11 | super(conv_block_nested, self).__init__() 12 | self.activation = nn.ReLU(inplace=True) 13 | #self.activation = Mish() 14 | self.conv1 = nn.Conv2d(in_ch, mid_ch, kernel_size=3, padding=1, bias=True) 15 | self.bn1 = nn.BatchNorm2d(mid_ch) 16 | self.conv2 = nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1, bias=True) 17 | self.bn2 = nn.BatchNorm2d(out_ch) 18 | 19 | def forward(self, x): 20 | x = self.conv1(x) 21 | x = self.bn1(x) 22 | x = self.activation(x) 23 | 24 | x = self.conv2(x) 25 | x = self.bn2(x) 26 | output = self.activation(x) 27 | 28 | return output 29 | 30 | class UpdateNestUnet(nn.Module): 31 | 32 | def __init__(self, in_ch=6, out_ch=1): 33 | super(UpdateNestUnet, self).__init__() 34 | 35 | n1 = 64 36 | filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] 37 | 38 | self.pool = nn.MaxPool2d(kernel_size=2, stride=2) 39 | self.Up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) 40 | 41 | self.conv0_0 = conv_block_nested(in_ch, filters[0], filters[0]) 42 | self.conv1_0 = conv_block_nested(filters[0], filters[1], filters[1]) 43 | self.conv2_0 = conv_block_nested(filters[1], filters[2], filters[2]) 44 | self.conv3_0 = conv_block_nested(filters[2], filters[3], filters[3]) 45 | self.conv4_0 = conv_block_nested(filters[3], filters[4], filters[4]) 46 | 47 | self.conv0_1 = conv_block_nested(filters[0] + filters[1], filters[0], filters[0]) 48 | self.conv1_1 = conv_block_nested(filters[1] + filters[2], filters[1], filters[1]) 49 | self.conv2_1 = conv_block_nested(filters[2] + filters[3], filters[2], filters[2]) 50 | self.conv3_1 = conv_block_nested(filters[3] + filters[4], filters[3], filters[3]) 51 | 52 | self.conv0_2 = conv_block_nested(filters[0]*2 + filters[1], filters[0], filters[0]) 53 | self.conv1_2 = conv_block_nested(filters[1]*2 + filters[2], filters[1], filters[1]) 54 | self.conv2_2 = conv_block_nested(filters[2]*2 + filters[3], filters[2], filters[2]) 55 | 56 | self.conv0_3 = conv_block_nested(filters[0]*3 + filters[1], filters[0], filters[0]) 57 | self.conv1_3 = conv_block_nested(filters[1]*3 + filters[2], filters[1], filters[1]) 58 | 59 | self.conv0_4 = conv_block_nested(filters[0]*4 + filters[1], filters[0], filters[0]) 60 | 61 | self.final = nn.Conv2d(filters[0], out_ch, kernel_size=1) 62 | def forward(self, x, y): 63 | 64 | 65 | x0_0 = self.conv0_0(x) 66 | x1_0 = self.conv1_0(self.pool(x0_0)) 67 | x0_1 = self.conv0_1(torch.cat([x0_0, self.Up(x1_0)], 1)) 68 | 69 | x2_0 = self.conv2_0(self.pool(x1_0)) 70 | x1_1 = self.conv1_1(torch.cat([x1_0, self.Up(x2_0)], 1)) 71 | x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.Up(x1_1)], 1)) 72 | 73 | x3_0 = self.conv3_0(self.pool(x2_0)) 74 | x2_1 = self.conv2_1(torch.cat([x2_0, self.Up(x3_0)], 1)) 75 | #x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.Up(x2_1)], 1)) 76 | #x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.Up(x1_2)], 1)) 77 | 78 | x4_0 = self.conv4_0(self.pool(x3_0)) 79 | #x3_1 = self.conv3_1(torch.cat([x3_0, self.Up(x4_0)], 1)) 80 | #x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.Up(x3_1)], 1)) 81 | #x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.Up(x2_2)], 1)) 82 | #x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.Up(x1_3)], 1)) 83 | 84 | 85 | 86 | 87 | y0_0 = self.conv0_0(y) 88 | y1_0 = self.conv1_0(self.pool(y0_0)) 89 | y0_1 = self.conv0_1(torch.cat([y0_0, self.Up(y1_0)], 1)) 90 | 91 | y2_0 = self.conv2_0(self.pool(y1_0)) 92 | y1_1 = self.conv1_1(torch.cat([y1_0, self.Up(y2_0)], 1)) 93 | y0_2 = self.conv0_2(torch.cat([y0_0, y0_1, self.Up(y1_1)], 1)) 94 | 95 | y3_0 = self.conv3_0(self.pool(y2_0)) 96 | y2_1 = self.conv2_1(torch.cat([y2_0, self.Up(y3_0)], 1)) 97 | #y1_2 = self.conv1_2(torch.cat([y1_0, y1_1, self.Up(y2_1)], 1)) 98 | #y0_3 = self.conv0_3(torch.cat([y0_0, y0_1, y0_2, self.Up(y1_2)], 1)) 99 | 100 | y4_0 = self.conv4_0(self.pool(y3_0)) 101 | #y3_1 = self.conv3_1(torch.cat([y3_0, self.Up(y4_0)], 1)) 102 | #y2_2 = self.conv2_2(torch.cat([y2_0, y2_1, self.Up(y3_1)], 1)) 103 | #y1_3 = self.conv1_3(torch.cat([y1_0, y1_1, y1_2, self.Up(y2_2)], 1)) 104 | #y0_4 = self.conv0_4(torch.cat([y0_0, y0_1, y0_2, y0_3, self.Up(y1_3)], 1)) 105 | 106 | 107 | 108 | #相互融合 109 | xy0_0 = torch.abs(x0_0-y0_0) 110 | xy1_0 = torch.abs(x1_0-y1_0) 111 | xy2_0 = torch.abs(x2_0-y2_0) 112 | xy3_0 = torch.abs(x3_0-y3_0) 113 | 114 | xy0_1 = torch.abs(x0_1-y0_1) 115 | xy0_2 = torch.abs(x0_2-y0_2) 116 | xy1_1 = torch.abs(x1_1-y1_1) 117 | xy2_1 = torch.abs(x2_1-y2_1) 118 | xy4_0 = torch.abs(x4_0-y4_0) 119 | 120 | xy1_2 = self.conv1_2(torch.cat([xy1_0, xy1_1, self.Up(xy2_1)], 1)) 121 | xy3_1 = self.conv3_1(torch.cat([xy3_0, self.Up(xy4_0)], 1)) 122 | xy0_3 = self.conv0_3(torch.cat([xy0_0, xy0_1, xy0_2, self.Up(xy1_2)], 1)) 123 | xy2_2 = self.conv2_2(torch.cat([xy2_0, xy2_1, self.Up(xy3_1)], 1)) 124 | xy1_3 = self.conv1_3(torch.cat([xy1_0, xy1_1, xy1_2, self.Up(xy2_2)], 1)) 125 | xy0_4 = self.conv0_4(torch.cat([xy0_0, xy0_1, xy0_2, xy0_3, self.Up(xy1_3)], 1)) 126 | 127 | output0_4 = self.final(xy0_4) 128 | output0_3 = self.final(xy0_3) 129 | output0_2 = self.final(xy0_2) 130 | output0_1 = self.final(xy0_1) 131 | output0_0 = self.final(xy0_0) 132 | 133 | 134 | 135 | #output = self.final(xy0_4) 136 | 137 | return output0_0, output0_1, output0_2, output0_3, output0_4 -------------------------------------------------------------------------------- /deeplearning/accuracy_cal.py: -------------------------------------------------------------------------------- 1 | import glob 2 | from PIL import Image 3 | import numpy as np 4 | def accuracy(true_path,predict_path): 5 | true_file_list = glob.glob(true_path+"/*.jpg") 6 | preditc_file_list = glob.glob(predict_path+"/*.png") 7 | accuracy = [] 8 | precision = [] 9 | recall = [] 10 | f1 = [] 11 | for i in range(len(true_file_list)): 12 | t_img = np.asarray(Image.open(true_file_list[i])) 13 | t_img.flags.writeable = True 14 | t_img[t_img<=1] = 0 15 | t_img[t_img>1] = 255 16 | p_img = np.asarray(Image.open(preditc_file_list[i]))[:,:,0] 17 | print(np.unique(t_img,return_counts=True)) 18 | print(np.unique(p_img,return_counts=True)) 19 | tp = (t_img == 0) & (p_img == 68) 20 | fn = (t_img == 0) & (p_img == 253) 21 | fp = (t_img == 255) & (p_img == 68) 22 | tn = (t_img == 255) & (p_img == 253) 23 | #print(np.sum(tp)+np.sum(fn)+np.sum(fp)+np.sum(tn)) 24 | accuracy.append((np.sum(tp)+np.sum(tn))/(np.sum(tp)+np.sum(fn)+np.sum(fp)+np.sum(tn))) 25 | pre = np.sum(tp)/(np.sum(tp)+np.sum(fp)) 26 | precision.append(pre) 27 | rec = np.sum(tp)/(np.sum(tp)+np.sum(fn)) 28 | recall.append(rec) 29 | f1.append(2*pre*rec/(pre+rec)) 30 | print(np.mean(np.array(accuracy))) 31 | print(np.mean(np.array(precision))) 32 | print(np.mean(np.nan_to_num(np.array(recall)))) 33 | print(np.mean(np.nan_to_num(np.array(f1)))) 34 | accuracy(r"H:\yux\data\ChangeDetectionDataset\ChangeDetectionDataset\Real\subset\test\OUT",r"H:\yux\data\ChangeDetectionDataset\ChangeDetectionDataset\Real\subset\test\PREDICT") 35 | 36 | -------------------------------------------------------------------------------- /deeplearning/test.py: -------------------------------------------------------------------------------- 1 | from keras import backend as K 2 | import numpy as np 3 | import tensorflow as tf 4 | import math 5 | y_true = tf.constant([[[2.,5.,3]]]) 6 | y_pred = tf.constant([[[0.5,2.,5]]]) 7 | bce = K.binary_crossentropy(y_true, y_pred) 8 | class_loglosses = K.mean(bce, axis=[0, 1,2]) 9 | 10 | class_weights = [0.1, 0.9]#note that the weights can be computed automatically using the training smaples 11 | weighted_bce = K.sum(class_loglosses * K.constant(class_weights)) 12 | 13 | # return K.weighted_binary_crossentropy(y_true, y_pred,pos_weight) + 0.35 * (self.dice_coef_loss(y_true, y_pred)) #not work 14 | 15 | sess = tf.Session() 16 | 17 | print(sess.run([bce,class_loglosses])) 18 | 19 | #print(-(target[0]*math.log(lossinput[0])+(1-target[0])*math.log(1-lossinput[0]))) 20 | print(math.log(10)) 21 | print(-(2.*math.log(0.5)+(1.-2.)*math.log(1.-0.5))) -------------------------------------------------------------------------------- /deeplearning/test2.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/3 22:31 5 | @description: 6 | """ 7 | import torch.nn.functional as F 8 | import torch 9 | from torch.autograd import Variable 10 | loss_fn = torch.nn.BCELoss(reduction='none',weight=torch.tensor([2.,3.,3.])) 11 | input = torch.tensor([2., 3.3,4.]) 12 | target = torch.tensor([1., 0.,5.]) 13 | loss = loss_fn(F.sigmoid(input), target) 14 | print(input) 15 | print(target) 16 | print(loss) 17 | print(loss.mean()) 18 | 19 | -------------------------------------------------------------------------------- /deeplearning/train.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/2 13:45 5 | @description: 训练文件 6 | """ 7 | import torch 8 | from RemoteImageDataset import RemoteImageDataset 9 | from torch.utils.data import DataLoader 10 | from UNet_Plus import UNet_Plus 11 | from Loss.CustomLoss import CustomLoss 12 | from torch import autograd, optim 13 | from UpdateNestUnet import UpdateNestUnet 14 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 15 | print(device) 16 | 17 | def train(epochs): 18 | # model = UNet_Plus(6).to(device) 19 | model = UpdateNestUnet(in_ch = 3).to(device) 20 | optimizer = optim.SGD(model.parameters(), lr=0.1) 21 | for i in range(epochs): 22 | 23 | epoch_loss = 0 24 | # loader = DataLoader(RemoteImageDataset( 25 | # r"E:\迅雷下载\ChangeDetectionDataset\ChangeDetectionDataset\Real\subset\train"),batch_size=5) 26 | loader = DataLoader(RemoteImageDataset( 27 | r"H:\yux\data\256png\train"), batch_size=1) 28 | for a,b,c in loader: 29 | a = a.to(device) 30 | b = b.to(device) 31 | c = c.to(device) 32 | optimizer.zero_grad() 33 | m = model(a,b) 34 | loss = CustomLoss(nmn=0.5)(m,c) 35 | loss.backward() 36 | optimizer.step() 37 | epoch_loss += loss.item() 38 | path = "./Pth/val_no_enhance_conc_adam_l2_%d.pth" % epochs 39 | torch.save(model.state_dict(), path) 40 | print(i,"----->",epoch_loss) 41 | 42 | 43 | 44 | 45 | 46 | 47 | if __name__=="__main__": 48 | train(1805) 49 | 50 | 51 | -------------------------------------------------------------------------------- /deeplearning/valid.py: -------------------------------------------------------------------------------- 1 | """ 2 | @author:yux 3 | @email:yuxer@qq.com 4 | @time:2020/4/11 12:36 5 | @description: 验证模型 6 | """ 7 | from torch.autograd import Variable 8 | from torch.utils.data import DataLoader 9 | 10 | from RemoteImageDataset import RemoteImageDataset 11 | from UNet_Plus import UNet_Plus 12 | from UpdateNestUnet import UpdateNestUnet 13 | import torch 14 | import matplotlib.pyplot as plt 15 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 16 | 17 | def valid(): 18 | # model = UNet_Plus(6).to(device) 19 | model = UpdateNestUnet(in_ch=3).to(device) 20 | model.load_state_dict(torch.load("./Pth/val_no_enhance_conc_adam_l2_1805.pth", map_location='cpu')) 21 | loader = DataLoader(RemoteImageDataset( 22 | r"H:\yux\data\256png\valid"), batch_size=1) 23 | model.eval() 24 | with torch.no_grad(): 25 | for img1, img2, mask in loader: 26 | images1 = Variable(img1) 27 | images2 = Variable(img2) 28 | 29 | output = model(images1, images2) 30 | img_y = torch.squeeze(output[-1]).numpy() 31 | # img_y[img_y >= 0.5] = 255 32 | # img_y[img_y < 0.5] = 0 33 | plt.subplot(1, 2, 1) 34 | plt.imshow(img_y) 35 | plt.subplot(1, 2, 2) 36 | plt.imshow(torch.squeeze(mask).numpy()) 37 | plt.show() 38 | if __name__ == "__main__": 39 | valid() 40 | -------------------------------------------------------------------------------- /readme: -------------------------------------------------------------------------------- 1 | 遥感影像变化检测方法库 2 | 主要包括一些主流的方法: 3 | CVA 4 | MAD 5 | PCA 6 | IRMAD 7 | 以及深度学习相关方法。。。 8 | -------------------------------------------------------------------------------- /spectral.py: -------------------------------------------------------------------------------- 1 | #谱聚类 2 | import numpy as np 3 | import cv2 4 | from sklearn import datasets 5 | from sklearn.cluster import SpectralClustering 6 | from sklearn import metrics 7 | import matplotlib.pyplot as pyplot 8 | #X, y = datasets.make_blobs(n_samples=500, n_features=6, centers=5, cluster_std=[0.4, 0.3, 0.4, 0.3, 0.4], random_state=11) 9 | after = cv2.imread("../../data/abudhabi_5_after.png") 10 | before = cv2.imread("../../data/abudhabi_5_before.png") 11 | X = after - before 12 | X = X[80:160,80:160,:] 13 | (rows,cols,bands ) = X.shape 14 | X= np.reshape(X,(rows*cols,bands))[0:4900,:] 15 | y_pred = cv2.imread("../../data/abudhabi_5_mask.png")[:,:,0] 16 | y_pred = y_pred[80:160,80:160] 17 | 18 | y_pred = np.reshape(y_pred,(rows*cols))[0:4900] 19 | y_pred[y_pred == 255] = 1 20 | 21 | for index, gamma in enumerate((0.1,1,10)): 22 | y_pred = SpectralClustering(n_clusters=2, gamma=gamma).fit_predict(X) 23 | img = np.reshape(y_pred, (70, 70,)) 24 | pyplot.imshow(np.uint8(img)) 25 | pyplot.show() 26 | print("Calinski-Harabasz Score with gamma=", gamma, "n_clusters=", 2,"score:", metrics.calinski_harabaz_score(X, y_pred)) 27 | 28 | print(X) 29 | 30 | --------------------------------------------------------------------------------