├── .idea
├── .gitignore
├── RemoteSensingLab.iml
├── deployment.xml
├── inspectionProfiles
│ ├── Project_Default.xml
│ └── profiles_settings.xml
├── misc.xml
├── modules.xml
└── vcs.xml
├── Config.py
├── DataHelper.py
├── DataSetsTools
├── TIF2RGB.py
├── cover2tif.py
├── crop_cia_image.py
└── show_tif.py
├── ESTARFM.py
├── FSDAF
├── FSDAF.py
├── FSDAF_Preclassification.py
├── isodata.py
├── parameters_fsdaf.yaml
└── utils.py
├── Fit-FC
├── Fit_FC_Python.py
└── functions.py
├── README.md
├── STARFM.py
└── evaluatPointSwin.py
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/RemoteSensingLab.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/deployment.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
18 |
19 |
20 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/Config.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 |
class ConfigForTraining(object):
    """Training configuration for the LGC / CIA spatio-temporal fusion runs.

    Parameters
    ----------
    choice : 'LGC' or 'CIA' — selects dataset root, run-log directory and
        image/patch geometry.
    save_dir_name : sub-directory (under the per-dataset run-log root) that
        receives checkpoints and the training history of this run.
    PATCH_STRIDE : stride (in fine pixels) between training crops.
    batch_size / batch_size_test / num_workers / epochs : loader + loop knobs.
    PATCH_SIZE : side length of square training crops (new optional
        parameter; default matches the previously hard-coded 256).
    """

    def __init__(self, choice, save_dir_name, PATCH_STRIDE=200, batch_size=4, batch_size_test=2,
                 num_workers=8, epochs=500, PATCH_SIZE=256):
        self.choice = choice
        assert self.choice in ['LGC', 'CIA']
        if self.choice == 'LGC':
            self.root_dir = Path("/home/zbl/datasets_paper/LGC/")
            self.save_dir = Path("/home/zbl/datasets/STFusion/RunLog/STFINet/LGC-gan/") / save_dir_name
            self.image_size = [2720, 3200]   # full scene (rows, cols)
            self.patch_size = [340, 400]     # evaluation tile (rows, cols)
        else:
            self.root_dir = Path("/home/zbl/datasets_paper/CIA/")
            self.save_dir = Path("/home/zbl/datasets/STFusion/RunLog/STFINet/CIA-gan/") / save_dir_name
            self.image_size = [2040, 1720]
            self.patch_size = [255, 430]
        # BUG FIX: PATCH_STRIDE used to be ignored — both branches hard-coded
        # 200 (and 256 for PATCH_SIZE).  The constructor arguments are now
        # honored; defaults reproduce the old behavior exactly.
        self.PATCH_SIZE = PATCH_SIZE
        self.PATCH_STRIDE = PATCH_STRIDE
        self.train_dir = self.root_dir / 'train'
        self.val_dir = self.root_dir / 'val'
        self.save_tif_dir = self.save_dir / 'test'
        self.last_h2sp = self.save_dir / 'xnet.pth'   # last checkpoint
        self.best_h2sp = self.save_dir / 'best.pth'   # best checkpoint
        self.csv_history = self.save_dir / 'history.csv'
        self.batch_size = batch_size
        self.batch_size_test = batch_size_test
        self.num_workers = num_workers
        self.epochs = epochs
33 |
class ConfigForTrainingSwin(object):
    """Training configuration for the swinSTFM variants of the LGC/CIA datasets."""

    def __init__(self, choice, save_dir_name, PATCH_SIZE=256, PATCH_STRIDE=200, batch_size=4, batch_size_test=2,
                 num_workers=8, epochs=500):
        self.choice = choice
        assert self.choice in ['LGC', 'CIA']
        if self.choice == 'LGC':
            dataset_root = Path("/home/zbl/datasets_paper/LGC-swinSTFM/")
            run_root = Path("/home/zbl/datasets/STFusion/RunLog/FinePainterNet/LGC/")
            scene, tile = [2720, 3200], [340, 200]
        else:
            dataset_root = Path("/home/zbl/datasets_paper/CIA-swinSTFM/")
            run_root = Path("/home/zbl/datasets/STFusion/RunLog/STFINet/CIA/")
            scene, tile = [2040, 1720], [255, 215]
        self.root_dir = dataset_root
        self.save_dir = run_root / save_dir_name
        self.image_size = scene
        self.patch_size = tile
        # Locations derived from the two roots above.
        self.train_dir = self.root_dir / 'train'
        self.val_dir = self.root_dir / 'val'
        self.save_tif_dir = self.save_dir / 'test'
        self.last_h2sp = self.save_dir / 'xnet.pth'
        self.best_h2sp = self.save_dir / 'best.pth'
        self.csv_history = self.save_dir / 'history.csv'
        # Loader / training-loop knobs, passed straight through.
        self.PATCH_SIZE = PATCH_SIZE
        self.PATCH_STRIDE = PATCH_STRIDE
        self.batch_size = batch_size
        self.batch_size_test = batch_size_test
        self.num_workers = num_workers
        self.epochs = epochs
61 |
class ConfigForTrainingGAN(object):
    """Training configuration for the GAN variant (separate generator /
    discriminator checkpoints)."""

    def __init__(self, choice, save_dir_name, PATCH_SIZE=256, PATCH_STRIDE=200, batch_size=4, batch_size_test=2,
                 num_workers=12, epochs=500):
        self.choice = choice
        assert self.choice in ['LGC', 'CIA']
        is_lgc = self.choice == 'LGC'
        if is_lgc:
            self.root_dir = Path("/home/zbl/datasets_paper/LGC-swinSTFM/")
            self.save_dir = Path("/home/zbl/datasets_paper/RunLog/FinePainterNet-GAN/LGC/") / save_dir_name
            self.image_size = [2720, 3200]
            self.patch_size = [340, 200]
        else:
            self.root_dir = Path("/home/zbl/datasets_paper/CIA-temp")
            self.save_dir = Path("/home/zbl/datasets_paper/RunLog/FinePainterNet-GAN/CIA/") / save_dir_name
            self.image_size = [2040, 1720]
            self.patch_size = [255, 430]
        # Dataset splits and run outputs.
        self.train_dir = self.root_dir / 'train'
        self.val_dir = self.root_dir / 'val'
        self.save_tif_dir = self.save_dir / 'test'
        self.last_g = self.save_dir / 'generator.pth'       # last generator
        self.last_d = self.save_dir / 'discriminator.pth'   # last discriminator
        self.best = self.save_dir / 'best.pth'
        self.csv_history = self.save_dir / 'history.csv'
        self.PATCH_SIZE = PATCH_SIZE
        self.PATCH_STRIDE = PATCH_STRIDE
        self.batch_size = batch_size
        self.batch_size_test = batch_size_test
        self.num_workers = num_workers
        self.epochs = epochs
90 |
91 |
class ConfigForEvaluation(object):
    """Pairs each predicted image with its ground-truth reference for the
    LGC / CIA evaluation runs.  predict_img_names[i] is scored against
    ref_img_names[i]."""

    def __init__(self, choice, save_dir_name):
        assert choice in ['LGC', 'CIA']
        self.choice = choice
        self.predict_dir = save_dir_name
        if choice == 'LGC':
            self.ground_truth_dir = "/home/zbl/datasets/STFusion/LGC/LGC_data/refs/"
            self.predict_img_names = [
                'PRED_2005_029_0129-2005_013_0113.tif',
                'PRED_2005_045_0214-2005_029_0129.tif',
                'PRED_2005_045_0214-2005_061_0302.tif',
                'PRED_2005_061_0302-2005_045_0214.tif',
            ]
            self.ref_img_names = ['20050113_TM.tif', '20050129_TM.tif', '20050302_TM.tif', '20050214_TM.tif']
        else:
            self.ground_truth_dir = "/home/zbl/datasets_paper/CIA/refs/"
            self.predict_img_names = [
                'PRED_2001_306_1102-2001_290_1017.tif',
                'PRED_2001_290_1017-2001_306_1102.tif',
                'PRED_2001_306_1102-2001_313_1109.tif',
                'PRED_2001_338_1204-2001_329_1125.tif',
                'PRED_2001_329_1125-2001_338_1204.tif',
            ]
            self.ref_img_names = [
                '20011017_TM.tif',
                '20011102_TM.tif',
                '20011109_TM.tif',
                '20011125_TM.tif',
                '20011204_TM.tif',
            ]
110 |
111 |
class ConfigForEvaluationForSwin(object):
    """Evaluation pairing for the swinSTFM runs: a single predicted image per
    dataset is compared with one Landsat reference."""

    def __init__(self, choice, save_dir_name):
        assert choice in ['LGC', 'CIA']
        self.choice = choice
        self.predict_dir = save_dir_name
        if choice == 'LGC':
            self.ground_truth_dir = "/home/zbl/datasets_paper/LGC-swinSTFM/refs/"
            self.predict_img_names = ['PRED_2004_331_1126-2004_347_1212.tif']
            self.ref_img_names = ['20041212_TM.tif']
        else:
            self.ground_truth_dir = "/home/zbl/datasets_paper/CIA-swinSTFM/refs/"
            # Only the single 2002-01-12 prediction is evaluated for CIA.
            self.predict_img_names = ['PRED_2001_329_1125-2002_012_0112.tif']
            self.ref_img_names = ['20020112_TM.tif']
129 |
class ConfigForEvaluationForMLFF_GAN(object):
    """Evaluation pairing for MLFF-GAN comparisons; same index-aligned
    prediction/reference convention as ConfigForEvaluation."""

    def __init__(self, choice, save_dir_name):
        assert choice in ['LGC', 'CIA']
        self.choice = choice
        self.predict_dir = save_dir_name
        if choice == 'LGC':
            self.ground_truth_dir = "/home/zbl/datasets/STFusion/LGC/LGC_data/refs/"
            self.predict_img_names = [
                'PRED_2005_029_0129-2005_013_0113.tif',
                'PRED_2005_045_0214-2005_029_0129.tif',
                'PRED_2005_045_0214-2005_061_0302.tif',
                'PRED_2005_061_0302-2005_045_0214.tif',
            ]
            self.ref_img_names = ['20050113_TM.tif', '20050129_TM.tif', '20050302_TM.tif', '20050214_TM.tif']
        else:
            self.ground_truth_dir = "/home/zbl/datasets_paper/CIA-temp/refs/"
            self.predict_img_names = [
                'PRED_2002_076_0317-2002_092_0402.tif',
                'PRED_2002_092_0402-2002_101_0411.tif',
                'PRED_2002_101_0411-2002_108_0418.tif',
                'PRED_2002_108_0418-2002_117_0427.tif',
                'PRED_2002_117_0427-2002_124_0504.tif',
            ]
            self.ref_img_names = [
                '20020402_TM.tif',
                '20020411_TM.tif',
                '20020418_TM.tif',
                '20020427_TM.tif',
                '20020504_TM.tif',
            ]
148 |
--------------------------------------------------------------------------------
/DataHelper.py:
--------------------------------------------------------------------------------
1 | """
2 | @Project :RemoteSensingLab
3 | @File :DataHelper.py
4 | @IDE :PyCharm
5 | @Author :paul623
6 | @Date :2024/4/28 13:48
7 | """
8 | import os
9 | from pathlib import Path
10 |
11 |
def get_pair_path(directory_name, root):
    """Locate the four .tif files of one sample pair inside root/directory_name.

    `directory_name` has the form "<ref>-<pred>", each half being
    "<year>_<doy>_<mmdd>".  Slots are filled by filename pattern:
    index 0 matches <year><doy> of the ref date, 1 matches <year><mmdd> of
    the ref date, 2 and 3 likewise for the pred date (presumably
    [ref_modis, ref_landsat, pred_modis, pred_landsat] — order inferred
    from the token patterns; confirm against the dataset layout).
    Returns absolute path strings; raises FileNotFoundError if any slot
    stays empty.
    """
    ref_label, pred_label = directory_name.split('-')
    ref_tok = ref_label.split('_')
    pred_tok = pred_label.split('_')
    patterns = [
        f"*{ref_tok[0]}{ref_tok[1]}*",
        f"*{ref_tok[0]}{ref_tok[2]}*",
        f"*{pred_tok[0]}{pred_tok[1]}*",
        f"*{pred_tok[0]}{pred_tok[2]}*",
    ]
    found = [None] * 4
    for tif in (Path(root) / directory_name).glob('*.tif'):
        # First matching pattern wins, mirroring the original elif chain.
        for slot, pattern in enumerate(patterns):
            if tif.match(pattern):
                found[slot] = str(tif.resolve())
                break
    if None in found:
        raise FileNotFoundError("Not all expected files were found in the directory.")
    return found
35 |
36 |
def getDataLoader(option):
    """Collect the path quadruples of every validation sample of a dataset.

    option : "LGC" or "CIA" — selects the hard-coded validation root.
    Returns (pairs, names): `pairs` is a list of 4-element path lists as
    produced by get_pair_path, `names` the matching sample directory names.
    """
    assert option in ["LGC", "CIA"]
    if option == "LGC":
        root = Path("/home/zbl/datasets_paper/LGC/val/")
    else:
        root = Path("/home/zbl/datasets_paper/CIA/val/")
    # FIX: renamed the accumulator — the original shadowed the builtin `list`.
    pairs = []
    names = []
    for sample_dir in os.listdir(root):
        pairs.append(get_pair_path(sample_dir, root))
        names.append(sample_dir)
    return pairs, names  # each entry: m1, f1, m2, f2 (original note: "2 is target")
49 |
--------------------------------------------------------------------------------
/DataSetsTools/TIF2RGB.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Sun Oct 8 15:09:42 2023
3 | @author: Administrator
4 | """
5 | import numpy as np
6 |
7 |
8 | from osgeo import gdal
9 | import matplotlib.pyplot as plt
10 |
11 |
12 | # 以下为三种拉伸方式,如果不拉伸,图像太黑,拉伸完显示的图像更好看
def optimized_linear(arr):
    """Optimized linear stretch to uint8: the 2.5%/99% percentile anchors are
    pushed outward (10% of the span below, 50% above) before mapping the
    result onto 0-255.  Without a stretch the raw image displays too dark."""
    low, high = np.percentile(arr, (2.5, 99))
    span = high - low
    lo_cut = low - 0.1 * span
    hi_cut = high + 0.5 * span
    stretched = (arr - lo_cut) / (hi_cut - lo_cut) * 255
    return np.uint8(np.clip(stretched, 0, 255))
20 |
21 |
def percent_linear(arr, percent=2):
    """Percent-clip linear stretch: map the [percent, 100-percent] percentile
    range of `arr` onto 0-255 and return uint8."""
    low, high = np.percentile(arr, (percent, 100 - percent))
    stretched = (arr - low) / (high - low) * 255
    return np.uint8(np.clip(stretched, 0, 255))
27 |
28 |
def linear(arr):
    """Min-max linear stretch of `arr` onto 0-255, returned as uint8."""
    low = arr.min()
    high = arr.max()
    stretched = (arr - low) / (high - low) * 255
    return np.uint8(np.clip(stretched, 0, 255))
34 |
35 |
# Display an RGB composite of a predicted GeoTIFF.
path = r"C:\Users\zhuba\Desktop\RunLog\GAN-STFM\CIA\PRED_2002_076_0317-2002_092_0402.tif"
data = gdal.Open(path)  # open the tif file
num_bands = data.RasterCount  # number of bands
print(num_bands)
tmp_img = data.ReadAsArray()  # read every band into a (bands, rows, cols) array
print(tmp_img.shape)
img_rgb = tmp_img.transpose(1, 2, 0)  # (bands, rows, cols) -> (rows, cols, bands)

img_rgb = np.array(img_rgb, dtype=np.uint16)  # pick the storage dtype; np.uint8 also works
# img_rgb = np.array(img_rgb)
# Band composite: assumes blue/green/red live at indices 1/2/3 — TODO confirm
# for this sensor/product.
r = img_rgb[:, :, 3]
g = img_rgb[:, :, 2]
b = img_rgb[:, :, 1]

img_rgb = np.dstack((percent_linear(r), percent_linear(g), percent_linear(b)))  # stretched band combination

# img_rgb = np.array(img_rgb, dtype=np.uint8)

# plt.imshow(img_rgb)
# plt.show()
# plt.axis("off") removes the numbered axes.
plt.figure(dpi=800)
plt.axis("off")
plt.imshow(img_rgb)
plt.show()
--------------------------------------------------------------------------------
/DataSetsTools/cover2tif.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | from pathlib import Path
4 |
5 | import numpy as np
6 | from osgeo import gdal
7 | """
8 | CIA、LGC数据集转换工具
9 | 转成tif
10 | """
11 |
12 | # 依据BIL存储规则,按照存储完一行的所有波段再存储下一行,进行提取并存入数组。
def read_as_bil(dataarr, bands, rows, col):
    """Decode a flat BIL (band-interleaved-by-line) buffer.

    BIL layout: for each row, the `bands` runs of `col` samples are stored
    back to back.  Only the first bands*rows*col elements of `dataarr` are
    consumed (matching the original loop).  Returns a float
    (bands, rows, col) array.

    The original O(rows*bands) Python loop is replaced by a single
    reshape + transpose with identical values and dtype.
    """
    n = bands * rows * col
    bil = np.asarray(dataarr[:n], dtype=float).reshape(rows, bands, col)
    return bil.transpose(1, 0, 2)
24 |
25 |
26 | # 依据BSQ存储规则,按照存储完单波段整幅图像后再存储下一波段的存储方法进行提取并存入数组。
def read_as_bsq(dataarr, bands, rows, col):
    """Decode a flat BSQ (band-sequential) buffer.

    BSQ layout: each band's full rows*col image is stored before the next
    band.  Only the first bands*rows*col elements of `dataarr` are consumed
    (matching the original loop).  Returns a float (bands, rows, col) array.

    A single reshape replaces the original O(bands*rows) Python loop with
    identical values and dtype.
    """
    n = bands * rows * col
    return np.asarray(dataarr[:n], dtype=float).reshape(bands, rows, col)
38 |
39 |
40 | # 依据BIP存储规则,按照一个像素所有波段进行存储完,再存储下一个像素所有波段的存储方法进行提取并存入数组。
def read_as_bip(dataarr, bands, rows, col):
    """Decode a flat BIP (band-interleaved-by-pixel) buffer.

    BIP layout: all bands of one pixel are stored before the next pixel.
    Only the first bands*rows*col elements of `dataarr` are consumed.
    Returns a float (bands, rows, col) array.

    Replaces the original O(rows*col) Python loop (whose `r == 0` special
    case was redundant: r*col + c already equals c there) with one
    reshape + transpose — identical values and dtype.
    """
    n = bands * rows * col
    bip = np.asarray(dataarr[:n], dtype=float).reshape(rows, col, bands)
    return bip.transpose(2, 0, 1)
55 |
56 |
57 |
58 |
# Fixed raster geometry of the CIA scenes: 6 bands, 2040 rows, 1720 columns.
# The reader helpers below use these module-level values.
bands, rows, col = 6, 2040, 1720
60 |
def readInt(path):
    """Read a raw little-endian int16 ``.int`` file and unpack it as BSQ.

    Uses the module-level bands/rows/col geometry.  FIX: the file handle is
    now closed deterministically via a with-block (the original leaked it).
    """
    with open(path, 'rb') as f:
        raw = np.fromfile(f, dtype=np.int16)
    return read_as_bsq(raw, bands, rows, col)
65 |
def readBil(path):
    """Read a raw little-endian int16 ``.bil`` file and unpack it as BIL.

    Uses the module-level bands/rows/col geometry.  FIX: the file handle is
    now closed deterministically via a with-block (the original leaked it).
    """
    with open(path, 'rb') as f:
        raw = np.fromfile(f, dtype=np.int16)
    return read_as_bil(raw, bands, rows, col)
70 |
def saveImg(path, imgarr):
    """Write a (bands, rows, cols) array to `path` as a UInt16 GeoTIFF.

    BUG FIX: the raster used to be created with the module-level ``col`` and
    ``rows`` globals instead of the actual array shape, which breaks (or
    silently mis-sizes) any array whose shape differs from the global
    geometry.  GDAL's Create takes (xsize=cols, ysize=rows).
    """
    datatype = gdal.GDT_UInt16
    bands, high, width = imgarr.shape
    driver = gdal.GetDriverByName("GTiff")
    datas = driver.Create(path, width, high, bands, datatype)
    for i in range(bands):
        datas.GetRasterBand(i + 1).WriteArray(imgarr[i])
    del datas  # dropping the last reference flushes the dataset to disk
79 |
80 |
def read_data_dir(root):
    """Convert every .int / .bil raster under `root` to a GeoTIFF.

    Each sub-folder is named like "<year>_<...>_<date>"; .int files keep
    their base name, .bil files become "<year><date>_TM.tif".
    """
    for folder in os.listdir(root):
        tokens = folder.split('_')
        year, date = tokens[0], tokens[2]
        folder_path = os.path.join(root, folder)
        for filename in os.listdir(folder_path):
            src = os.path.join(folder_path, filename)
            if filename.endswith('.int'):
                dst = os.path.join(folder_path, filename.split('.')[0] + ".tif")
                saveImg(dst, readInt(src))
            elif filename.endswith('.bil'):
                dst = os.path.join(folder_path, year + date + '_TM.tif')
                saveImg(dst, readBil(src))
            print(filename + " 转换成功")
96 |
def delTrashFiles(root):
    """Delete leftover *.xml / *.int / *.bil files from every sub-folder of
    `root`, keeping only the converted rasters."""
    for folder in os.listdir(root):
        folder_path = os.path.join(root, folder)
        for filename in os.listdir(folder_path):
            # endswith accepts a tuple — same predicate as the original or-chain.
            if filename.endswith(('xml', '.int', '.bil')):
                target = os.path.join(folder_path, filename)
                os.remove(target)
                print(f"path:{target}删除成功")
105 |
# Root folder holding one sub-directory per acquisition date.
root_dir = r"D:\CodeLab\CIA0\CIA-v2\all"
if __name__ == "__main__":
    # read_data_dir(root_dir)
    # Step 2 of the workflow: remove the raw files once conversion is done.
    delTrashFiles(root_dir)
110 |
--------------------------------------------------------------------------------
/DataSetsTools/crop_cia_image.py:
--------------------------------------------------------------------------------
1 | '''
2 | https://zhuanlan.zhihu.com/p/407049191
3 | 参照论文
4 | Remote Sensing Image Spatiotemporal Fusion Using a Generative Adversarial Network,确定裁剪大小(行1792,列1280,波段6
5 | '''
6 | import os
7 |
8 | # -*- coding: utf-8 -*-
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | from osgeo import gdal
12 | """
13 | 针对CIA的异常数据进行裁剪
14 | 没什么用~
15 | """
16 |
def crop2tif(path, save_path, row, col):
    """Crop a row x col window out of a GeoTIFF and save it to `save_path`.

    The window starts at a fixed offset (top=20, left=100) in the source.
    Georeferencing (affine transform + projection) is copied from the source
    dataset.  FIX: removed the unused locals (`RasterCount`/`RasterXSize`/
    `RasterYSize` reads whose results were never used — the band count is
    taken from the cropped array's shape).
    """
    top = 20
    left = 100
    img = gdal.Open(path)
    image_geotrans = img.GetGeoTransform()  # affine transform of the source
    image_projetion = img.GetProjection()   # projection of the source
    img_data = img.ReadAsArray()
    imgarr = img_data[:, top:row + top, left:col + left]
    bands, r, c = imgarr.shape
    datatype = gdal.GDT_UInt16
    driver = gdal.GetDriverByName("GTiff")
    datas = driver.Create(save_path, c, r, bands, datatype)

    # Inherit georeferencing from the source.  NOTE(review): if the source has
    # no real georeference the result may not open in ENVI (original comment).
    datas.SetGeoTransform(image_geotrans)
    datas.SetProjection(image_projetion)

    for i in range(bands):
        datas.GetRasterBand(i + 1).WriteArray(imgarr[i])
    del datas
40 |
41 |
# Crop geometry taken from the referenced GAN spatiotemporal-fusion paper:
# 1792 rows x 1280 columns.
rows = 1792
cols = 1280

root_dir = r'D:\CodeLab\CIA0\CIA-v2\all'        # source folders, one per date
cropped_dir = r'D:\CodeLab\CIA0\CIA-v2\cropped'  # destination root (mirrors layout)
if __name__ == "__main__":
    if not os.path.exists(cropped_dir):
        os.makedirs(cropped_dir)
    list_all = os.listdir(root_dir)
    for folder in list_all:
        # Mirror each date folder under the cropped root.
        if not os.path.exists(os.path.join(cropped_dir, folder)):
            os.makedirs(os.path.join(cropped_dir, folder))
        for filename in os.listdir(os.path.join(root_dir, folder)):
            if filename.endswith('tif'):
                path = os.path.join(root_dir, folder, filename)
                save_path = os.path.join(cropped_dir, folder, filename)
                crop2tif(path, save_path, rows, cols)
                print(f"path:{path}转换成功")
60 |
--------------------------------------------------------------------------------
/DataSetsTools/show_tif.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import numpy as np
3 |
4 | import matplotlib.pyplot as plt
5 | from osgeo import gdal
6 |
7 | """
8 | 读取tif并显示
9 | 其实用QGIS很方便的说
10 | """
def test():
    """Demo: open a sample GeoTIFF and display one of its bands."""
    x = r"C:\Users\zhuba\Desktop\fake.tif"
    # Display band index 4 of the image (see show_image below).
    show_image(x, 4)
15 |
16 |
def show_image(imgpath, band=999):
    """Open a raster with GDAL and display one band via matplotlib.

    imgpath: path of the image to show.
    band: 0-based index into the band axis of the read array; the default
          999 is a sentinel meaning "no band given".  Single-band images
          are shown directly, ignoring `band`.
    """
    img = gdal.Open(imgpath)
    bands = img.RasterCount
    img_width = img.RasterXSize
    img_height = img.RasterYSize
    # NOTE(review): this metadata query result is never used.
    imgmata = img.GetMetadataItem("PHOTOMETRIC", "DMD_CREATIONOPTIONLIST")
    print("bands:",bands)
    print("img_width:",img_width)
    print("img_height:",img_height)
    img_data = img.ReadAsArray()
    # Single-band rasters come back as a 2-D array: show them directly.
    if len(img_data.shape) < 3:
        print("this image just has one band ")
        print("img_data:", img_data)
        plt.figure('landsat: img_data')
        plt.imshow(img_data)
        plt.show()
        return
    if band == 999:
        print("please enter a band number! example:show_image_band(img_x,3).")
        return
    # `band` indexes img_data, so it must be strictly below the band count.
    if band >= bands:
        print("out range of bands, it should be < ", bands)
        return
    print("show image in band " + str(band))
    img1_band = img_data[band, 0:img_height, 0:img_width]
    plt.figure('landsat: img_peer_band')
    plt.imshow(img1_band)
    plt.show()
45 |
46 |
47 | if __name__ == "__main__":
48 | test()
--------------------------------------------------------------------------------
/ESTARFM.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | from osgeo import gdal, gdalconst
4 | import os
5 | import datetime
6 | from tkinter import filedialog
7 | import tkinter as tk
8 | import yaml
9 | import idlwrap
10 | import statsmodels.api as sm
11 |
12 | import numpy as np
13 | import os
14 |
15 |
def read_raster(infile):
    """Read every band of a raster into a (bands, rows, cols) float array.

    infile: path of a GDAL-readable image.
    Returns (rows, cols, data) where data has shape (bands, rows, cols).

    FIX: removed the GetScale()/GetOffset()/GetNoDataValue() calls whose
    return values were discarded — dead code in the original.
    """
    gdal.PushErrorHandler('CPLQuietErrorHandler')  # silence GDAL warnings
    gdal.UseExceptions()
    fp = gdal.Open(infile)
    cols = fp.RasterXSize
    rows = fp.RasterYSize
    nb = fp.RasterCount

    data = np.zeros([nb, rows, cols])
    for i in range(0, nb):
        band = fp.GetRasterBand(i + 1)  # GDAL bands are 1-based
        data[i, :, :] = band.ReadAsArray()
    return rows, cols, data
32 |
33 |
def writeimage(bands, path, in_ds):
    """Write a sequence of 2-D bands to `path`, copying georeferencing from `in_ds`.

    bands : sequence/array of 2-D arrays, all the same shape; nothing is
            written when it is None or empty.
    path  : output file name.
    in_ds : path of a reference image; its extension selects the driver
            ('.tif' -> GTiff, '' or '.dat' -> ENVI) and its geotransform
            and projection are copied onto the output.
    """
    suffix = os.path.splitext(in_ds)[-1]
    in_ds = gdal.Open(in_ds)  # rebinds the argument from path string to dataset
    if bands is None or bands.__len__() == 0:
        return
    else:
        band1 = bands[0]
        img_width = band1.shape[1]
        img_height = band1.shape[0]
        num_bands = bands.__len__()

        if num_bands == 1:
            bands = list(bands)

        # Choose the GDAL type from the numpy dtype of the first band.
        if 'int8' in band1.dtype.name:
            datatype = gdal.GDT_Byte
        elif 'int16' in band1.dtype.name:
            datatype = gdal.GDT_UInt16
        else:
            datatype = gdal.GDT_Float32

        if suffix == '.tif':
            driver = gdal.GetDriverByName("GTiff")
        elif suffix == "" or suffix == ".dat":
            driver = gdal.GetDriverByName("ENVI")
        # NOTE(review): any other suffix leaves `driver` unbound and the next
        # line raises NameError — confirm inputs are always .tif/.dat/''.

        dataset = driver.Create(path, img_width, img_height, num_bands, datatype)
        if dataset is not None:
            for i in range(bands.__len__()):
                dataset.GetRasterBand(i + 1).WriteArray(bands[i])
            geoTransform = in_ds.GetGeoTransform()
            dataset.SetGeoTransform(geoTransform)
            proj = in_ds.GetProjection()
            dataset.SetProjection(proj)
68 |
69 | # ******************************************************************************************************
70 | # ESTARFM PROGRAM
71 | # Using two pairs of fine and coarse images
72 | # the program can be used for whole TM scene and VI index product
73 |
74 | # ******************************************************************************************************
75 | # *******************************Set parameters and read input data*************************************
76 |
root = tk.Tk()
root.withdraw()  # hide the bare Tk window; only the file dialogs are shown

# please set the following parameters
# NOTE(review): the parameter file handle is never closed — consider `with open(...)`.
f = open(filedialog.askopenfilename(title=u"Open the parameter settings file:"))
param = yaml.safe_load(f)
w = param['w']  # set the half window size, if 25, the window size is 25*2+1=51 fine pixels
num_class = param['num_class']  # set the estimated number of classes, please set a larger value if blending images with very few bands
DN_min = param['DN_min']  # set the range of DN value of the image,If byte, 0 and 255
DN_max = param['DN_max']
background = param['background']  # set the value of background pixels. 0 means that pixels will be considered as background if one of its bands= 0
patch_long = param['patch_long']  # set the size of each block,if process whole ETM scene, set 500-1000

# set path of a folder to store temporary files
temp_file = filedialog.askdirectory(title=u"Set the temporary folder")
92 |
# open the fine image of the first pair
path1 = filedialog.askopenfilename(title=u"open the fine image of the first pair:")
suffix = os.path.splitext(path1)[-1]
nl, ns, FileName1 = read_raster(path1)
orig_ns = ns
orig_nl = nl
fp = gdal.Open(path1)
nb = fp.RasterCount

# number of blocks in each direction
n_nl = math.ceil(orig_nl / patch_long)
n_ns = math.ceil(orig_ns / patch_long)

# Each row holds [first col, last col, first row, last row] (inclusive) of one
# block.  FIX: `dtype=np.int` was removed from NumPy (deprecated 1.20,
# removed 1.24) — the builtin `int` is the supported spelling and yields the
# same default integer dtype.
ind_patch = np.zeros((n_nl * n_ns, 4), dtype=int)

for i_ns in range(0, n_ns):
    for i_nl in range(0, n_nl):
        ind_patch[n_ns * i_nl + i_ns, 0] = i_ns * patch_long
        ind_patch[n_ns * i_nl + i_ns, 1] = np.min([ns - 1, (i_ns + 1) * patch_long - 1])
        ind_patch[n_ns * i_nl + i_ns, 2] = i_nl * patch_long
        ind_patch[n_ns * i_nl + i_ns, 3] = np.min([nl - 1, (i_nl + 1) * patch_long - 1])

# NOTE(review): Windows-style separator; os.path.join would be portable.
tempoutname = temp_file + '\\temp_F1'

# write every block of the first fine image to the temporary folder
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName1[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1  # georeference every block from the first fine image
    writeimage(data, out_name, fp)
125 |
# open the coarse image of the first pair
path2 = filedialog.askopenfilename(title=u"open the coarse image of the first pair:")
_, _, FileName2 = read_raster(path2)

# tile the coarse image of pair 1 into the same blocks (temp_C1<n>)
tempoutname = temp_file + '\\temp_C1'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName2[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1  # georeference blocks from the first fine image
    writeimage(data, out_name, fp)

# open the fine image of the second pair
path3 = filedialog.askopenfilename(title=u"open the fine image of the second pair:")
_, _, FileName3 = read_raster(path3)

# tile the fine image of pair 2 (temp_F2<n>)
tempoutname = temp_file + '\\temp_F2'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName3[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1
    writeimage(data, out_name, fp)

# open the coarse image of the second pair
path4 = filedialog.askopenfilename(title=u"open the coarse image of the second pair:")
_, _, FileName4 = read_raster(path4)

# tile the coarse image of pair 2 (temp_C2<n>)
tempoutname = temp_file + '\\temp_C2'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName4[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1
    writeimage(data, out_name, fp)

# open the coarse image of the prediction time
path5 = filedialog.askopenfilename(title=u"open the coarse image of the prediction time:")
_, _, FileName5 = read_raster(path5)

# tile the coarse image of the prediction date (temp_C0<n>)
tempoutname = temp_file + '\\temp_C0'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName5[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1
    writeimage(data, out_name, fp)

# *******************************************************
# process each block
# *******************************************************

starttime = datetime.datetime.now()  # the initial time of program running
print('there are total', n_nl*n_ns, 'blocks')
192 |
193 | for isub in range(0, n_nl * n_ns):
194 |
195 | # open each block image
196 |
197 | FileName = temp_file + '\\temp_F1' + str(isub + 1) + suffix
198 | nl, ns, fine1 = read_raster(FileName)
199 |
200 | FileName = temp_file + '\\temp_C1' + str(isub + 1) + suffix
201 | _, _, coarse1 = read_raster(FileName)
202 |
203 | FileName = temp_file + '\\temp_F2' + str(isub + 1) + suffix
204 | _, _, fine2 = read_raster(FileName)
205 |
206 | FileName = temp_file + '\\temp_C2' + str(isub + 1) + suffix
207 | _, _, coarse2 = read_raster(FileName)
208 |
209 | FileName = temp_file + '\\temp_C0' + str(isub + 1) + suffix
210 | _, _, coarse0 = read_raster(FileName)
211 |
212 | fine0 = np.zeros((nb, nl, ns)).astype(float) # place the blended result
213 |
214 | # row index of images
215 | row_index = np.zeros((nl, ns)).astype(int)
216 | for i in range(0, nl):
217 | row_index[i, :] = i
218 |
219 | # column index of images
220 | col_index = np.zeros((nl, ns)).astype(int)
221 | for i in range(0, ns):
222 | col_index[:, i] = i
223 |
224 | # compute the uncertainty,0.2% of each band is uncertain
225 | uncertain = (DN_max*0.002) * np.sqrt(2)
226 |
227 | # compute the threshold of similar pixel seeking
228 | similar_th = np.zeros((2, nb)).astype(float)
229 | for iband in range(0, nb):
230 | similar_th[0, iband] = np.std(fine1[iband, :, :] * 2.0 / num_class)
231 | similar_th[1, iband] = np.std(fine2[iband, :, :] * 2.0 / num_class)
232 |
233 | # compute the distance of each pixel in the window with the target pixel (integrate window)
234 | D_temp1 = w - np.tile((idlwrap.indgen(w*2+1)), (int(w*2+1), 1))
235 | d1 = np.power(D_temp1, 2)
236 | D_temp2 = w - np.tile(idlwrap.indgen(1, w*2+1), (1, int(w*2+1)))
237 | d2 = np.power(D_temp2, 2)
238 | D_D_all = 1.0 + np.sqrt(d1 + d2) / float(w)
239 | D_D_all = D_D_all.flatten()
240 |
241 | # find interaction of valid pixels of all input images: exclude missing pixels and background
242 | valid_index = np.zeros((nl, ns)).astype(int)
243 | ind_valid = np.where((fine1[0, :, :] != background) & (fine2[0, :, :] != background) & (coarse1[0, :, :] != background) \
244 | & (coarse2[0, :, :] != background) & (coarse0[0, :, :] != background))
245 | num_valid = int(int(np.size(ind_valid)) / len(ind_valid))
246 | if num_valid > 0:
247 | valid_index[ind_valid] = 1 # mark good pixels in all images
248 |
249 | for j in range(0, nl): # retrieve each target pixel
250 | for i in range(0, ns):
251 |
252 | if valid_index[j, i] == 1: # do not process the background
253 |
254 | ai = int(np.max([0, i - w]))
255 | bi = int(np.min([ns - 1, i + w]))
256 | aj = int(np.max([0, j - w]))
257 | bj = int(np.min([nl - 1, j + w]))
258 |
259 | ind_wind_valid = np.where((valid_index[aj:bj+1, ai:bi+1]).ravel() == 1)
260 | position_cand = idlwrap.intarr((bi-ai+1)*(bj-aj+1)) + 1 # place the location of each similar pixel
261 | row_wind = row_index[aj:bj+1, ai:bi+1]
262 | col_wind = col_index[aj:bj + 1, ai:bi + 1]
263 |
264 | # searching for similar pixels
265 | for ipair in [0, 1]:
266 | for iband in range(0, nb):
267 | cand_band = idlwrap.intarr((bi-ai+1)*(bj-aj+1))
268 | if ipair == 0:
269 | S_S = np.abs(fine1[iband, aj:bj+1, ai:bi+1] - fine1[iband, j, i])
270 | elif ipair == 1:
271 | S_S = np.abs(fine2[iband, aj:bj + 1, ai:bi + 1] - fine2[iband, j, i])
272 | ind_cand = np.where(S_S.ravel() < similar_th[ipair, iband])
273 | cand_band[ind_cand] = 1
274 | position_cand = position_cand * cand_band
275 |
276 | cand_band = 0
277 | indcand = np.where((position_cand != 0) & ((valid_index[aj:bj+1, ai:bi+1]).ravel() == 1))
278 | number_cand = int(int(np.size(indcand)) / len(indcand))
279 |
280 | if number_cand > 5: # compute the correlation
281 | S_D_cand = np.zeros(number_cand).astype(float)
282 | x_cand = (col_wind.ravel())[indcand]
283 | y_cand = (row_wind.ravel())[indcand]
284 | finecand = np.zeros((nb*2, number_cand)).astype(float)
285 | coarsecand = np.zeros((nb*2, number_cand)).astype(float)
286 |
287 | for ib in range(0, nb):
288 | finecand[ib, :] = (fine1[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]
289 | finecand[ib+nb, :] = (fine2[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]
290 | coarsecand[ib, :] = (coarse1[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]
291 | coarsecand[ib+nb, :] = (coarse2[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]
292 |
293 | if nb == 1: # for images with one band, like NDVI
294 | S_D_cand = 1.0 - 0.5*(np.abs((finecand[0, :]-coarsecand[0, :]) / (finecand[0, :]+coarsecand[0, :])) +
295 | np.abs((finecand[1, :]-coarsecand[1, :]) / (finecand[1, :]+coarsecand[1, :])))
296 | else:
297 | # for images with multiple bands
298 | sdx = np.std(finecand, axis=0, ddof=1)
299 | sdy = np.std(coarsecand, axis=0, ddof=1)
300 | meanx = np.mean(finecand, axis=0)
301 | meany = np.mean(coarsecand, axis=0)
302 |
303 | x_meanx = np.zeros((nb*2, number_cand)).astype(float)
304 | y_meany = np.zeros((nb*2, number_cand)).astype(float)
305 | for ib in range(0, nb*2):
306 | x_meanx[ib, :] = finecand[ib, :] - meanx
307 | y_meany[ib, :] = coarsecand[ib, :] - meany
308 |
309 | S_D_cand = nb*2.0*np.mean(x_meanx*y_meany, axis=0) / (sdx*sdy) / (nb*2.0-1)
310 |
311 | ind_nan = np.where(S_D_cand != S_D_cand)
312 | num_nan = int(int(np.size(ind_nan)) / len(ind_nan))
313 | if num_nan > 0:
314 | S_D_cand[ind_nan] = 0.5 # correct the NaN value of correlation
315 |
316 | D_D_cand = np.zeros(number_cand).astype(float) # spatial distance
317 | if (bi-ai+1)*(bj-aj+1) < (w*2.0+1)*(w*2.0+1): # not an integrate window
318 | D_D_cand = 1.0 + np.sqrt((i-x_cand)**2+(j-y_cand)**2) / w
319 | else:
320 | D_D_cand[0:number_cand] = D_D_all[indcand] # integrate window
321 |
322 | C_D = (1.0-S_D_cand) * D_D_cand + 0.0000001 # combined distance
323 | weight = (1.0/C_D)/np.sum(1.0/C_D)
324 |
325 | for ib in range(0, nb): # compute V
326 | fine_cand = np.hstack(((fine1[ib, aj:bj+1, ai:bi+1]).ravel()[indcand], (fine2[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]))
327 | coarse_cand = np.hstack(((coarse1[ib, aj:bj+1, ai:bi+1]).ravel()[indcand], (coarse2[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]))
328 | coarse_change = np.abs(np.mean((coarse1[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]) - np.mean((coarse2[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]))
329 | if coarse_change >= DN_max*0.02: # to ensure changes in coarse image large enough to obtain the conversion coefficient
330 |
331 | X = coarse_cand.reshape(-1, 1)
332 | Y = fine_cand.reshape(-1, 1)
333 | XX = sm.add_constant(X)
334 | model = sm.OLS(Y, XX).fit()
335 | regress_result = model.params
336 | sig = model.f_pvalue
337 |
338 | # correct the result with no significancy or inconsistent change or too large value
339 | if sig <= 0.05 and 0 < regress_result[1] <= 5:
340 | V_cand = regress_result[1]
341 | else:
342 | V_cand = 1.0
343 |
344 | else:
345 | V_cand = 1.0
346 |
347 | # compute the temporal weight
348 | difc_pair1 = np.abs(np.mean((coarse0[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid])-np.mean((coarse1[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid]))+0.01**5
349 | difc_pair2 = np.abs(np.mean((coarse0[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid])-np.mean((coarse2[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid]))+0.01**5
350 | T_weight1 = (1.0/difc_pair1) / (1.0/difc_pair1+1.0/difc_pair2)
351 | T_weight2 = (1.0/difc_pair2) / (1.0/difc_pair1+1.0/difc_pair2)
352 |
353 | # predict from pair1
354 | coase0_cand = (coarse0[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]
355 | coase1_cand = (coarse1[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]
356 | fine01 = fine1[ib, j, i] + np.sum(weight * V_cand * (coase0_cand-coase1_cand))
357 | # predict from pair2
358 | coase2_cand = (coarse2[ib, aj:bj+1, ai:bi+1]).ravel()[indcand]
359 | fine02 = fine2[ib, j, i] + np.sum(weight * V_cand * (coase0_cand-coase2_cand))
360 | # the final prediction
361 | fine0[ib, j, i] = T_weight1 * fine01 + T_weight2 * fine02
362 | # revise the abnormal prediction
363 | if fine0[ib, j, i] <= DN_min or fine0[ib, j, i] >= DN_max:
364 | fine01 = np.sum(weight*(fine1[ib, aj:bj+1, ai:bi+1]).ravel()[indcand])
365 | fine02 = np.sum(weight*(fine2[ib, aj:bj+1, ai:bi+1]).ravel()[indcand])
366 | fine0[ib, j, i] = T_weight1 * fine01 + T_weight2 * fine02
367 |
368 | else: # for the case of no enough similar pixel selected
369 |
370 | for ib in range(0, nb):
371 | # compute the temporal weight
372 | difc_pair1 = np.mean((coarse0[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid])-np.mean((coarse1[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid])+0.01**5
373 | difc_pair1_a = np.abs(difc_pair1)
374 | difc_pair2 = np.mean((coarse0[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid])-np.mean((coarse2[ib, aj:bj+1, ai:bi+1]).ravel()[ind_wind_valid])+0.01**5
375 | difc_pair2_a = np.abs(difc_pair2)
376 | T_weight1 = (1.0/difc_pair1_a) / (1.0/difc_pair1_a+1.0/difc_pair2_a)
377 | T_weight2 = (1.0/difc_pair2_a) / (1.0/difc_pair1_a+1.0/difc_pair2_a)
378 | fine0[ib, j, i] = T_weight1 * (fine1[ib, j, i] + difc_pair1) + T_weight2 * (fine2[ib, j, i] + difc_pair2)
379 |
380 | print('finish ', str(isub + 1), 'block')
381 | tempoutname1 = temp_file + '\\temp_blended'
382 | Out_Name = tempoutname1 + str(isub + 1) + suffix
383 | fp = path1
384 | writeimage(fine0, Out_Name, fp)
385 |
# ***************************************************************
# mosaic all the blended patches back into a single output image

datalist = []
minx_list = []
maxX_list = []
minY_list = []
maxY_list = []

# collect each blended block file and its pixel extent in the full image;
# ind_patch rows are [col1, col2, row1, row2] of the (overlapping) block
for isub in range(0, n_ns * n_nl):
    out_name = temp_file + '\\temp_blended' + str(isub + 1) + suffix
    datalist.append(out_name)

    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]

    minx_list.append(col1)
    maxX_list.append(col2)
    minY_list.append(row1)
    maxY_list.append(row2)

minX = min(minx_list)
maxX = max(maxX_list)
minY = min(minY_list)
maxY = max(maxY_list)

# offset of each block inside the mosaic
xOffset_list = [int(x - minX) for x in minx_list]
yOffset_list = [int(y - minY) for y in minY_list]

in_ds = gdal.Open(path1)
path = os.path.splitext(path5)[0] + "_ESTARFM" + suffix
if suffix == '.tif':
    driver = gdal.GetDriverByName("GTiff")
elif suffix == "" or suffix == ".dat":
    driver = gdal.GetDriverByName("ENVI")
else:
    # BUG FIX: previously an unknown suffix left `driver` undefined and
    # crashed later with an unrelated NameError; fail loudly instead.
    raise ValueError("unsupported output suffix: " + suffix)
dataset = driver.Create(path, orig_ns, orig_nl, nb, gdal.GDT_Float32)

# write every band of every block at its mosaic offset
for i, data in enumerate(datalist):
    nl, ns, datavalue = read_raster(data)
    for j in range(0, nb):
        dataset.GetRasterBand(j + 1).WriteArray(datavalue[j, :, :], xOffset_list[i], yOffset_list[i])

# copy georeferencing from the input fine image
geoTransform = in_ds.GetGeoTransform()
dataset.SetGeoTransform(geoTransform)
proj = in_ds.GetProjection()
dataset.SetProjection(proj)

# BUG FIX: the dataset handle was never flushed/released, so GDAL could
# leave the output file incomplete; close it explicitly (FSDAF.py already
# does the equivalent `del dataset`).
dataset.FlushCache()
del dataset
--------------------------------------------------------------------------------
/FSDAF/FSDAF.py:
--------------------------------------------------------------------------------
1 | from utils import read_raster, writeimage, read_raster_new
2 | import math
3 | import numpy as np
4 | from osgeo import gdal
5 | import os
6 | import datetime
7 | from tkinter import filedialog
8 | import tkinter as tk
9 | import yaml
10 | import idlwrap
11 | from scipy.interpolate import Rbf
12 | import statsmodels.api as sm
13 | from isodata import myISODATA
14 | from tqdm import tqdm
15 |
def value_locate(refx, x):
    """Vectorized equivalent of IDL's VALUE_LOCATE.

    For each element of ``x``, return the largest index ``i`` such that
    ``refx[i] <= x`` holds, or -1 when no element of ``refx`` satisfies it.
    This mirrors the original per-element loop exactly, including its
    behavior for an unsorted ``refx`` (the *last* qualifying index is
    taken), so it is not restricted to monotonic reference vectors.

    Parameters
    ----------
    refx : array_like
        1-D reference vector.
    x : scalar or array_like
        Value(s) to locate.

    Returns
    -------
    numpy.ndarray of int
        One index per element of ``np.atleast_1d(x)``.
    """
    refx = np.asarray(refx)
    x = np.atleast_1d(x)
    if refx.size == 0:
        # no reference values: nothing can satisfy refx <= x
        return np.full(len(x), -1, dtype=int)
    # mask[k, i] is True where refx[i] <= x[k]
    mask = refx[np.newaxis, :] <= x[:, np.newaxis]
    # index of the last True in each row: argmax on the reversed row finds
    # the first True from the right
    last_true = refx.size - 1 - np.argmax(mask[:, ::-1], axis=1)
    loc = np.where(mask.any(axis=1), last_true, -1)
    return loc.astype(int)
28 |
29 |
30 | # ******************************************************************************************************
31 | # A new spatiotemporal data fusion model
# Using one pair of fine and coarse images
33 | # The program can be used for whole TM scene
34 | # Note: this version requires users to input pre-classified fine image at t1.
35 | # This version is appropriate for areas with complex land cover types and training samples
# so users can first classify the fine image by supervised classifiers like SVM.
37 |
38 | # ******************************************************************************************************
39 | # *******************************Set parameters and read input data*************************************
40 |
# hide the root Tk window so only the file dialogs appear
root = tk.Tk()
root.withdraw()

# please set the following parameters
# NOTE(review): the parameter file handle `f` is never closed; consider
# wrapping it in a `with open(...)` block.
f = open(filedialog.askopenfilename(title=u"Open the parameter settings file:"))
param = yaml.safe_load(f)
w = param['w'] # set the half window size, if 25, the window size is 25*2+1=51
num_similar_pixel = param['num_similar_pixel'] # set number of similar pixels
min_class = param['min_class'] # set the estimated minimum and maximum number of classes
max_class = param['max_class']
num_pure = param['num_pure'] # number of most purest coarse pixels in each class selected for change calculation
DN_min = param['DN_min'] # set the range of DN value of the image. If byte, 0 and 255
DN_max = param['DN_max']
scale_factor = param['scale_factor'] # set the scale factor, it is integer=coarse resolution/fine resolution, e.g., 480/30=16
block_size = param['block_size'] # set the size of block, e.g., 20 means 20*20 coarse pixels, if process whole ETM scene, set 30~50
background = param['background'] # set the value of background pixels. 0 means that pixels will be considered as background if one of its bands= 0
background_band = param['background_band'] # which band (1-based) with value = background indicating background pixels. Sometimes, background pixels have different values in different bands
# parameters for ISODATA classification
I = param['I'] # max number of iterations
maxStdv = param['maxStdv'] # threshold value for standard deviation (for split)
minDis = param['minDis'] # threshold value for pairwise distances (for merge)
minS = param['minS'] # threshold value for min number in each cluster
M = param['M'] # threshold change in the clusters between each iter

# set path of a folder to store temporary files
temp_file = filedialog.askdirectory(title=u"Set the temporary folder")

# open the fine image of the first pair
path1 = filedialog.askopenfilename(title=u"open the fine image of the first pair:")
suffix = os.path.splitext(path1)[-1]  # output format is inferred from this extension
nl, ns, fine1_whole = read_raster(path1)  # (bands, rows, cols)
orig_ns = ns
orig_nl = nl
fp = gdal.Open(path1)
nb = fp.RasterCount  # number of bands

patch_long = block_size*scale_factor  # block edge length in fine pixels

# divide the whole scene into blocks
n_nl = math.ceil(orig_nl / patch_long)
n_ns = math.ceil(orig_ns / patch_long)
82 |
# per-block pixel extents, one row per block; columns are [col1, col2, row1, row2]
ind_patch1 = np.zeros((n_nl * n_ns, 4), dtype=np.int32)  # exact (non-overlapping) block extent
ind_patch = np.zeros((n_nl * n_ns, 4), dtype=np.int32)   # extent expanded by scale_factor on each side to avoid edge effects
location = np.zeros((n_nl * n_ns, 4), dtype=np.int32)    # position of the exact extent inside the expanded block

for i_ns in range(0, n_ns):
    for i_nl in range(0, n_nl):
        # left column: exact edge, expanded edge (clamped to 0), offset of exact inside expanded
        ind_patch1[n_ns * i_nl + i_ns, 0] = i_ns * patch_long
        ind_patch[n_ns * i_nl + i_ns, 0] = np.max([0, ind_patch1[n_ns * i_nl + i_ns, 0] - scale_factor])
        location[n_ns * i_nl + i_ns, 0] = ind_patch1[n_ns * i_nl + i_ns, 0] - ind_patch[n_ns * i_nl + i_ns, 0]

        # right column: both edges clamped to the image width
        ind_patch1[n_ns * i_nl + i_ns, 1] = np.min([ns - 1, (i_ns + 1) * patch_long - 1])
        ind_patch[n_ns * i_nl + i_ns, 1] = np.min([ns - 1, ind_patch1[n_ns * i_nl + i_ns, 1] + scale_factor])
        location[n_ns * i_nl + i_ns, 1] = ind_patch1[n_ns * i_nl + i_ns, 1] - ind_patch1[n_ns * i_nl + i_ns, 0] + location[n_ns * i_nl + i_ns, 0]

        # top row: same scheme vertically
        ind_patch1[n_ns * i_nl + i_ns, 2] = i_nl * patch_long
        ind_patch[n_ns * i_nl + i_ns, 2] = np.max([0, ind_patch1[n_ns * i_nl + i_ns, 2] - scale_factor])
        location[n_ns * i_nl + i_ns, 2] = ind_patch1[n_ns * i_nl + i_ns, 2] - ind_patch[n_ns * i_nl + i_ns, 2]

        # bottom row: both edges clamped to the image height
        ind_patch1[n_ns * i_nl + i_ns, 3] = np.min([nl - 1, (i_nl + 1) * patch_long - 1])
        ind_patch[n_ns * i_nl + i_ns, 3] = np.min([nl - 1, ind_patch1[n_ns * i_nl + i_ns, 3] + scale_factor])
        location[n_ns * i_nl + i_ns, 3] = ind_patch1[n_ns * i_nl + i_ns, 3] - ind_patch1[n_ns * i_nl + i_ns, 2] + location[n_ns * i_nl + i_ns, 2]
104 |
def _write_patches(image, prefix, desc=None):
    """Cut `image` (bands, rows, cols) into the expanded blocks defined by
    `ind_patch` and write each one to `<temp_file>\\<prefix><n><suffix>`,
    georeferenced from the original fine image `path1`.

    `desc`, when given, shows a tqdm progress bar with that label.
    (Replaces three verbatim-duplicated loops.)
    """
    tempoutname = temp_file + '\\' + prefix
    iterator = range(0, n_nl * n_ns)
    if desc is not None:
        iterator = tqdm(iterator, desc=desc)
    for isub in iterator:
        col1 = ind_patch[isub, 0]
        col2 = ind_patch[isub, 1]
        row1 = ind_patch[isub, 2]
        row2 = ind_patch[isub, 3]
        data = image[:, row1:row2 + 1, col1:col2 + 1]
        out_name = tempoutname + str(isub + 1) + suffix
        writeimage(data, out_name, path1)

# cut the fine image of the first pair into blocks
_write_patches(fine1_whole, 'temp_F1')

# open the coarse image of the first pair and cut it into blocks
path2 = filedialog.askopenfilename(title=u"open the coarse image of the first pair:")
_, _, FileName2 = read_raster(path2)
_write_patches(FileName2, 'temp_C1')

# open the coarse image of the prediction time and cut it into blocks
path3 = filedialog.askopenfilename(title=u"open the coarse image of the prediction time:")
_, _, FileName3 = read_raster(path3)
_write_patches(FileName3, 'temp_C0', desc="prepare for prediction")
146 |
# replace background pixels by the mean of non-background pixels so they do
# not distort the classification.
# BUG FIX: the mask was created with `.astype(bytes)`, which yields a
# byte-string array; `background_whole == 0` then never matches element-wise,
# so the mean was taken over an empty selection and produced NaN. Use an
# integer mask instead (1 = background, 0 = valid).
background_whole = np.zeros((nl, ns), dtype=np.uint8)
ind_back = np.where(fine1_whole[background_band - 1, :, :] == background)
num_back = ind_back[0].size  # number of background pixels
if num_back > 0:
    background_whole[ind_back] = 1
    for iband in range(0, nb):
        temp = fine1_whole[iband, :, :]
        # fill background pixels with this band's mean over valid pixels
        temp[ind_back] = np.mean(temp[np.where(background_whole == 0)])
        fine1_whole[iband, :, :] = temp

# save the background-free fine image for the ISODATA step
tempoutname11 = temp_file + "\\fine1_nobackground" + suffix
fp = path1
writeimage(fine1_whole, tempoutname11, fp)

ind_back = 0  # clear this variable
temp = 0  # clear this variable
fine1_whole = 0  # clear this variable
background_whole = 0  # clear this variable
167 |
# ******************************************************************************************************
# *******************************

# step 1: get spectral classes from the fine-resolution image at t1 by ISODATA
print("get spectral classes from fine resolution image at t1 by isodata")
_, _, imagei_new = read_raster_new(tempoutname11)
imagei_new = np.maximum(imagei_new, 0)  # clip negative values before clustering

# run ISODATA
params = {"K": min_class, "I": I, "P": 2, "maxStdv": maxStdv, "minDis": minDis,
          "minS": minS, "M": M}
# params = {"K": min_class, "I": I, "P": 2, "THETA_S": maxStdv, "THETA_C": minDis,
#           "THETA_M": minS, "THETA_O": M}
# labels = isodata_classification(imagei_new, parameters=params)
[labels, centers] = myISODATA(imagei_new, parameters=params)  # NOTE! the classification result is slightly different from the ENVI ISODATA result
labels0 = labels + 1  # shift class labels to start at 1

# cut the classification map into the same expanded blocks as the images
tempoutname = temp_file + '\\class'
for isub in tqdm(range(0, n_nl * n_ns), desc="process image blocks"):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = [labels0[row1:row2 + 1, col1:col2 + 1]]  # wrapped in a list so writeimage sees a band-indexable array
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1
    writeimage(data, out_name, fp)

# *******************************************************
# process each block
# *******************************************************

starttime = datetime.datetime.now()  # the initial time of program running

print('\nthere are total', n_nl*n_ns, 'blocks')
print("process each clock")
204 | for isub in tqdm(range(0, n_nl * n_ns), desc="change prediction and TPS prediction"):
205 |
206 | # open each block image
207 |
208 | FileName = temp_file + '\\temp_F1' + str(isub + 1) + suffix
209 | nl, ns, fine1 = read_raster(FileName)
210 |
211 | FileName = temp_file + '\\temp_C1' + str(isub + 1) + suffix
212 | _, _, coarse1 = read_raster(FileName)
213 |
214 | FileName = temp_file + '\\temp_C0' + str(isub + 1) + suffix
215 | _, _, coarse2 = read_raster(FileName)
216 |
217 | FileName = temp_file + '\\class' + str(isub + 1) + suffix
218 | _, _, L1_class0 = read_raster(FileName)
219 |
220 | num_class = int(np.max(L1_class0))
221 | # recode the classification map if the subset does not have all classes
222 | i_new_c = 0
223 | L1_class = np.zeros((nl, ns)).astype(int)
224 | for iclass in range(0, num_class):
225 |
226 | ind_ic = np.logical_and(L1_class0[0] == iclass + 1, fine1[background_band - 1, :, :] != background)
227 | num_ic = np.sum(ind_ic)
228 |
229 | if num_ic > 0:
230 | L1_class[ind_ic] = i_new_c + 1
231 | i_new_c = i_new_c + 1
232 |
233 | num_class = np.max(L1_class)
234 |
235 | if num_class > 0: # do not process if the whole subset is background
236 |
237 | # correct extreme noise in fine1 because extreme values will affect the allowed data range
238 | for ib in range(0, nb):
239 | fine1_band = fine1[ib, :, :]
240 | fine1_band_1 = fine1_band.flatten()
241 | sortIndex = np.argsort(fine1_band_1, kind='mergesort')
242 | sortIndices = (idlwrap.findgen(float(ns) * nl + 1)) / (float(ns) * nl)
243 | Percentiles = [0.0001, 0.9999]
244 | dataIndices = value_locate(sortIndices, Percentiles)
245 | data_1_4 = fine1_band_1[sortIndex[dataIndices]]
246 | # correct too small values
247 | ind_small = np.logical_or(fine1[ib, :, :] <= data_1_4[0], fine1[ib, :, :] < DN_min)
248 | temp = fine1[ib, :, :]
249 | temp[ind_small] = np.min((fine1[ib, :, :])[np.logical_and(fine1[ib, :, :] > data_1_4[0], fine1[ib, :, :] >= DN_min)])
250 | fine1[ib, :, :] = temp
251 | # correct too large values
252 | ind_large = np.logical_or(fine1[ib, :, :] >= data_1_4[1], fine1[ib, :, :] > DN_max)
253 | temp = fine1[ib, :, :]
254 | temp[ind_large] = np.max((fine1[ib, :, :])[np.logical_and(fine1[ib, :, :] < data_1_4[1], fine1[ib, :, :] <= DN_max)])
255 | fine1[ib, :, :] = temp
256 |
257 | # get index image between coarse and fine resolutions
258 | ii = 0
259 | ns_c = int(np.floor(ns / scale_factor))
260 | nl_c = int(np.floor(nl / scale_factor))
261 | index_f = np.zeros((nl, ns)).astype(int)
262 | index_c = np.zeros((nl_c, ns_c)).astype(int)
263 | for i in range(0, ns_c):
264 | for j in range(0, nl_c):
265 | index_f[j * scale_factor:(j + 1) * scale_factor, i * scale_factor:(i + 1) * scale_factor] = ii
266 | index_c[j, i] = ii
267 | ii = ii + 1.0
268 |
269 | # col and row index
270 | row_ind = np.zeros((nl, ns)).astype(int)
271 | col_ind = np.zeros((nl, ns)).astype(int)
272 | for i in range(0, ns):
273 | col_ind[:, i] = i
274 |
275 | for i in range(0, nl):
276 | row_ind[i, :] = i
277 |
278 | # resample coarse image to coarse resolution
279 | fine_c1 = np.zeros((nb, nl_c, ns_c)).astype(float)
280 | coarse_c1 = np.zeros((nb, nl_c, ns_c)).astype(float)
281 | coarse_c2 = np.zeros((nb, nl_c, ns_c)).astype(float)
282 | row_c = np.zeros((nl_c, ns_c)).astype(float)
283 | col_c = np.zeros((nl_c, ns_c)).astype(float)
284 | for ic in range(0, ns_c):
285 | for jc in range(0, nl_c):
286 | ind_c = np.where(index_f == index_c[jc, ic])
287 | row_c[jc, ic] = np.mean(row_ind[ind_c])
288 | col_c[jc, ic] = np.mean(col_ind[ind_c])
289 | for ib in range(0, nb):
290 | fine_c1[ib, jc, ic] = np.mean((fine1[ib, :, :])[ind_c])
291 | coarse_c1[ib, jc, ic] = np.mean((coarse1[ib, :, :])[ind_c])
292 | coarse_c2[ib, jc, ic] = np.mean((coarse2[ib, :, :])[ind_c])
293 |
294 | # step 2: get fracture of each class within each coarse pixel at t1
295 | Fraction1 = np.zeros((num_class, nl_c, ns_c)).astype(float)
296 | for ic in range(0, ns_c):
297 | for jc in range(0, nl_c):
298 | ind_c = np.where(index_f == index_c[jc, ic])
299 | num_c = int(int(np.size(ind_c)) / len(ind_c))
300 | if num_c == 0:
301 | continue
302 | L1_class_c = L1_class[ind_c]
303 | for iclass in range(0, num_class):
304 | ind_ic = np.where(L1_class_c == iclass+1)
305 | num_ic = int(int(np.size(ind_ic)) / len(ind_ic))
306 | Fraction1[iclass, jc, ic] = num_ic / num_c
307 |
308 | if np.sum(Fraction1[:, jc, ic]) <= 0.999: # avoild pixels have background fine pixels
309 | Fraction1[:, jc, ic] = 0
310 |
311 | # get the heterogenity of each fine pixel
312 | het_index = np.zeros((nl, ns)).astype(float)
313 | scale_d = w
314 |
315 | for i in range(0, ns):
316 | for j in range(0, nl):
317 | # the window location
318 | ai = int(np.max([0, i - scale_d]))
319 | bi = int(np.min([ns - 1, i + scale_d]))
320 | aj = int(np.max([0, j - scale_d]))
321 | bj = int(np.min([nl - 1, j + scale_d]))
322 | class_t = L1_class[j, i]
323 | # select same-class pixels
324 | ind_same_class = np.where(L1_class[aj:bj+1, ai:bi+1] == class_t)
325 | num_sameclass = int(int(np.size(ind_same_class)) / len(ind_same_class))
326 | het_index[j, i] = float(num_sameclass) / ((bi-ai+1.0) * (bj-aj+1.0))
327 |
328 | # step 3: METHOD2:estimate average spectral change of each class using pixels without land cover change
329 | c_rate = np.zeros((nb, num_class)).astype(float)
330 |
331 | # allowed change value for each band
332 | min_allow = np.zeros(nb).astype(float)
333 | max_allow = np.zeros(nb).astype(float)
334 | for ib in range(0, nb):
335 | min_allow[ib] = np.min(coarse_c2[ib, :, :] - coarse_c1[ib, :, :]) - np.std(coarse_c2[ib, :, :] - coarse_c1[ib, :, :])
336 | max_allow[ib] = np.max(coarse_c2[ib, :, :] - coarse_c1[ib, :, :]) + np.std(coarse_c2[ib, :, :] - coarse_c1[ib, :, :])
337 |
338 | for ib in range(0, nb):
339 | x_matrix = np.zeros((num_pure * num_class, num_class)).astype(float)
340 | y_matrix = np.zeros((num_pure * num_class, 1)).astype(float)
341 | ii = 0
342 | for ic in range(0, num_class):
343 | order_s = np.argsort((Fraction1[ic, :, :]).flatten(), kind='mergesort')
344 | order = order_s[::-1]
345 | ind_f = np.where(Fraction1[ic, :, :] > 0.01) # make sure all selected modis pixel contain class i
346 | num_f = int(int(np.size(ind_f)) / len(ind_f))
347 | if num_f == 0:
348 | continue
349 | num_pure1 = np.min([num_f, num_pure])
350 | change_c = (coarse_c2[ib, :, :].flatten())[order[0:num_pure1]] - (coarse_c1[ib, :, :].flatten())[order[0:num_pure1]]
351 |
352 | # only use 0.1-0.9 samples to exclude the land cover change pixels
353 | sortIndex = np.argsort(change_c, kind='mergesort')
354 | sortIndices = (idlwrap.findgen(float(num_pure1+1))) / num_pure1
355 | Percentiles = [0.1, 0.9]
356 | dataIndices = value_locate(sortIndices, Percentiles)
357 | num_indices = len(sortIndex[dataIndices])
358 | data_1_4 = change_c[sortIndex[dataIndices]]
359 | ind_nonchange = np.logical_and(change_c >= data_1_4[0], change_c <= data_1_4[1])
360 | num_nonc = np.sum(ind_nonchange)
361 | if num_nonc > 0:
362 | y_matrix[ii:ii+num_nonc, 0] = change_c[ind_nonchange]
363 | for icc in range(0, num_class):
364 | f_c = (Fraction1[icc, :, :].flatten())[order[0:num_pure1]]
365 | x_matrix[ii:ii+num_nonc, icc] = f_c[ind_nonchange]
366 | ii = ii + num_nonc
367 | x_matrix = x_matrix[0:ii, :]
368 | y_matrix = y_matrix[0:ii, 0]
369 |
370 | model = sm.OLS(y_matrix, x_matrix).fit()
371 | opt = model.params
372 | c_rate[ib, :] = opt
373 |
374 | # step4: predict L2 assuming no land cover change
375 | L2_1 = fine1.copy()
376 | for ic in range(1, num_class+1):
377 | ind_L1_class = np.where(L1_class == ic)
378 | for ib in range(0, nb):
379 | temp = L2_1[ib, :, :]
380 | temp[ind_L1_class] = (fine1[ib, :, :])[ind_L1_class] + c_rate[ib, ic-1]
381 |
382 | # resample L2_1 image to coarse resolution
383 | coarse_c2_p = np.zeros((nb, nl_c, ns_c)).astype(float)
384 | for ic in range(0, ns_c):
385 | for jc in range(0, nl_c):
386 | ind_c = np.where(index_f == index_c[jc, ic])
387 | for ib in range(0, nb):
388 | coarse_c2_p[ib, jc, ic] = np.mean((L2_1[ib, :, :])[ind_c])
389 |
390 | # allowed minmum value for each band at t2
391 | min_allow = np.zeros(nb).astype(float)
392 | max_allow = np.zeros(nb).astype(float)
393 | for ib in range(0, nb):
394 | min_allow0 = np.min([np.min(coarse2[ib, :, :]), np.min(L2_1[ib, :, :])])
395 | min_allow[ib] = np.max([min_allow0, DN_min])
396 | max_allow0 = np.max([np.max(coarse2[ib, :, :]), np.max(L2_1[ib, :, :])])
397 | max_allow[ib] = np.min([max_allow0, DN_max])
398 |
399 | # step5: predict L2 using TPS
400 | L2_tps = np.zeros((nb, nl, ns)).astype(float)
401 | for ib in range(0, nb):
402 | rbf = Rbf(row_c.ravel(), col_c.ravel(), (coarse_c2[ib, :, :]).ravel(), function='multiquadric')
403 | tps = rbf(row_ind.ravel(), col_ind.ravel()).reshape([nl, ns])
404 | L2_tps[ib, :, :] = tps
405 |
406 | # print('finish TPS prediction')
407 |
408 | # step 6: redistribute residual
409 | # change residual
410 | predict_change_c = coarse_c2_p - fine_c1 # predict change
411 | real_change_c = coarse_c2 - coarse_c1 # real change
412 | change_R = real_change_c - predict_change_c
413 |
414 | # redistribution residual
415 | change_21_c = np.zeros((nb, nl_c, ns_c)).astype(float)
416 | change_21 = np.zeros((nb, nl, ns)).astype(float)
417 |
418 | for ic in range(0, ns_c):
419 | for jc in range(0, nl_c):
420 |
421 | ind_c = np.where(index_f == index_c[jc, ic])
422 | num_ii = int(int(np.size(ind_c)) / len(ind_c))
423 |
424 | for ib in range(0, nb):
425 | diff_change = change_R[ib, jc, ic]
426 | w_change_tps = (L2_tps[ib, :, :])[ind_c] - (L2_1[ib, :, :])[ind_c]
427 | if diff_change <= 0:
428 | ind_noc = np.where(w_change_tps > 0)
429 | num_noc = int(int(np.size(ind_noc)) / len(ind_noc))
430 | if num_noc > 0:
431 | w_change_tps[ind_noc] = 0
432 | else:
433 | ind_noc = np.where(w_change_tps < 0)
434 | num_noc = int(int(np.size(ind_noc)) / len(ind_noc))
435 | if num_noc > 0:
436 | w_change_tps[ind_noc] = 0
437 |
438 | w_change_tps = np.abs(w_change_tps)
439 | w_unform = np.zeros(num_ii).astype(float) # evenly distributing residuals to sub-pixels
440 | w_unform[:] = np.abs(diff_change)
441 |
442 | w_change = w_change_tps * het_index[ind_c] + w_unform*(1.0-het_index[ind_c]) + 0.000001 # combine these two weights
443 | w_change = w_change / (np.mean(w_change)) # normalize weight
444 |
445 | # avoid extreme weights
446 | ind_extrem = np.where(w_change > 10)
447 | num_extrem = int(int(np.size(ind_extrem)) / len(ind_extrem))
448 | if num_extrem > 0:
449 | w_change[ind_extrem] = np.mean(w_change)
450 | w_change = w_change / (np.mean(w_change))
451 |
452 | # distribute residuals according to WEIGHT
453 | temp = change_21[ib, :, :]
454 | temp[ind_c] = w_change * diff_change
455 | change_21[ib, :, :] = temp
456 |
457 | # second prediction: L1+change
458 | fine2_2 = L2_1 + change_21
459 | # correct abnormal detected change
460 | for ib in range(0, nb):
461 | temp = fine2_2[ib, :, :]
462 | ind_min = np.where(temp < min_allow[ib])
463 | num_min = int(int(np.size(ind_min)) / len(ind_min))
464 | if num_min > 0:
465 | temp[ind_min] = min_allow[ib]
466 | ind_max = np.where(temp > max_allow[ib])
467 | num_max = int(int(np.size(ind_max)) / len(ind_max))
468 | if num_max > 0:
469 | temp[ind_max] = max_allow[ib]
470 | fine2_2[ib, :, :] = temp
471 |
472 | change_21 = fine2_2 - fine1
473 |
474 | else:
475 | change_21 = fine1 - fine1
476 |
477 | change_21 = change_21[:, location[isub, 2]:location[isub, 3] + 1, location[isub, 0]:location[isub, 1] + 1]
478 |
479 | # print('finish change prediction step ', isub+1, 'block')
480 | tempoutname1 = temp_file + '\\temp_change'
481 | Out_Name = tempoutname1 + str(isub + 1) + suffix
482 | fp = path1
483 | writeimage(change_21, Out_Name, fp)
484 |
485 | # **************************mosaic all the change patch********************************
486 | print("mosaic all the change patch")
487 | datalist = []
488 | minx_list = []
489 | maxX_list = []
490 | minY_list = []
491 | maxY_list = []
492 |
493 | for isub in range(0, n_ns * n_nl):
494 | out_name = temp_file + '/temp_change' + str(isub + 1) + suffix
495 | datalist.append(out_name)
496 |
497 | col1 = ind_patch1[isub, 0]
498 | col2 = ind_patch1[isub, 1]
499 | row1 = ind_patch1[isub, 2]
500 | row2 = ind_patch1[isub, 3]
501 |
502 | minx_list.append(col1)
503 | maxX_list.append(col2)
504 | minY_list.append(row1)
505 | maxY_list.append(row2)
506 |
507 | minX = min(minx_list)
508 | maxX = max(maxX_list)
509 | minY = min(minY_list)
510 | maxY = max(maxY_list)
511 |
512 | xOffset_list = []
513 | yOffset_list = []
514 | i = 0
515 | for data in datalist:
516 | xOffset = int(minx_list[i] - minX)
517 | yOffset = int(minY_list[i] - minY)
518 | xOffset_list.append(xOffset)
519 | yOffset_list.append(yOffset)
520 | i += 1
521 |
522 | in_ds = gdal.Open(path1)
523 | path = temp_file + "/temp_change" + suffix
524 | if suffix == '.tif':
525 | driver = gdal.GetDriverByName("GTiff")
526 | elif suffix == "":
527 | driver = gdal.GetDriverByName("ENVI")
528 | dataset = driver.Create(path, orig_ns, orig_nl, nb, gdal.GDT_Float32)
529 |
530 | i = 0
531 | for data in datalist:
532 | nl, ns, datavalue = read_raster(data)
533 | for j in range(0, nb):
534 | dataset.GetRasterBand(j + 1).WriteArray(datavalue[j], xOffset_list[i], yOffset_list[i])
535 | i += 1
536 |
537 | geoTransform = in_ds.GetGeoTransform()
538 | dataset.SetGeoTransform(geoTransform)
539 | proj = in_ds.GetProjection()
540 | dataset.SetProjection(proj)
541 |
542 | del dataset
543 |
544 |
545 | # *******************************step 5: final prediction*********************************
546 | print("final prediction")
547 | FileName6 = temp_file + "\\temp_change" + suffix
548 | _, _, change = read_raster(FileName6)
549 |
550 | tempoutname = temp_file + '\\temp_change'
551 | for isub in range(0, n_nl * n_ns):
552 | col1 = ind_patch[isub, 0]
553 | col2 = ind_patch[isub, 1]
554 | row1 = ind_patch[isub, 2]
555 | row2 = ind_patch[isub, 3]
556 | data = change[:, row1:row2 + 1, col1:col2 + 1]
557 | out_name = tempoutname + str(isub + 1) + suffix
558 | fp = path1
559 | writeimage(data, out_name, fp)
560 |
# ---- final prediction: for each overlapped patch, blend the fine image at t1
# with the mosaicked change image via similar-pixel weighting ----
for isub in range(0, n_nl * n_ns):
    # print('final prediction: ', str(isub + 1), 'block')
    # open each block image

    FileName = temp_file + '\\temp_F1' + str(isub + 1) + suffix
    nl, ns, fine1 = read_raster(FileName)

    FileName = temp_file + '\\temp_C1' + str(isub + 1) + suffix
    _, _, coarse1 = read_raster(FileName)

    FileName = temp_file + '\\temp_C0' + str(isub + 1) + suffix
    _, _, coarse2 = read_raster(FileName)

    FileName = temp_file + '\\class' + str(isub + 1) + suffix
    _, _, L1_class = read_raster(FileName)

    FileName = temp_file + '\\temp_change' + str(isub + 1) + suffix
    _, _, change_21 = read_raster(FileName)

    # place the blended result (only the non-overlapped core of the patch)
    fine2 = np.zeros([nb, location[isub, 3]-location[isub, 2]+1, location[isub, 1]-location[isub, 0]+1]).astype(float)

    # compute the distance of each pixel in the window with the target pixel (integrate window)
    D_temp1 = w - np.tile((idlwrap.indgen(w*2+1)), (int(w*2+1), 1))
    d1 = np.power(D_temp1, 2)
    D_temp2 = w - np.tile(idlwrap.indgen(1, w*2+1), (1, int(w*2+1)))
    d2 = np.power(D_temp2, 2)
    D_D_all = np.sqrt(d1 + d2)
    D_D_all = D_D_all.flatten()

    # per-band similarity threshold: 2*stddev/num_class
    # NOTE(review): num_class is left over from the last iteration of the
    # change-prediction loop above — confirm this is intended for every patch
    similar_th = np.zeros(nb).astype(float)
    for iband in range(0, nb):
        similar_th[iband] = np.std(fine1[iband, :, :]) * 2.0 / float(num_class)

    for i in tqdm(range(location[isub, 0], location[isub, 1] + 1), desc=f"[{str(isub+1)}/{n_nl * n_ns}]retrieve each target pixel"): # retrieve each target pixel
        for j in range(location[isub, 2], location[isub, 3] + 1):
            if fine1[background_band - 1, j, i] != background: # do not process the background

                ai = int(np.max([0, i - w]))  # moving-window bounds, clipped at the patch edge
                bi = int(np.min([ns - 1, i + w]))
                aj = int(np.max([0, j - w]))
                bj = int(np.min([nl - 1, j + w]))

                ci = i - ai # location of target pixel inside the window
                cj = j - aj

                col_wind = np.tile(idlwrap.indgen(bi-ai+1), (int(bj-aj+1), 1))
                row_wind = np.tile(idlwrap.indgen(1, bj-aj+1), (1, int(bi-ai+1)))

                # search similar pixels within window
                similar_cand = np.zeros((bi-ai+1)*(bj-aj+1)).astype(float) # similarity measure between each pixel and the target pixel
                position_cand = np.zeros((bi-ai+1)*(bj-aj+1)).astype(int) + 1 # 1 where a pixel is similar in all bands
                for ib in range(0, nb):
                    cand_band = np.zeros((bi-ai+1)*(bj-aj+1)).astype(int)
                    wind_fine = fine1[ib, aj:bj+1, ai:bi+1]
                    S_S = np.abs(wind_fine - wind_fine[cj, ci])
                    similar_cand = similar_cand + (S_S / (wind_fine[cj, ci] + 0.00000001)).flatten()
                    ind_cand = np.where(S_S.flatten() < similar_th[ib])
                    cand_band[ind_cand] = 1
                    position_cand = position_cand * cand_band  # must be similar in every band

                indcand = np.where(position_cand != 0)
                number_cand0 = int(int(np.size(indcand)) / len(indcand)) # select similar pixel initially
                # spatial distance measure between each validated pixel and the target pixel
                if (bi-ai+1) * (bj-aj+1) < (w*2.0+1) * (w*2.0+1): # not an integrate window
                    distance_cand = np.sqrt((ci-col_wind)**2 + (cj-row_wind)**2) + 0.00001
                else:
                    distance_cand = D_D_all # integrate window

                # add a small weight from spatial distance to spectral distance to avoid all pixels in the window with same similarity. This happens in perfect simulated images
                combine_similar_cand = (similar_cand+0.00001)*(10.0+distance_cand/w).flatten() # spatial distance has very small effect
                order_dis = np.argsort(combine_similar_cand[indcand], kind='mergesort')
                number_cand = np.min([number_cand0, num_similar_pixel])
                ind_same_class = (indcand[0])[order_dis[0:int(number_cand)]] # select the N most similar samples

                # normalize these distances into weights
                D_D_cand = (distance_cand.flatten())[ind_same_class]
                C_D = (1.0+D_D_cand/w) * (similar_cand[ind_same_class]+1.0)
                C_D = 1.0 / C_D
                weight = C_D / np.sum(C_D)

                for iband in range(0, nb):
                    # predict the value as fine1 + weighted change of the similar pixels
                    change_21_win = change_21[iband, aj:bj+1, ai:bi+1]
                    change_cand = (change_21_win.flatten())[ind_same_class]
                    fine2[iband, j-location[isub, 2], i - location[isub, 0]] = fine1[iband, j, i] + np.sum(weight * change_cand)

                    # revise the abnormal prediction
                    # BUGFIX: the list bracket previously closed right after fine1[...],
                    # so DN_min/DN_max were shifted by the coarse-image change before the
                    # max/min; clamp against the complete fallback prediction instead.
                    if fine2[iband, j-location[isub, 2], i - location[isub, 0]] < DN_min:
                        another_predict = np.max([DN_min, fine1[iband, j, i] + coarse2[iband, j, i] - coarse1[iband, j, i]])
                        fine2[iband, j-location[isub, 2], i - location[isub, 0]] = np.min([DN_max, another_predict])

                    if fine2[iband, j-location[isub, 2], i - location[isub, 0]] > DN_max:
                        another_predict = np.min([DN_max, fine1[iband, j, i] + coarse2[iband, j, i] - coarse1[iband, j, i]])
                        fine2[iband, j - location[isub, 2], i - location[isub, 0]] = np.max([DN_min, another_predict])

    # final safety clamp to the valid DN range
    fine2[fine2 < DN_min] = DN_min
    fine2[fine2 > DN_max] = DN_max

    # print('finish final prediction', str(isub+1), 'block')
    tempoutname1 = temp_file + '\\temp_blended'
    Out_Name = tempoutname1 + str(isub+1) + suffix
    fp = path1
    writeimage(fine2, Out_Name, fp)
665 |
endtime = datetime.datetime.now()
# BUGFIX: timedelta.seconds silently drops whole days; total_seconds() is the real runtime
print('time used:', int((endtime - starttime).total_seconds()), 'seconds')

# # ***************************************************************
# # mosaic all the blended patches into the final fused image
print("mosaic all the blended patch")
datalist = []   # per-patch file names
minx_list = []  # patch core extents, in pixels of the full scene
maxX_list = []
minY_list = []
maxY_list = []

for isub in range(0, n_ns * n_nl):
    out_name = temp_file + '\\temp_blended' + str(isub+1) + suffix
    datalist.append(out_name)

    # ind_patch1 holds the non-overlapped (core) extent of each patch
    col1 = ind_patch1[isub, 0]
    col2 = ind_patch1[isub, 1]
    row1 = ind_patch1[isub, 2]
    row2 = ind_patch1[isub, 3]

    minx_list.append(col1)
    maxX_list.append(col2)
    minY_list.append(row1)
    maxY_list.append(row2)

minX = min(minx_list)
maxX = max(maxX_list)
minY = min(minY_list)
maxY = max(maxY_list)

# pixel offset of every patch inside the output mosaic
xOffset_list = []
yOffset_list = []
i = 0
for data in datalist:
    xOffset = int(minx_list[i] - minX)
    yOffset = int(minY_list[i] - minY)
    xOffset_list.append(xOffset)
    yOffset_list.append(yOffset)
    i += 1

in_ds = gdal.Open(path1)
path = os.path.splitext(path3)[0] + "_FSDAF4" + suffix
if suffix == '.tif':
    driver = gdal.GetDriverByName("GTiff")
elif suffix == "":
    driver = gdal.GetDriverByName("ENVI")
dataset = driver.Create(path, orig_ns, orig_nl, nb, gdal.GDT_Float32)

i = 0
for data in datalist:
    nl, ns, datavalue = read_raster(data)
    for j in range(0, nb):
        dd = datavalue[j, :, :]
        dataset.GetRasterBand(j + 1).WriteArray(dd, xOffset_list[i], yOffset_list[i])
    i += 1

# copy georeference from the input fine image
geoTransform = in_ds.GetGeoTransform()
dataset.SetGeoTransform(geoTransform)
proj = in_ds.GetProjection()
dataset.SetProjection(proj)
# BUGFIX: explicitly flush and release the GDAL dataset — the bindings only
# guarantee the data reaches disk once the dataset is flushed/closed
dataset.FlushCache()
del dataset
print("all done!")
728 |
--------------------------------------------------------------------------------
/FSDAF/FSDAF_Preclassification.py:
--------------------------------------------------------------------------------
1 | from utils import read_raster, writeimage
2 | import math
3 | import numpy as np
4 | from osgeo import gdal
5 | import os
6 | import datetime
7 | from tkinter import filedialog
8 | import tkinter as tk
9 | import yaml
10 | import idlwrap
11 | from scipy.interpolate import Rbf
12 | import statsmodels.api as sm
13 |
14 |
def value_locate(refx, x):
    """Mimic IDL's VALUE_LOCATE: for every element of x, return the index of
    the last element of refx that is <= that element, or -1 when no element
    of refx satisfies the condition."""
    refx = np.array(refx)
    x = np.atleast_1d(x)
    loc = np.zeros(len(x), dtype='int')
    for i, xv in enumerate(x):
        hits = np.nonzero(refx <= xv)[0]
        loc[i] = hits[-1] if hits.size > 0 else -1
    return loc
27 |
28 |
29 | # ******************************************************************************************************
# A new spatiotemporal data fusion model (FSDAF)
# Uses one pair of fine and coarse images.
# The program can be applied to a whole TM scene.
# Note: this version requires users to input a pre-classified fine image at t1.
# This version is appropriate for areas with complex land cover types and training samples,
# so users can first classify the fine image with supervised classifiers such as SVM.
36 |
37 | # ******************************************************************************************************
38 | # *******************************Set parameters and read input data*************************************
39 |
# ---- GUI setup: hide the tkinter root window; only the file dialogs are used ----
root = tk.Tk()
root.withdraw()

# please set the following parameters (read from a YAML settings file)
f = open(filedialog.askopenfilename(title=u"Open the parameter settings file:", initialdir=r'F:\Xiaolin lab\python_fsdaf_updated_20210331\py_FSDAF_0331\ToSunyue'))
param = yaml.safe_load(f)
w = param['w']  # half window size; if 25, the search window is 25*2+1=51
num_similar_pixel = param['num_similar_pixel']  # number of similar pixels used for each prediction
num_pure = param['num_pure']  # number of purest coarse pixels in each class selected for change calculation
DN_min = param['DN_min']  # valid DN range of the image; if byte, 0 and 255
DN_max = param['DN_max']
scale_factor = param['scale_factor']  # integer ratio coarse/fine resolution, e.g., 480/30=16
block_size = param['block_size']  # block size in coarse pixels, e.g., 20 means 20*20; for a whole ETM scene use 30~50
background = param['background']  # DN value marking background pixels
background_band = param['background_band']  # band whose value == background flags a background pixel (values may differ per band)

# set path of a folder to store temporary files
temp_file = filedialog.askdirectory(title=u"Set the temporary folder", initialdir=r'F:\Xiaolin lab\python_fsdaf_updated_20210331\py_FSDAF_0331\ToSunyue')

# open the fine image of the first pair; its extension decides the output driver later
path1 = filedialog.askopenfilename(title=u"open the fine image of the first pair:", initialdir=r'F:\Xiaolin lab\python_fsdaf_updated_20210331\py_FSDAF_0331\ToSunyue')
suffix = os.path.splitext(path1)[-1]
nl, ns, FileName1 = read_raster(path1)  # FileName1 is the pixel array (bands, rows, cols)
orig_ns = ns
orig_nl = nl
fp = gdal.Open(path1)
nb = fp.RasterCount  # number of bands
67 |
patch_long = block_size*scale_factor  # patch edge length in fine pixels

# divide the whole scene into blocks
n_nl = math.ceil(orig_nl / patch_long)
n_ns = math.ceil(orig_ns / patch_long)

# ind_patch1: exact (core) extent of each patch as [col1, col2, row1, row2]
# ind_patch:  core extent expanded by one coarse pixel on each side (overlap)
# location:   position of the core inside the overlapped patch
# BUGFIX: np.int was a deprecated alias of the builtin int and was removed in
# NumPy 1.24 (AttributeError on modern numpy); use the builtin directly.
ind_patch1 = np.zeros((n_nl * n_ns, 4), dtype=int)
ind_patch = np.zeros((n_nl * n_ns, 4), dtype=int)
location = np.zeros((n_nl * n_ns, 4), dtype=int)

for i_ns in range(0, n_ns):
    for i_nl in range(0, n_nl):
        ind_patch1[n_ns * i_nl + i_ns, 0] = i_ns * patch_long
        ind_patch[n_ns * i_nl + i_ns, 0] = np.max([0, ind_patch1[n_ns * i_nl + i_ns, 0] - scale_factor])
        location[n_ns * i_nl + i_ns, 0] = ind_patch1[n_ns * i_nl + i_ns, 0] - ind_patch[n_ns * i_nl + i_ns, 0]

        ind_patch1[n_ns * i_nl + i_ns, 1] = np.min([ns - 1, (i_ns + 1) * patch_long - 1])
        ind_patch[n_ns * i_nl + i_ns, 1] = np.min([ns - 1, ind_patch1[n_ns * i_nl + i_ns, 1] + scale_factor])
        location[n_ns * i_nl + i_ns, 1] = ind_patch1[n_ns * i_nl + i_ns, 1] - ind_patch1[n_ns * i_nl + i_ns, 0] + location[n_ns * i_nl + i_ns, 0]

        ind_patch1[n_ns * i_nl + i_ns, 2] = i_nl * patch_long
        ind_patch[n_ns * i_nl + i_ns, 2] = np.max([0, ind_patch1[n_ns * i_nl + i_ns, 2] - scale_factor])
        location[n_ns * i_nl + i_ns, 2] = ind_patch1[n_ns * i_nl + i_ns, 2] - ind_patch[n_ns * i_nl + i_ns, 2]

        ind_patch1[n_ns * i_nl + i_ns, 3] = np.min([nl - 1, (i_nl + 1) * patch_long - 1])
        ind_patch[n_ns * i_nl + i_ns, 3] = np.min([nl - 1, ind_patch1[n_ns * i_nl + i_ns, 3] + scale_factor])
        location[n_ns * i_nl + i_ns, 3] = ind_patch1[n_ns * i_nl + i_ns, 3] - ind_patch1[n_ns * i_nl + i_ns, 2] + location[n_ns * i_nl + i_ns, 2]
95 |
# cut the fine image at t1 into overlapped patches and cache them on disk
tempoutname = temp_file + '\\temp_F1'

for isub in range(n_nl * n_ns):
    col1, col2 = ind_patch[isub, 0], ind_patch[isub, 1]
    row1, row2 = ind_patch[isub, 2], ind_patch[isub, 3]
    data = FileName1[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1  # georeference template
    writeimage(data, out_name, fp)
107 |
# open the coarse image of the first pair (t1) and split it into overlapped patches
path2 = filedialog.askopenfilename(title=u"open the coarse image of the first pair:", initialdir=r'F:\Xiaolin lab\python_fsdaf_updated_20210331\py_FSDAF_0331\ToSunyue')
_, _, FileName2 = read_raster(path2)

tempoutname = temp_file + '\\temp_C1'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName2[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1  # georeference template
    writeimage(data, out_name, fp)
122 |
# open the coarse image of the prediction time (t2) and split it into overlapped patches
path3 = filedialog.askopenfilename(title=u"open the coarse image of the prediction time:", initialdir=r'F:\Xiaolin lab\python_fsdaf_updated_20210331\py_FSDAF_0331\ToSunyue')
_, _, FileName3 = read_raster(path3)

tempoutname = temp_file + '\\temp_C0'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName3[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1  # georeference template
    writeimage(data, out_name, fp)
137 |
# open the pre-classified map of the fine image at t1 and split it into overlapped patches
path4 = filedialog.askopenfilename(title=u"open the class image of fine image in the 1st pair:")
_, _, FileName4 = read_raster(path4)

tempoutname = temp_file + '\\class'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = FileName4[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1  # georeference template
    writeimage(data, out_name, fp)
152 |
# *******************************************************
# process each block (patch)
# *******************************************************

starttime = datetime.datetime.now() # the initial time of program running

print('there are total', n_nl*n_ns, 'blocks')
160 |
for isub in range(0, n_nl * n_ns):

    # ---- open each block image (the overlapped patches written above) ----

    FileName = temp_file + '\\temp_F1' + str(isub + 1) + suffix
    nl, ns, fine1 = read_raster(FileName)

    FileName = temp_file + '\\temp_C1' + str(isub + 1) + suffix
    _, _, coarse1 = read_raster(FileName)

    FileName = temp_file + '\\temp_C0' + str(isub + 1) + suffix
    _, _, coarse2 = read_raster(FileName)

    FileName = temp_file + '\\class' + str(isub + 1) + suffix
    _, _, L1_class0 = read_raster(FileName)

    num_class = int(np.max(L1_class0))
    # recode the classification map if the subset does not have all classes
    # (present classes are renumbered 1..num_class; background pixels stay 0)
    i_new_c = 0
    L1_class = np.zeros((nl, ns)).astype(int)
    for iclass in range(0, num_class):

        # NOTE(review): only band 0 of the class map is used — assumes a single-band classification; confirm
        ind_ic = np.logical_and(L1_class0[0] == iclass + 1, fine1[background_band - 1, :, :] != background)
        num_ic = np.sum(ind_ic)

        if num_ic > 0:
            L1_class[ind_ic] = i_new_c + 1
            i_new_c = i_new_c + 1

    num_class = np.max(L1_class)

    if num_class > 0: # do not process if the whole subset is background

        # correct extreme noise in fine1 because extreme values will affect the allowed data range
        for ib in range(0, nb):
            fine1_band = fine1[ib, :, :]
            fine1_band_1 = fine1_band.flatten()
            sortIndex = np.argsort(fine1_band_1, kind='mergesort')
            # empirical CDF positions; the 0.01%/99.99% percentiles bound valid data
            sortIndices = (idlwrap.findgen(float(ns) * nl + 1)) / (float(ns) * nl)
            Percentiles = [0.0001, 0.9999]
            dataIndices = value_locate(sortIndices, Percentiles)
            data_1_4 = fine1_band_1[sortIndex[dataIndices]]
            # correct too-small values: replace with the smallest in-range value (mutates fine1 in place)
            ind_small = np.logical_or(fine1[ib, :, :] <= data_1_4[0], fine1[ib, :, :] < DN_min)
            temp = fine1[ib, :, :]
            temp[ind_small] = np.min((fine1[ib, :, :])[np.logical_and(fine1[ib, :, :] > data_1_4[0], fine1[ib, :, :] >= DN_min)])
            fine1[ib, :, :] = temp
            # correct too-large values: replace with the largest in-range value
            ind_large = np.logical_or(fine1[ib, :, :] >= data_1_4[1], fine1[ib, :, :] > DN_max)
            temp = fine1[ib, :, :]
            temp[ind_large] = np.max((fine1[ib, :, :])[np.logical_and(fine1[ib, :, :] < data_1_4[1], fine1[ib, :, :] <= DN_max)])
            fine1[ib, :, :] = temp
213 |
        # get index image between coarse and fine resolutions:
        # index_f labels every fine pixel with the id of its coarse pixel,
        # index_c holds the same ids on the coarse grid
        ii = 0
        ns_c = int(np.floor(ns / scale_factor))
        nl_c = int(np.floor(nl / scale_factor))
        index_f = np.zeros((nl, ns)).astype(int)
        index_c = np.zeros((nl_c, ns_c)).astype(int)
        for i in range(0, ns_c):
            for j in range(0, nl_c):
                index_f[j * scale_factor:(j + 1) * scale_factor, i * scale_factor:(i + 1) * scale_factor] = ii
                index_c[j, i] = ii
                ii = ii + 1.0

        # col and row index of every fine pixel
        row_ind = np.zeros((nl, ns)).astype(int)
        col_ind = np.zeros((nl, ns)).astype(int)
        for i in range(0, ns):
            col_ind[:, i] = i

        for i in range(0, nl):
            row_ind[i, :] = i

        # aggregate (mean) the images to the coarse resolution; row_c/col_c are
        # the centre coordinates of each coarse pixel in fine-pixel units
        fine_c1 = np.zeros((nb, nl_c, ns_c)).astype(float)
        coarse_c1 = np.zeros((nb, nl_c, ns_c)).astype(float)
        coarse_c2 = np.zeros((nb, nl_c, ns_c)).astype(float)
        row_c = np.zeros((nl_c, ns_c)).astype(float)
        col_c = np.zeros((nl_c, ns_c)).astype(float)
        for ic in range(0, ns_c):
            for jc in range(0, nl_c):
                ind_c = np.where(index_f == index_c[jc, ic])
                row_c[jc, ic] = np.mean(row_ind[ind_c])
                col_c[jc, ic] = np.mean(col_ind[ind_c])
                for ib in range(0, nb):
                    fine_c1[ib, jc, ic] = np.mean((fine1[ib, :, :])[ind_c])
                    coarse_c1[ib, jc, ic] = np.mean((coarse1[ib, :, :])[ind_c])
                    coarse_c2[ib, jc, ic] = np.mean((coarse2[ib, :, :])[ind_c])

        # step 2: get the fraction of each class within each coarse pixel at t1
        Fraction1 = np.zeros((num_class, nl_c, ns_c)).astype(float)
        for ic in range(0, ns_c):
            for jc in range(0, nl_c):
                ind_c = np.where(index_f == index_c[jc, ic])
                num_c = int(int(np.size(ind_c)) / len(ind_c))
                L1_class_c = L1_class[ind_c]
                for iclass in range(0, num_class):
                    ind_ic = np.where(L1_class_c == iclass+1)
                    num_ic = int(int(np.size(ind_ic)) / len(ind_ic))
                    Fraction1[iclass, jc, ic] = num_ic / num_c

                if np.sum(Fraction1[:, jc, ic]) <= 0.999: # avoid coarse pixels that contain background fine pixels
                    Fraction1[:, jc, ic] = 0

        # get the heterogeneity of each fine pixel: share of same-class pixels
        # inside a (2*w+1) window around it
        het_index = np.zeros((nl, ns)).astype(float)
        scale_d = w

        for i in range(0, ns):
            for j in range(0, nl):
                # the window location, clipped at the patch edges
                ai = int(np.max([0, i - scale_d]))
                bi = int(np.min([ns - 1, i + scale_d]))
                aj = int(np.max([0, j - scale_d]))
                bj = int(np.min([nl - 1, j + scale_d]))
                class_t = L1_class[j, i]
                # select same-class pixels
                ind_same_class = np.where(L1_class[aj:bj+1, ai:bi+1] == class_t)
                num_sameclass = int(int(np.size(ind_same_class)) / len(ind_same_class))
                het_index[j, i] = float(num_sameclass) / ((bi-ai+1.0) * (bj-aj+1.0))
282 |
        # step 3 (METHOD 2): estimate the average spectral change of each class
        # by regressing coarse-pixel change against class fractions, using only
        # pixels assumed to have no land-cover change
        c_rate = np.zeros((nb, num_class)).astype(float)

        # allowed change value for each band
        min_allow = np.zeros(nb).astype(float)
        max_allow = np.zeros(nb).astype(float)
        for ib in range(0, nb):
            min_allow[ib] = np.min(coarse_c2[ib, :, :] - coarse_c1[ib, :, :]) - np.std(coarse_c2[ib, :, :] - coarse_c1[ib, :, :])
            max_allow[ib] = np.max(coarse_c2[ib, :, :] - coarse_c1[ib, :, :]) + np.std(coarse_c2[ib, :, :] - coarse_c1[ib, :, :])

        for ib in range(0, nb):
            # design matrix: one row per selected pure pixel, one column per class fraction
            x_matrix = np.zeros((num_pure * num_class, num_class)).astype(float)
            y_matrix = np.zeros((num_pure * num_class, 1)).astype(float)
            ii = 0  # number of rows filled so far
            for ic in range(0, num_class):
                # rank coarse pixels by fraction of class ic, descending (purest first)
                order_s = np.argsort((Fraction1[ic, :, :]).flatten(), kind='mergesort')
                order = order_s[::-1]
                ind_f = np.where(Fraction1[ic, :, :] > 0.01) # make sure all selected modis pixels contain class i
                num_f = int(int(np.size(ind_f)) / len(ind_f))
                num_pure1 = np.min([num_f, num_pure])
                change_c = (coarse_c2[ib, :, :].flatten())[order[0:num_pure1]] - (coarse_c1[ib, :, :].flatten())[order[0:num_pure1]]

                # only use 0.1-0.9 percentile samples to exclude the land-cover-change pixels
                sortIndex = np.argsort(change_c, kind='mergesort')
                sortIndices = (idlwrap.findgen(float(num_pure1+1))) / num_pure1
                Percentiles = [0.1, 0.9]
                dataIndices = value_locate(sortIndices, Percentiles)
                data_1_4 = change_c[sortIndex[dataIndices]]
                ind_nonchange = np.logical_and(change_c >= data_1_4[0], change_c <= data_1_4[1])
                num_nonc = np.sum(ind_nonchange)
                if num_nonc > 0:
                    y_matrix[ii:ii+num_nonc, 0] = change_c[ind_nonchange]
                    for icc in range(0, num_class):
                        f_c = (Fraction1[icc, :, :].flatten())[order[0:num_pure1]]
                        x_matrix[ii:ii+num_nonc, icc] = f_c[ind_nonchange]
                    ii = ii + num_nonc
            # trim the preallocated matrices to the rows actually filled
            x_matrix = x_matrix[0:ii, :]
            y_matrix = y_matrix[0:ii, 0]

            # least-squares fit: change per unit class-fraction = per-class change rate
            model = sm.OLS(y_matrix, x_matrix).fit()
            opt = model.params
            c_rate[ib, :] = opt
325 |
        # step 4: temporal prediction — predict L2 assuming no land-cover change
        # by adding each class's change rate to the t1 fine image
        L2_1 = fine1.copy()
        for ic in range(1, num_class+1):
            ind_L1_class = np.where(L1_class == ic)
            for ib in range(0, nb):
                temp = L2_1[ib, :, :]
                temp[ind_L1_class] = (fine1[ib, :, :])[ind_L1_class] + c_rate[ib, ic-1]

        # resample the L2_1 image to the coarse resolution (block means)
        coarse_c2_p = np.zeros((nb, nl_c, ns_c)).astype(float)
        for ic in range(0, ns_c):
            for jc in range(0, nl_c):
                ind_c = np.where(index_f == index_c[jc, ic])
                for ib in range(0, nb):
                    coarse_c2_p[ib, jc, ic] = np.mean((L2_1[ib, :, :])[ind_c])

        # allowed minimum/maximum value for each band at t2
        min_allow = np.zeros(nb).astype(float)
        max_allow = np.zeros(nb).astype(float)
        for ib in range(0, nb):
            min_allow0 = np.min([np.min(coarse2[ib, :, :]), np.min(L2_1[ib, :, :])])
            min_allow[ib] = np.max([min_allow0, DN_min])
            max_allow0 = np.max([np.max(coarse2[ib, :, :]), np.max(L2_1[ib, :, :])])
            max_allow[ib] = np.min([max_allow0, DN_max])

        # step 5: spatial prediction of L2 ("TPS" step, implemented here with a
        # multiquadric RBF interpolation of the coarse t2 image)
        L2_tps = np.zeros((nb, nl, ns)).astype(float)
        for ib in range(0, nb):
            rbf = Rbf(row_c.ravel(), col_c.ravel(), (coarse_c2[ib, :, :]).ravel(), function='multiquadric')
            tps = rbf(row_ind.ravel(), col_ind.ravel()).reshape([nl, ns])
            L2_tps[ib, :, :] = tps

        print('finish TPS prediction')
359 |
        # step 6: redistribute the residual between real and predicted coarse change
        # change residual
        predict_change_c = coarse_c2_p - fine_c1 # predicted change
        real_change_c = coarse_c2 - coarse_c1 # real change
        change_R = real_change_c - predict_change_c

        # redistribution of the residual to fine pixels
        change_21_c = np.zeros((nb, nl_c, ns_c)).astype(float)
        change_21 = np.zeros((nb, nl, ns)).astype(float)

        for ic in range(0, ns_c):
            for jc in range(0, nl_c):

                ind_c = np.where(index_f == index_c[jc, ic])
                num_ii = int(int(np.size(ind_c)) / len(ind_c))

                for ib in range(0, nb):
                    diff_change = change_R[ib, jc, ic]
                    # zero out TPS-based weights whose sign opposes the residual
                    w_change_tps = (L2_tps[ib, :, :])[ind_c] - (L2_1[ib, :, :])[ind_c]
                    if diff_change <= 0:
                        ind_noc = np.where(w_change_tps > 0)
                        num_noc = int(int(np.size(ind_noc)) / len(ind_noc))
                        if num_noc > 0:
                            w_change_tps[ind_noc] = 0
                    else:
                        ind_noc = np.where(w_change_tps < 0)
                        num_noc = int(int(np.size(ind_noc)) / len(ind_noc))
                        if num_noc > 0:
                            w_change_tps[ind_noc] = 0

                    w_change_tps = np.abs(w_change_tps)
                    w_unform = np.zeros(num_ii).astype(float) # evenly distributing residuals to sub-pixels
                    w_unform[:] = np.abs(diff_change)

                    # homogeneous pixels follow the uniform weight, heterogeneous ones the TPS weight
                    w_change = w_change_tps * het_index[ind_c] + w_unform*(1.0-het_index[ind_c]) + 0.000001 # combine these two weights
                    w_change = w_change / (np.mean(w_change)) # normalize weight

                    # avoid extreme weights
                    ind_extrem = np.where(w_change > 10)
                    num_extrem = int(int(np.size(ind_extrem)) / len(ind_extrem))
                    if num_extrem > 0:
                        w_change[ind_extrem] = np.mean(w_change)
                    w_change = w_change / (np.mean(w_change))

                    # distribute residuals according to the weight
                    temp = change_21[ib, :, :]
                    temp[ind_c] = w_change * diff_change
                    change_21[ib, :, :] = temp

        # second prediction: L1 + change
        fine2_2 = L2_1 + change_21
        # correct abnormally detected change by clamping to the allowed range per band
        for ib in range(0, nb):
            temp = fine2_2[ib, :, :]
            ind_min = np.where(temp < min_allow[ib])
            num_min = int(int(np.size(ind_min)) / len(ind_min))
            if num_min > 0:
                temp[ind_min] = min_allow[ib]
            ind_max = np.where(temp > max_allow[ib])
            num_max = int(int(np.size(ind_max)) / len(ind_max))
            if num_max > 0:
                temp[ind_max] = max_allow[ib]
            fine2_2[ib, :, :] = temp

        change_21 = fine2_2 - fine1

    else:
        # whole subset is background: zero change, same shape as fine1
        change_21 = fine1 - fine1
428 |
    # crop the overlapped patch back to its core extent before writing
    change_21 = change_21[:, location[isub, 2]:location[isub, 3] + 1, location[isub, 0]:location[isub, 1] + 1]

    print('finish change prediction step ', isub+1, 'block')
    tempoutname1 = temp_file + '\\temp_change'
    Out_Name = tempoutname1 + str(isub + 1) + suffix
    fp = path1  # georeference template
    writeimage(change_21, Out_Name, fp)
436 |
# **************************mosaic all the change patches********************************
datalist = []   # per-patch file names
minx_list = []  # patch core extents in pixels of the full scene
maxX_list = []
minY_list = []
maxY_list = []

for isub in range(0, n_ns * n_nl):
    out_name = temp_file + '\\temp_change' + str(isub + 1) + suffix
    datalist.append(out_name)

    # ind_patch1 holds the non-overlapped (core) extent of each patch;
    # the change patches were already cropped to this extent above
    col1 = ind_patch1[isub, 0]
    col2 = ind_patch1[isub, 1]
    row1 = ind_patch1[isub, 2]
    row2 = ind_patch1[isub, 3]

    minx_list.append(col1)
    maxX_list.append(col2)
    minY_list.append(row1)
    maxY_list.append(row2)

minX = min(minx_list)
maxX = max(maxX_list)
minY = min(minY_list)
maxY = max(maxY_list)

# pixel offset of each patch inside the output mosaic
xOffset_list = []
yOffset_list = []
i = 0
for data in datalist:
    xOffset = int(minx_list[i] - minX)
    yOffset = int(minY_list[i] - minY)
    xOffset_list.append(xOffset)
    yOffset_list.append(yOffset)
    i += 1

in_ds = gdal.Open(path1)
path = temp_file + "\\temp_change" + suffix
if suffix == '.tif':
    driver = gdal.GetDriverByName("GTiff")
elif suffix == "":
    driver = gdal.GetDriverByName("ENVI")
dataset = driver.Create(path, orig_ns, orig_nl, nb, gdal.GDT_Float32)

i = 0
for data in datalist:
    nl, ns, datavalue = read_raster(data)
    for j in range(0, nb):
        dataset.GetRasterBand(j + 1).WriteArray(datavalue[j], xOffset_list[i], yOffset_list[i])
    i += 1

# copy georeference from the input fine image
geoTransform = in_ds.GetGeoTransform()
dataset.SetGeoTransform(geoTransform)
proj = in_ds.GetProjection()
dataset.SetProjection(proj)

# releasing the dataset flushes the GDAL write cache to disk
del dataset
494 |
495 |
# *******************************step 5: final prediction*********************************

# re-cut the mosaicked change image into OVERLAPPED patches (ind_patch) so the
# final prediction has a full search window at patch edges
FileName6 = temp_file + "\\temp_change" + suffix
_, _, change = read_raster(FileName6)

tempoutname = temp_file + '\\temp_change'
for isub in range(0, n_nl * n_ns):
    col1 = ind_patch[isub, 0]
    col2 = ind_patch[isub, 1]
    row1 = ind_patch[isub, 2]
    row2 = ind_patch[isub, 3]
    data = change[:, row1:row2 + 1, col1:col2 + 1]
    out_name = tempoutname + str(isub + 1) + suffix
    fp = path1  # georeference template
    writeimage(data, out_name, fp)
511 |
# ---- final prediction: blend the fine image at t1 with the mosaicked change
# image via similar-pixel weighting ----
for isub in range(0, n_nl * n_ns):

    # open each block image

    FileName = temp_file + '\\temp_F1' + str(isub + 1) + suffix
    nl, ns, fine1 = read_raster(FileName)

    FileName = temp_file + '\\temp_C1' + str(isub + 1) + suffix
    _, _, coarse1 = read_raster(FileName)

    FileName = temp_file + '\\temp_C0' + str(isub + 1) + suffix
    _, _, coarse2 = read_raster(FileName)

    FileName = temp_file + '\\class' + str(isub + 1) + suffix
    _, _, L1_class = read_raster(FileName)

    FileName = temp_file + '\\temp_change' + str(isub + 1) + suffix
    _, _, change_21 = read_raster(FileName)

    # place the blended result (only the non-overlapped core of the patch)
    fine2 = np.zeros([nb, location[isub, 3]-location[isub, 2]+1, location[isub, 1]-location[isub, 0]+1]).astype(float)

    # compute the distance of each pixel in the window with the target pixel (integrate window)
    D_temp1 = w - np.tile((idlwrap.indgen(w*2+1)), (int(w*2+1), 1))
    d1 = np.power(D_temp1, 2)
    D_temp2 = w - np.tile(idlwrap.indgen(1, w*2+1), (1, int(w*2+1)))
    d2 = np.power(D_temp2, 2)
    D_D_all = np.sqrt(d1 + d2)
    D_D_all = D_D_all.flatten()

    # per-band similarity threshold: 2*stddev/num_class
    # NOTE(review): num_class is left over from the last iteration of the
    # change-prediction loop above — confirm this is intended for every patch
    similar_th = np.zeros(nb).astype(float)
    for iband in range(0, nb):
        similar_th[iband] = np.std(fine1[iband, :, :]) * 2.0 / float(num_class)

    for i in range(location[isub, 0], location[isub, 1] + 1): # retrieve each target pixel
        for j in range(location[isub, 2], location[isub, 3] + 1):
            if fine1[background_band - 1, j, i] != background: # do not process the background

                ai = int(np.max([0, i - w]))  # moving-window bounds, clipped at the patch edge
                bi = int(np.min([ns - 1, i + w]))
                aj = int(np.max([0, j - w]))
                bj = int(np.min([nl - 1, j + w]))

                ci = i - ai # location of target pixel inside the window
                cj = j - aj

                col_wind = np.tile(idlwrap.indgen(bi-ai+1), (int(bj-aj+1), 1))
                row_wind = np.tile(idlwrap.indgen(1, bj-aj+1), (1, int(bi-ai+1)))

                # search similar pixels within window
                similar_cand = np.zeros((bi-ai+1)*(bj-aj+1)).astype(float) # similarity measure between each pixel and the target pixel
                position_cand = np.zeros((bi-ai+1)*(bj-aj+1)).astype(int) + 1 # 1 where a pixel is similar in all bands
                for ib in range(0, nb):
                    cand_band = np.zeros((bi-ai+1)*(bj-aj+1)).astype(int)
                    wind_fine = fine1[ib, aj:bj+1, ai:bi+1]
                    S_S = np.abs(wind_fine - wind_fine[cj, ci])
                    similar_cand = similar_cand + (S_S / (wind_fine[cj, ci] + 0.00000001)).flatten()
                    ind_cand = np.where(S_S.flatten() < similar_th[ib])
                    cand_band[ind_cand] = 1
                    position_cand = position_cand * cand_band  # must be similar in every band

                indcand = np.where(position_cand != 0)
                number_cand0 = int(int(np.size(indcand)) / len(indcand)) # select similar pixel initially
                # spatial distance measure between each validated pixel and the target pixel
                if (bi-ai+1) * (bj-aj+1) < (w*2.0+1) * (w*2.0+1): # not an integrate window
                    distance_cand = np.sqrt((ci-col_wind)**2 + (cj-row_wind)**2) + 0.00001
                else:
                    distance_cand = D_D_all # integrate window

                # add a small weight from spatial distance to spectral distance to avoid all pixels in the window with same similarity. This happens in perfect simulated images
                combine_similar_cand = (similar_cand+0.00001)*(10.0+distance_cand/w).flatten() # spatial distance has very small effect
                order_dis = np.argsort(combine_similar_cand[indcand], kind='mergesort')
                number_cand = np.min([number_cand0, num_similar_pixel])
                ind_same_class = (indcand[0])[order_dis[0:int(number_cand)]] # select the N most similar samples

                # normalize these distances into weights
                D_D_cand = (distance_cand.flatten())[ind_same_class]
                C_D = (1.0+D_D_cand/w) * (similar_cand[ind_same_class]+1.0)
                C_D = 1.0 / C_D
                weight = C_D / np.sum(C_D)

                for iband in range(0, nb):
                    # predict the value as fine1 + weighted change of the similar pixels
                    change_21_win = change_21[iband, aj:bj+1, ai:bi+1]
                    change_cand = (change_21_win.flatten())[ind_same_class]
                    fine2[iband, j-location[isub, 2], i - location[isub, 0]] = fine1[iband, j, i] + np.sum(weight * change_cand)

                    # revise the abnormal prediction
                    # BUGFIX: the list bracket previously closed right after fine1[...],
                    # so DN_min/DN_max were shifted by the coarse-image change before the
                    # max/min; clamp against the complete fallback prediction instead.
                    if fine2[iband, j-location[isub, 2], i - location[isub, 0]] < DN_min:
                        another_predict = np.max([DN_min, fine1[iband, j, i] + coarse2[iband, j, i] - coarse1[iband, j, i]])
                        fine2[iband, j-location[isub, 2], i - location[isub, 0]] = np.min([DN_max, another_predict])

                    if fine2[iband, j-location[isub, 2], i - location[isub, 0]] > DN_max:
                        another_predict = np.min([DN_max, fine1[iband, j, i] + coarse2[iband, j, i] - coarse1[iband, j, i]])
                        fine2[iband, j - location[isub, 2], i - location[isub, 0]] = np.max([DN_min, another_predict])

    print('finish final prediction', str(isub+1), 'block')
    tempoutname1 = temp_file + '\\temp_blended'
    Out_Name = tempoutname1 + str(isub+1) + suffix
    fp = path1
    writeimage(fine2, Out_Name, fp)
613 |
614 | endtime = datetime.datetime.now()
615 | print('time used:', (endtime - starttime).seconds, 'seconds')
616 |
617 | # # ***************************************************************
618 | # # mosaic all the blended patch
619 |
620 | datalist = []
621 | minx_list = []
622 | maxX_list = []
623 | minY_list = []
624 | maxY_list = []
625 |
626 | for isub in range(0, n_ns * n_nl):
627 | out_name = temp_file + '\\temp_blended' + str(isub+1) + suffix
628 | datalist.append(out_name)
629 |
630 | col1 = ind_patch1[isub, 0]
631 | col2 = ind_patch1[isub, 1]
632 | row1 = ind_patch1[isub, 2]
633 | row2 = ind_patch1[isub, 3]
634 |
635 | minx_list.append(col1)
636 | maxX_list.append(col2)
637 | minY_list.append(row1)
638 | maxY_list.append(row2)
639 |
640 | minX = min(minx_list)
641 | maxX = max(maxX_list)
642 | minY = min(minY_list)
643 | maxY = max(maxY_list)
644 |
645 | xOffset_list = []
646 | yOffset_list = []
647 | i = 0
648 | for data in datalist:
649 | xOffset = int(minx_list[i] - minX)
650 | yOffset = int(minY_list[i] - minY)
651 | xOffset_list.append(xOffset)
652 | yOffset_list.append(yOffset)
653 | i += 1
654 |
655 | in_ds = gdal.Open(path1)
656 | path = os.path.splitext(path3)[0] + "_FSDAF_Preclassification" + suffix
657 | if suffix == '.tif':
658 | driver = gdal.GetDriverByName("GTiff")
659 | elif suffix == "":
660 | driver = gdal.GetDriverByName("ENVI")
661 | dataset = driver.Create(path, orig_ns, orig_nl, nb, gdal.GDT_Float32)
662 |
663 | i = 0
664 | for data in datalist:
665 | nl, ns, datavalue = read_raster(data)
666 | for j in range(0, nb):
667 | dd = datavalue[j, :, :]
668 | dataset.GetRasterBand(j + 1).WriteArray(dd, xOffset_list[i], yOffset_list[i])
669 | i += 1
670 |
671 | geoTransform = in_ds.GetGeoTransform()
672 | dataset.SetGeoTransform(geoTransform)
673 | proj = in_ds.GetProjection()
674 | dataset.SetProjection(proj)
675 |
--------------------------------------------------------------------------------
/FSDAF/isodata.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | from tqdm import tqdm
5 |
6 |
def initialize_parameters(parameters=None):
    """Load ISODATA tuning parameters and publish them as module globals.

    Parameters
    ----------
    parameters : dict or None
        Optional overrides for the defaults below (keys match
        parameters_fsdaf.yaml). Missing keys fall back to defaults.

    Side effects
    ------------
    Updates this module's globals (K, I, P, minS, maxStdv, minDis, M, k),
    because myISODATA() reads these names via a `global` declaration.
    """
    parameters = {} if not parameters else parameters

    def safe_pull_value(parameters, key, default):
        return parameters.get(key, default)

    # number of clusters desired
    K = safe_pull_value(parameters, 'k', 6)

    # maximum number of iterations
    I = safe_pull_value(parameters, 'I', 100)

    # maximum of number of pairs of clusters which can be merged
    P = safe_pull_value(parameters, 'P', 2)

    # threshold value for minimum number of samples in each cluster
    # (discarding clusters). Bug fix: parameters_fsdaf.yaml supplies this
    # under the key 'minS', which the old code never read (it only looked
    # at 'THETA_M'); accept 'minS' first and keep 'THETA_M' as a legacy
    # fallback so older callers still work.
    minS = safe_pull_value(parameters, 'minS',
                           safe_pull_value(parameters, 'THETA_M', 10))

    # threshold value for standard deviation (for split)
    maxStdv = safe_pull_value(parameters, 'maxStdv', 0.1)
    # threshold value for pairwise distances (for merge)
    minDis = safe_pull_value(parameters, 'minDis', 2)

    # percentage of change in clusters between each iteration (to stop
    # algorithm). Bug fix: previously hard-coded to 0.05, ignoring the 'M'
    # key that parameters_fsdaf.yaml provides.
    M = safe_pull_value(parameters, 'M', 0.05)

    # number of starting clusters
    k = safe_pull_value(parameters, 'k', K)

    # Export every computed name (except the helper and the input dict)
    # into module globals for myISODATA() to consume.
    ret = locals()
    ret.pop('safe_pull_value')
    ret.pop('parameters')
    globals().update(ret)
42 |
43 |
def myISODATA(img, parameters=None):
    """
    Classify a numpy 'img' using Isodata algorithm.
    Parameters: a dictionary with the following keys.
            - img: an input numpy array that contains the image to classify
              (rows, cols, bands).
            - parameters: a dictionary with the initial values.
              If 'parameters' are not specified, the algorithm uses the default
              ones.
                  + number of clusters desired.
                      k = 6
                  + max number of iterations.
                      I = 100
                  + max number of pairs of clusters which can be merged.
                      P = 2
                  + threshold value for min number in each cluster.
                      minS = 10
                  + threshold value for standard deviation (for split).
                      maxStdv= 0.1
                  + threshold value for pairwise distances (for merge).
                      minDis = 2
                  + threshold change in the clusters between each iter.
                      M = 0.05
    Note: if some(or all) parameters are not provided, default values
          will be used.
    Returns:
            - a tuple (m, c): m is a uint8 (rows, cols) array of cluster
              labels, c is the (k, bands) array of final cluster centroids.
    """
    # Parameters are published as module globals by initialize_parameters().
    global K, I, P, maxStdv, minDis, minS, M, k
    initialize_parameters(parameters)
    shape = img.shape
    # Work on a flat (pixels, bands) view of the image.
    img = img.reshape(-1, shape[2])
    m = np.zeros((shape[0] * shape[1], 1))
    pad = np.zeros(img.shape)
    # Seed the k centroids with the first k pixels of the image.
    c = np.zeros((k, shape[2]))

    for i in range(k):
        c[i] = img[i, :]
    # NOTE: 'iter' shadows the builtin; kept as-is to preserve the code.
    for iter in tqdm(range(I), desc="Classify a numpy 'img' using Isodata algorithm"):

        distance = np.zeros((shape[0] * shape[1], k))

        c_sum = np.zeros((k, shape[2]))
        c_num = np.zeros((k, 1))
        cnt = 0
        # --- assignment step: Euclidean distance of every pixel to every centroid
        for i in range(k):
            pad = np.tile(c[i], (shape[0] * shape[1], 1))
            pad = img - pad
            distance[:, i] = np.linalg.norm(pad, axis=1)

        # m holds the label of the nearest centroid for each pixel.
        m = np.argmin(distance, axis=1)
        # Per-cluster member count and component-wise sum (for centroid update).
        for i in range(k):
            t_result = m - i
            t_value = img[np.argwhere(t_result == 0)]
            c_num[i] = t_value.shape[0]
            c_sum[i] = t_value.sum(axis=0)

        # --- discard step: drop clusters with fewer than minS members,
        # then re-assign pixels among the surviving centroids.
        if (c_num < minS).any():
            deleteDic = np.zeros(k)
            t = k
            for i in range(t):
                if c_num[i] < minS:
                    k = k - 1
                    deleteDic[i] = 1
            # Compact distance matrix and centroid table to the kept clusters.
            t_distance = np.zeros((shape[0] * shape[1], k))
            t_c = np.zeros((k, shape[2]))
            t_i = 0
            for i in range(t):
                if deleteDic[i] == 0:
                    t_distance[:, t_i] = distance[:, i]
                    t_c[t_i, :] = c[i]
                    t_i = t_i + 1
            distance = t_distance
            c = t_c
            c_sum = np.zeros((k, shape[2]))
            c_num = np.zeros((k, 1))
            cnt = 0
            m = np.argmin(distance, axis=1)
            for i in range(k):
                t_result = m - i
                t_value = img[np.argwhere(t_result == 0)]
                c_num[i] = t_value.shape[0]
                c_sum[i] = t_value.sum(axis=0)

        # --- split step (even iterations, or when too few clusters): split
        # the cluster with the largest per-band std exceeding maxStdv.
        if ((iter % 2) == 0) or k < (K / 2):
            b_split = False
            t_maxStd = -1
            for i in range(k):
                t_result = m - i
                t_value = img[np.argwhere(t_result == 0)]
                std = np.std(t_value, axis=0)
                if (std > maxStdv).any():
                    t_n_feature = np.argmax(std)
                    if std[0, t_n_feature] > t_maxStd:
                        t_maxStd = std[0, t_n_feature]
                        n_feature = t_n_feature.copy()
                        n_class = i
                        b_split = True

            if b_split:

                split_t_result = m - n_class
                split_t_value = img[np.argwhere(split_t_result == 0)]
                std = np.std(split_t_value, axis=0)

                k = k + 1

                # Move the two new centroids apart by M * std along the
                # most-spread feature axis.
                t_row1 = c[n_class, :]
                t_row2 = t_row1.copy()
                t_row1[n_feature] = t_row1[n_feature] - M * std[0, n_feature]
                t_row2[n_feature] = t_row2[n_feature] + M * std[0, n_feature]

                c[n_class, :] = t_row1.T

                c = np.r_['0,2,1', c, t_row2.T]
                # Re-assign pixels with the enlarged centroid set.
                distance = np.zeros((shape[0] * shape[1], k))
                c_sum = np.zeros((k, shape[2]))
                c_num = np.zeros((k, 1))
                cnt = 0
                for i in range(k):
                    pad = np.tile(c[i], (shape[0] * shape[1], 1))
                    pad = img - pad
                    distance[:, i] = np.linalg.norm(pad, axis=1)

                m = np.argmin(distance, axis=1)
                for i in range(k):
                    t_result = m - i
                    t_value = img[np.argwhere(t_result == 0)]
                    c_num[i] = t_value.shape[0]
                    c_sum[i] = t_value.sum(axis=0)
        # --- merge step (odd iterations, or when too many clusters): merge
        # centroid pairs closer than minDis.
        if ((iter % 2) == 1) or k > (K * 2):
            b_merge = False
            # Pairwise centroid-to-centroid Euclidean distances.
            EucildDistence = np.zeros((k, k))
            for classi in range(k):
                t_class = np.tile(c[classi, :], (k, 1))
                t_minus = c - t_class
                EucildDistence[:, classi] = np.linalg.norm(t_minus, axis=1)
            t_height = k * (k - 1) / 2
            t_height = np.uint32(t_height)
            # Rows: [distance, class_i, class_j, merge-flag, used-flag].
            distStruct = np.zeros((t_height, 5))
            cursor = 0
            for classi in range(1, k):
                for classj in range(0, classi):
                    distStruct[cursor, :] = [EucildDistence[classi, classj], classi, classj, 0, 0]
                    cursor = cursor + 1
            # Sort candidate pairs by ascending distance.
            distStruct = distStruct[np.lexsort(distStruct[:, ::-1].T)]
            for i in range(t_height):
                if distStruct[i, 4] == 0 and distStruct[i, 0] < minDis:
                    b_merge = True
                    distStruct[i, 3] = 1
                    # Mark every other pair touching either class as used, so
                    # each cluster participates in at most one merge per pass.
                    for j in range(t_height):
                        if distStruct[j, 1] == distStruct[i, 1] or distStruct[j, 2] == distStruct[i, 1] or distStruct[j, 1] == distStruct[i, 2] or distStruct[j, 2] == distStruct[i, 2]:
                            distStruct[j, 4] = 1

            t_c = c.copy()
            marker = False
            for i in range(t_height):
                if distStruct[i, 3] == 1:
                    class_a = distStruct[i, 1]
                    class_b = distStruct[i, 2]
                    class_a = np.uint32(class_a)
                    class_b = np.uint32(class_b)
                    k = k - 1
                    # Average the merged pair into class_a; zero out class_b
                    # so np.nonzero() below drops it.
                    t_c[class_a, :] = (t_c[class_a, :] + t_c[class_b, :]) / 2
                    t_c[class_b, :] = np.zeros((1, shape[2]))
                    marker = True
            if marker:
                c = t_c[np.nonzero(t_c)]
                c = c.reshape(k, shape[2])
                # Re-assign pixels with the reduced centroid set.
                distance = np.zeros((shape[0] * shape[1], k))
                c_sum = np.zeros((k, shape[2]))
                c_num = np.zeros((k, 1))
                cnt = 0
                for i in range(k):
                    pad = np.tile(c[i], (shape[0] * shape[1], 1))
                    pad = img - pad
                    distance[:, i] = np.linalg.norm(pad, axis=1)

                m = np.argmin(distance, axis=1)
                for i in range(k):
                    t_result = m - i
                    t_value = img[np.argwhere(t_result == 0)]
                    c_num[i] = t_value.shape[0]
                    c_sum[i] = t_value.sum(axis=0)

        m = m.flatten()
        m = np.uint8(m)

        # Guard against division by zero for empty clusters before the
        # centroid update.
        if (c_num == 0).any():
            zero_index = np.argwhere(c_sum == 0)
            c_sum[zero_index] = 0.01
            c_num[zero_index] = 1
        # --- convergence check: stop when centroids no longer move.
        t = c_sum / c_num
        if (t == c).all():
            m = m.reshape((shape[0], shape[1]))
            return m, c
        c = t

    # Iteration budget exhausted: return the current labelling and centroids.
    m = m.reshape((shape[0], shape[1]))
    return m, c
244 |
--------------------------------------------------------------------------------
/FSDAF/parameters_fsdaf.yaml:
--------------------------------------------------------------------------------
1 | w: 20 # set the half window size, if 25, the window size is 25*2+1=51
2 | num_similar_pixel: 20 # set number of similar pixels
3 | min_class: 4.0 # set the estimated minimum and maximum number of classes
4 | max_class: 6.0
5 | num_pure: 100 # number of purest coarse pixels in each class selected for change calculation
6 | DN_min: 0.0 # set the range of DN value of the image; if Byte, 0 and 255
7 | DN_max: 10000.0
8 | scale_factor: 16 # set the scale factor, it is integer=coarse resolution/fine resolution, e.g., 480/30=16
9 | block_size: 30 # set the size of block, e.g., 20 means 20*20 coarse pixels, if process whole ETM scene, set 30~50
10 | background: 0 # set the value of background pixels. 0 means that pixels will be considered as background if one of its bands= 0
11 | background_band: 3 # which band with value = background indicating background pixels. Sometimes, background pixels have different values in different bands
12 | # parameters for isodata classification
13 | I: 20 # max number of iterations
14 | maxStdv: 500 # threshold value for standard deviation (for split)
15 | minDis: 500 # threshold value for pairwise distances (for merge)
16 | minS: 200 # threshold value for min number in each cluster
17 | M: 0.05 # threshold change in the clusters between each iter
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/FSDAF/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from osgeo import gdal
3 | import os
4 |
5 |
def read_raster(infile):
    """Read a GDAL raster into a band-first numpy array.

    Returns (rows, cols, data) where data has shape (bands, rows, cols),
    even for a single-band raster.
    """
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    gdal.UseExceptions()
    dataset = gdal.Open(infile)
    width = dataset.RasterXSize
    height = dataset.RasterYSize
    band_count = dataset.RasterCount
    if band_count == 1:
        band = dataset.GetRasterBand(1)
        stack = band.ReadAsArray().reshape(1, height, width)
        # Scale/offset/nodata are queried but their results are discarded.
        band.GetScale()
        band.GetOffset()
        band.GetNoDataValue()
    else:
        stack = np.zeros([band_count, height, width])
        for band_idx in range(band_count):
            band = dataset.GetRasterBand(band_idx + 1)
            stack[band_idx, :, :] = band.ReadAsArray()
            # Queried but discarded, as in the single-band path.
            band.GetScale()
            band.GetOffset()
            band.GetNoDataValue()
    return height, width, stack
29 |
30 |
def read_raster_new(infile):
    """Read a GDAL raster into a band-last numpy array.

    Returns (rows, cols, data) where data has shape (rows, cols, bands).
    """
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    gdal.UseExceptions()
    dataset = gdal.Open(infile)
    width = dataset.RasterXSize
    height = dataset.RasterYSize
    band_count = dataset.RasterCount
    cube = np.zeros([height, width, band_count])
    for band_idx in range(band_count):
        band = dataset.GetRasterBand(band_idx + 1)
        cube[:, :, band_idx] = band.ReadAsArray()
        # Scale/offset/nodata are queried but their results are discarded.
        band.GetScale()
        band.GetOffset()
        band.GetNoDataValue()
    return height, width, cube
46 |
47 |
def writeimage(bands, path, in_ds):
    """Write a band-first stack to disk, copying georeferencing from a template.

    Parameters
    ----------
    bands : sequence of 2-D arrays (band-first stack). Nothing is written
        when it is None or empty.
    path : str, output file path.
    in_ds : str, path of a template raster; its extension selects the
        output driver ('.tif' -> GTiff, no extension -> ENVI) and its
        geotransform/projection are copied to the output.

    Raises
    ------
    ValueError
        For any other extension (the old code left `driver` unbound and
        crashed with NameError in that case).
    """
    suffix = os.path.splitext(in_ds)[-1]
    in_ds = gdal.Open(in_ds)
    # Nothing to write for an empty stack.
    if bands is None or len(bands) == 0:
        return

    band1 = bands[0]
    img_width = band1.shape[1]
    img_height = band1.shape[0]
    num_bands = len(bands)

    # Pick the GDAL type from the numpy dtype of the first band.
    if 'int8' in band1.dtype.name:
        datatype = gdal.GDT_Byte
    elif 'int16' in band1.dtype.name:
        datatype = gdal.GDT_UInt16
    else:
        datatype = gdal.GDT_Float32

    if suffix == '.tif':
        driver = gdal.GetDriverByName("GTiff")
    elif suffix == "":
        driver = gdal.GetDriverByName("ENVI")
    else:
        raise ValueError("writeimage: unsupported template suffix %r "
                         "(expected '.tif' or no extension)" % suffix)

    dataset = driver.Create(path, img_width, img_height, num_bands, datatype)
    # Create() may return None on failure; keep the original best-effort
    # behavior of silently skipping the write in that case.
    if dataset is not None:
        for i in range(num_bands):
            dataset.GetRasterBand(i + 1).WriteArray(bands[i])
        dataset.SetGeoTransform(in_ds.GetGeoTransform())
        dataset.SetProjection(in_ds.GetProjection())
79 |
--------------------------------------------------------------------------------
/Fit-FC/Fit_FC_Python.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 | from sklearn.linear_model import LinearRegression
4 | import numpy as np
5 | from skimage.transform import resize
6 | from tqdm import tqdm
7 |
8 | from functions import *
9 | from skimage.transform import downscale_local_mean
10 | from datetime import datetime
11 | warnings.filterwarnings('ignore', category=DeprecationWarning, module='np')
12 |
class Fit_FC:
    """Fit-FC spatiotemporal fusion.

    Predicts a fine-resolution image at date t2 from the fine image at t1
    (F_t1) and a coarse image pair (C_t1, C_t2) in three steps: local
    regression-model (RM) fitting on the coarse pair, spatial filtering
    (SF) with similar fine pixels, and residual compensation (see fit_fc).

    All images are (rows, cols, bands) arrays; C_t1/C_t2 are assumed to be
    on the coarse grid and F_t1 on the fine grid — TODO confirm against
    the caller, which block-averages the coarse inputs first.
    """
    def __init__(self, F_t1, C_t1, C_t2, RM_win_size=3, scale_factor=16, similar_win_size=17, similar_num=20):
        # Inputs are stored as float32 copies.
        self.F_t1 = F_t1.astype(np.float32)
        self.C_t1 = C_t1.astype(np.float32)
        self.C_t2 = C_t2.astype(np.float32)
        self.RM_win_size = RM_win_size            # coarse-pixel window for local regression
        self.scale_factor = scale_factor          # coarse/fine resolution ratio (unused inside the class itself)
        self.similar_win_size = similar_win_size  # fine-pixel window for similar-pixel search
        self.similar_num = similar_num            # similar pixels kept per target pixel

    def regression_model_fitting(self, band_idx):
        """Fit, per coarse pixel, a linear model C_t2 ~ a * C_t1 + b over a
        reflect-padded RM_win_size x RM_win_size neighbourhood.

        Returns (a, b, r): slope, intercept, and residual
        r = C_t2 - (a * C_t1 + b), all on the coarse grid.
        """
        C_t1_band = self.C_t1[:, :, band_idx]
        C_t2_band = self.C_t2[:, :, band_idx]

        # Reflect-pad so every pixel has a full regression window.
        C_t1_band_pad = np.pad(C_t1_band,
                               pad_width=((self.RM_win_size//2, self.RM_win_size//2),
                                          (self.RM_win_size//2, self.RM_win_size//2)),
                               mode="reflect")
        C_t2_band_pad = np.pad(C_t2_band,
                               pad_width=((self.RM_win_size//2, self.RM_win_size//2),
                                          (self.RM_win_size//2, self.RM_win_size//2)),
                               mode="reflect")

        a = np.empty(shape=(self.C_t1.shape[0], self.C_t1.shape[1]), dtype=np.float32)
        b = np.empty(shape=(self.C_t1.shape[0], self.C_t1.shape[1]), dtype=np.float32)

        for row_idx in range(self.C_t1.shape[0]):
            for col_idx in range(self.C_t1.shape[1]):
                C_t1_win = C_t1_band_pad[row_idx:row_idx + self.RM_win_size,
                           col_idx:col_idx + self.RM_win_size]
                C_t2_win = C_t2_band_pad[row_idx:row_idx + self.RM_win_size,
                           col_idx:col_idx + self.RM_win_size]
                # One ordinary least-squares fit per coarse pixel window.
                reg = LinearRegression().fit(C_t1_win.flatten().reshape(-1, 1), C_t2_win.flatten().reshape(-1, 1))

                a[row_idx, col_idx] = reg.coef_
                b[row_idx, col_idx] = reg.intercept_

        C_t2_RM_pred = C_t1_band * a + b
        r = C_t2_band - C_t2_RM_pred

        return a, b, r

    def calculate_distances(self):
        """Euclidean distance of every cell of a similar_win_size square
        window to the window centre."""
        rows = np.linspace(start=0, stop=self.similar_win_size - 1, num=self.similar_win_size)
        cols = np.linspace(start=0, stop=self.similar_win_size - 1, num=self.similar_win_size)
        xx, yy = np.meshgrid(rows, cols, indexing='ij')

        central_row = self.similar_win_size // 2
        central_col = self.similar_win_size // 2
        distances = np.sqrt(np.square(xx - central_row) + np.square(yy - central_col))

        return distances

    def select_similar_pixels(self):
        """For every fine pixel, find the similar_num most spectrally
        similar pixels in its window and their inverse-distance weights.

        Returns (indices, weights): indices are flat positions inside the
        per-pixel window; weights sum to 1 per pixel.
        """
        F_t1_pad = np.pad(self.F_t1,
                          pad_width=((self.similar_win_size // 2, self.similar_win_size // 2),
                                     (self.similar_win_size // 2, self.similar_win_size // 2),
                                     (0, 0)),
                          mode="reflect")
        F_t1_similar_weights = np.empty(shape=(self.F_t1.shape[0], self.F_t1.shape[1], self.similar_num),
                                        dtype=np.float32)
        F_t1_similar_indices = np.empty(shape=(self.F_t1.shape[0], self.F_t1.shape[1], self.similar_num),
                                        dtype=np.uint32)

        distances = self.calculate_distances().flatten()
        for row_idx in tqdm(range(self.F_t1.shape[0]), desc='select_similar_pixels'):
            for col_idx in range(self.F_t1.shape[1]):
                central_pixel_vals = self.F_t1[row_idx, col_idx, :]
                neighbor_pixel_vals = F_t1_pad[row_idx:row_idx + self.similar_win_size,
                                      col_idx:col_idx + self.similar_win_size, :]
                # Spectral similarity: mean absolute difference over bands.
                D = np.mean(np.abs(neighbor_pixel_vals - central_pixel_vals), axis=2).flatten()
                similar_indices = np.argsort(D)[:self.similar_num]

                # Inverse spatial-distance weights, normalised to sum to 1.
                similar_distances = 1 + distances[similar_indices] / (self.similar_win_size//2)
                similar_weights = (1 / similar_distances) / np.sum(1 / similar_distances)

                F_t1_similar_indices[row_idx, col_idx, :] = similar_indices
                F_t1_similar_weights[row_idx, col_idx, :] = similar_weights

        return F_t1_similar_indices, F_t1_similar_weights

    def spatial_filtering(self, F_t2_RM_pred, F_t1_similar_indices, F_t1_similar_weights):
        """Smooth one band of the RM prediction by a weighted average of
        each pixel's pre-selected similar pixels."""
        F_t2_RM_pred_pad = np.pad(F_t2_RM_pred,
                                  pad_width=((self.similar_win_size//2, self.similar_win_size//2),
                                             (self.similar_win_size//2, self.similar_win_size//2)),
                                  mode="reflect")
        SF_pred = np.empty(shape=(self.F_t1.shape[0], self.F_t1.shape[1]), dtype=np.float32)

        for row_idx in range(self.F_t1.shape[0]):
            for col_idx in range(self.F_t1.shape[1]):
                neighbor_pixel_RM_pred = F_t2_RM_pred_pad[row_idx:row_idx + self.similar_win_size,
                                         col_idx:col_idx + self.similar_win_size]

                similar_indices = F_t1_similar_indices[row_idx, col_idx, :]
                similar_weights = F_t1_similar_weights[row_idx, col_idx, :]

                similar_RM_pred = neighbor_pixel_RM_pred.flatten()[similar_indices]
                SF_pred[row_idx, col_idx] = np.sum(similar_weights * similar_RM_pred)

        return SF_pred

    def residual_compensation(self, F_t2_SF_pred, residuals, F_t1_similar_indices, F_t1_similar_weights):
        """Add a weighted average of the regression residuals (interpolated
        to the fine grid by the caller) back onto the SF prediction.

        Returns (Fit_FC_pred, pred_residuals).
        """
        residuals_pad = np.pad(residuals,
                               pad_width=((self.similar_win_size//2, self.similar_win_size//2),
                                          (self.similar_win_size//2, self.similar_win_size//2)),
                               mode="reflect")
        Fit_FC_pred = F_t2_SF_pred.copy()

        pred_residuals = np.empty(shape=(self.F_t1.shape[0], self.F_t1.shape[1]), dtype=np.float32)

        for row_idx in range(residuals.shape[0]):
            for col_idx in range(residuals.shape[1]):
                neighbor_pixel_residuals = residuals_pad[row_idx:row_idx + self.similar_win_size,
                                           col_idx:col_idx + self.similar_win_size]
                similar_indices = F_t1_similar_indices[row_idx, col_idx, :]
                similar_residuals = neighbor_pixel_residuals.flatten()[similar_indices]
                similar_weights = F_t1_similar_weights[row_idx, col_idx, :]
                residual = np.sum(similar_residuals * similar_weights)

                pred_residuals[row_idx, col_idx] = residual

                Fit_FC_pred[row_idx, col_idx] += residual

        return Fit_FC_pred, pred_residuals

    def fit_fc(self):
        """Run the full Fit-FC pipeline band by band.

        Returns (RM_pred, SF_pred, Fit_FC_pred), each shaped like F_t1.
        """
        RM_pred = np.empty(shape=self.F_t1.shape, dtype=np.float32)
        SF_pred = np.empty(shape=self.F_t1.shape, dtype=np.float32)
        Fit_FC_pred = np.empty(shape=self.F_t1.shape, dtype=np.float32)

        similar_indices, similar_weights = self.select_similar_pixels()
        print("Selected similar pixels!")

        for band_idx in tqdm(range(self.F_t1.shape[2]), desc='Fitting FC'):
            a, b, r = self.regression_model_fitting(band_idx)
            # Upsample coefficients with nearest neighbour (order=0) and the
            # residual with cubic interpolation (order=3) to the fine grid.
            a = resize(a, output_shape=(self.F_t1.shape[0], self.F_t1.shape[1]), order=0)
            b = resize(b, output_shape=(self.F_t1.shape[0], self.F_t1.shape[1]), order=0)
            r = resize(r, output_shape=(self.F_t1.shape[0], self.F_t1.shape[1]), order=3)
            band_RM_pred = self.F_t1[:, :, band_idx] * a + b
            print(f"Finished RM prediction of band {band_idx}!")

            band_SF_pred = self.spatial_filtering(band_RM_pred, similar_indices, similar_weights)
            print(f"Finished spatial filtering of band {band_idx}!")

            band_Fit_FC_pred, pred_residuals = self.residual_compensation(band_SF_pred, r, similar_indices,
                                                                          similar_weights)
            print(f"Finished final prediction of band {band_idx}!")

            RM_pred[:, :, band_idx] = band_RM_pred
            SF_pred[:, :, band_idx] = band_SF_pred
            Fit_FC_pred[:, :, band_idx] = band_Fit_FC_pred

        return RM_pred, SF_pred, Fit_FC_pred
166 |
167 |
###########################################################
#                   Parameters setting                    #
###########################################################
RM_win_size = 3        # coarse-pixel window for the local linear regression
scale_factor = 30      # coarse/fine resolution ratio used to degrade the MODIS inputs
similar_win_size = 31  # fine-pixel window for the similar-pixel search
similar_num = 30       # number of similar pixels retained per target pixel

# Input/output paths: fine (TM) image at the base date, coarse (MODIS)
# images at base and prediction dates, and the fused-output destination.
F_tb_path = r"/home/zbl/datasets_paper/LGC/val/2005_045_0214-2005_061_0302/20050214_TM.tif"
C_tb_path = r"/home/zbl/datasets_paper/LGC/val/2005_045_0214-2005_061_0302/MOD09GA_A2005045.tif"
C_tp_path = r"/home/zbl/datasets_paper/LGC/val/2005_045_0214-2005_061_0302/MOD09GA_A2005061.tif"
Fit_FC_path = r"/home/zbl/RunLog/Fit-FC/LGC/PRED_2005_045_0214-2005_061_0302.tif"

if __name__ == "__main__":
    # Fine image at the base date, kept at full resolution (profile reused
    # for writing the output).
    F_tb, F_tb_profile = read_raster(F_tb_path)
    print(F_tb_profile)
    C_tb = read_raster(C_tb_path)[0]
    # Block-average the coarse inputs so they sit on the coarse grid
    # implied by scale_factor.
    C_tb_coarse = downscale_local_mean(C_tb, factors=(scale_factor, scale_factor, 1))
    C_tp = read_raster(C_tp_path)[0]
    C_tp_coarse = downscale_local_mean(C_tp, factors=(scale_factor, scale_factor, 1))
    print("裁剪完成,正在处理中,请耐心等待")
    time0 = datetime.now()
    fit_fc = Fit_FC(F_tb, C_tb_coarse, C_tp_coarse,
                    RM_win_size=RM_win_size,
                    scale_factor=scale_factor,
                    similar_win_size=similar_win_size, similar_num=similar_num)
    print("初始化成功,计算中")
    # Run the three-step fusion; only the final prediction is written out.
    F_tp_RM, F_tp_SF, F_tp_Fit_FC = fit_fc.fit_fc()
    time1 = datetime.now()
    time_span = time1 - time0
    print(f"Used {time_span.total_seconds():.2f} seconds!")

    write_raster(F_tp_Fit_FC, F_tb_profile, Fit_FC_path)
--------------------------------------------------------------------------------
/Fit-FC/functions.py:
--------------------------------------------------------------------------------
1 | import rasterio
2 | from rasterio.windows import Window
3 | import numpy as np
4 |
5 |
def read_raster(raster_path):
    """Open a raster and return (array, profile), with the array converted
    to (rows, cols, bands) order and cast to the profile's dtype.

    Note: the dataset handle is left open, matching the original behavior.
    """
    src = rasterio.open(raster_path)
    profile = src.profile
    cube = np.transpose(src.read(), (1, 2, 0))
    cube = cube.astype(np.dtype(profile["dtype"]))

    return cube, profile
14 |
15 |
def write_raster(raster, raster_profile, raster_path):
    """Write a (rows, cols, bands) array to disk with rasterio.

    Parameters
    ----------
    raster : numpy array, (rows, cols, bands).
    raster_profile : rasterio profile dict; updated in place with the
        array's dtype/shape (this side effect matches the original).
    raster_path : output file path.

    Fix: the output dataset is now opened in a `with` block so the handle
    is closed even when the write raises (the original leaked it on error).
    """
    raster_profile["dtype"] = str(raster.dtype)
    raster_profile["height"] = raster.shape[0]
    raster_profile["width"] = raster.shape[1]
    raster_profile["count"] = raster.shape[2]
    # rasterio expects band-first data.
    image = np.transpose(raster, (2, 0, 1))
    with rasterio.open(raster_path, mode='w', **raster_profile) as dataset:
        dataset.write(image)
25 |
26 |
def clip_raster(dataset, row_start, row_stop, col_start, col_stop):
    """Read a rectangular window from an open rasterio dataset.

    Returns (clipped_raster, clipped_profile): the window's pixels in
    (rows, cols, bands) order, plus the dataset profile updated with the
    window's size and transform.
    """
    window = Window.from_slices((row_start, row_stop), (col_start, col_stop))
    win_transform = dataset.window_transform(window)
    patch = np.transpose(dataset.read(window=window), (1, 2, 0))
    profile = dataset.profile
    profile.update({'width': col_stop - col_start,
                    'height': row_stop - row_start,
                    'transform': win_transform})

    return patch, profile
38 |
39 |
def color_composite(image, bands_idx):
    """Assemble the requested bands (in the given order) into a composite."""
    selected = [image[:, :, band] for band in bands_idx]
    return np.stack(selected, axis=2)
43 |
def color_composite_ma(image, bands_idx):
    """Masked-array variant of color_composite: the mask is carried along."""
    selected = [image[:, :, band] for band in bands_idx]
    return np.ma.stack(selected, axis=2)
47 |
48 |
def linear_pct_stretch(img, pct=2, max_out=1, min_out=0):
    """Linear percentile stretch applied band by band.

    Each band is rescaled so that the pct / (100 - pct) percentiles map to
    min_out / max_out; values outside that range are clipped.
    """

    def stretch_band(gray):
        low = np.percentile(gray, pct)
        high = np.percentile(gray, 100 - pct)
        scaled = (gray - low) / (high - low) * (max_out - min_out) + min_out
        scaled[scaled < min_out] = min_out
        scaled[scaled > max_out] = max_out
        return scaled

    stretched = [stretch_band(img[:, :, idx]) for idx in range(img.shape[2])]
    return np.stack(stretched, axis=2)
66 |
def linear_pct_stretch_ma(img, pct=2, max_out=1, min_out=0):
    """Masked-array variant of linear_pct_stretch.

    Percentiles are computed on the valid (unmasked) pixels of each band
    only, and the stretched values are written back into a copy of img.
    """

    def stretch_values(values):
        low = np.percentile(values, pct)
        high = np.percentile(values, 100 - pct)
        scaled = (values - low) / (high - low) * (max_out - min_out) + min_out
        scaled[scaled < min_out] = min_out
        scaled[scaled > max_out] = max_out
        return scaled

    out = img.copy()
    for idx in range(img.shape[2]):
        valid = ~img.mask[:, :, idx]
        band_vals = img.data[:, :, idx][valid]
        out.data[:, :, idx][valid] = stretch_values(band_vals)
    return out
84 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 遥感图像时空融合方法汇总
2 |
3 | 本项目已经包含以下传统方法以及验证指标:RMSE,SSIM,UIQI,CC,SAM,ERGAS
4 |
5 | DataSetsTools放了一些常用的CIA和LGC数据集的处理代码
6 |
7 | 自己整理的一些关于遥感图像时空融合的相关知识:[遥感图像时空融合看板](https://www.yuque.com/basailuonadeyuhui/lczi48/ur61mu8fgbmum727?singleDoc# 《遥感图像 时空融合知识库 看板》)
8 |
9 | 全部传统方法请参考这个仓库:[Free-shared-Spatiotemporal-method-of-remote-sensing](https://github.com/max19951001/Free-shared-Spatiotemporal-method-of-remote-sensing-)
10 |
11 | ## 传统方法
12 |
13 | ### STARFM
14 |
15 | [endu111/remote-sensing-images-fusion: remote sensing images fusion,a topic similar to super resolution (github.com)](https://github.com/endu111/remote-sensing-images-fusion/tree/master)
16 |
17 | [文献阅读:STARFM - 知乎 (zhihu.com)](https://zhuanlan.zhihu.com/p/412081033)
18 |
19 | [STARFM(Python版)_starfm库-CSDN博客](https://blog.csdn.net/qq_43873392/article/details/127990068)
20 |
21 | ### ESTARFM/FSDAF
22 |
23 | [遥感 如何获取时空融合-starfm\estarfm\fsdaf 算法的python代码(自带测试数据)_estarfm融合-CSDN博客](https://blog.csdn.net/Nieqqwe/article/details/124341403)
24 |
25 | [FSDAF效果始终不如STARFM的原因和解决办法(在LGC和CIA数据集上) - 知乎 (zhihu.com)](https://zhuanlan.zhihu.com/p/436387889)
26 |
27 | ### Fit-FC
28 |
29 | [HoucaiGuo/Fit-FC-Python: Python implementation of the Fit-FC algorithm for spatiotemporal fusion of remote sensing images. (github.com)](https://github.com/HoucaiGuo/Fit-FC-Python)
30 |
31 | ## 深度学习的方法
32 |
33 | ### STFCNN
34 |
35 | 论文:[Spatiotemporal Satellite Image Fusion Using Deep Convolutional Neural Networks](https://ieeexplore.ieee.org/abstract/document/8291042)
36 |
37 | 代码:[setneicum/stfcnn](https://github.com/setneicum/stfcnn)
38 |
39 | ### GANSTFM
40 |
41 | 论文:[A Flexible Reference-Insensitive Spatiotemporal Fusion Model for Remote Sensing Images Using Conditional Generative Adversarial Network | IEEE Journals & Magazine | IEEE Xplore](https://ieeexplore.ieee.org/abstract/document/9336033)
42 |
43 | 代码:[theonegis/ganstfm: A Flexible Spatiotemporal Fusion Model for Remote Sensing Images With Conditional Generative Adversarial Network (github.com)](https://github.com/theonegis/ganstfm)
44 |
45 | ### MSNet【未找到】
46 |
47 | 论文:[Remote Sensing | Free Full-Text | MSNet: A Multi-Stream Fusion Network for Remote Sensing Spatiotemporal Fusion Based on Transformer and Convolution (mdpi.com)](https://www.mdpi.com/2072-4292/13/18/3724)
48 |
49 | ### SwinSTFM
50 |
51 | 论文:[SwinSTFM: Remote Sensing Spatiotemporal Fusion Using Swin Transformer | IEEE Journals & Magazine | IEEE Xplore](https://ieeexplore.ieee.org/abstract/document/9795183)
52 |
53 | 代码:[LouisChen0104/swinstfm: Code of SwinSTFM: Remote Sensing Spatiotemporal Fusion using Swin Transformer (github.com)](https://github.com/LouisChen0104/swinstfm)
54 |
55 | ### CycleGAN-STF 【未找到】
56 |
57 | 论文:[CycleGAN-STF: Spatiotemporal Fusion via CycleGAN-Based Image Generation | IEEE Journals & Magazine | IEEE Xplore](https://ieeexplore.ieee.org/abstract/document/9206067)
58 |
59 | ### StfNet【未找到】
60 |
61 | 论文:[StfNet: A Two-Stream Convolutional Neural Network for Spatiotemporal Image Fusion | IEEE Journals & Magazine | IEEE Xplore](https://ieeexplore.ieee.org/abstract/document/8693668)
62 |
63 | ### EDCSTFN
64 |
65 | 论文:[Remote Sensing | Free Full-Text | An Enhanced Deep Convolutional Model for Spatiotemporal Image Fusion (mdpi.com)](https://www.mdpi.com/2072-4292/11/24/2898?ref=https://githubhelp.com)
66 |
67 | 代码:[theonegis/edcstfn: An Enhanced Deep Convolutional Model for Spatiotemporal Image Fusion (github.com)](https://github.com/theonegis/edcstfn)
68 |
69 | ### MLFF-GAN
70 |
71 | 论文:[MLFF-GAN: A Multilevel Feature Fusion With GAN for Spatiotemporal Remote Sensing Images | IEEE Journals & Magazine | IEEE Xplore](https://ieeexplore.ieee.org/abstract/document/9781347/)
72 |
73 | 代码:[songbingze/MLFF-GAN (github.com)](https://github.com/songbingze/MLFF-GAN)
74 |
75 | ### ECPW-STFN
76 |
77 | 论文:[Enhanced wavelet based spatiotemporal fusion networks using cross-paired remote sensing images - ScienceDirect](https://www.sciencedirect.com/science/article/pii/S092427162400176X)
78 |
79 | 代码:[lixinghua5540/ECPW-STFN: Enhanced wavelet based spatiotemporal fusion networks using cross-paired remote sensing images, 2024](https://github.com/lixinghua5540/ECPW-STFN)
80 |
81 | ### STFDiff
82 |
83 | 论文:[STFDiff: Remote sensing image spatiotemporal fusion with diffusion models - ScienceDirect](https://www.sciencedirect.com/science/article/pii/S1566253524002835)
84 |
85 | 代码:[prowDIY/STF](https://github.com/prowDIY/STF)
86 |
87 | 注:模型路径在src.model.stfdiff.model6_GN_SiLU
88 |
89 | ### STM-STFNet
90 |
91 | 论文:[[A Dual-Perspective Spatiotemporal Fusion Model for Remote Sensing Images by Discriminative Learning of the Spatial and Temporal Mapping](https://ieeexplore.ieee.org/abstract/document/10595407)]
92 |
93 | 代码:[zhonhua/STM-STFNet](https://github.com/zhonhua/STM-STFNet)
94 |
95 | ## 联系我
96 |
97 | 如果有代码贡献欢迎联系我。
98 |
99 | ## 声明
100 |
101 | 本仓库仅供学习交流使用,请勿用于非法用途,如有侵权请联系我。
102 |
--------------------------------------------------------------------------------
/STARFM.py:
--------------------------------------------------------------------------------
1 | import os.path
2 |
3 | import numpy as np
4 | import torch
5 | import torch.nn as nn
6 | import time
7 | # import skimage.measure as sm
8 | import skimage.metrics as sm
9 | import cv2
10 | from osgeo import gdal, gdalconst
11 | import matplotlib.pyplot as plt
12 | from tqdm import tqdm
13 |
14 | import DataHelper
15 |
16 |
17 | ###img read tool###############################################################
def imgread(file, mode='gdal'):
    """Read an image and scale the DNs to reflectance by dividing by 10000.

    Parameters
    ----------
    file : image path.
    mode : 'gdal' (default) or 'cv2' reader backend.

    Raises
    ------
    ValueError
        For an unknown mode (the old twin-`if` version fell through and
        crashed with UnboundLocalError on `img`).
    """
    if mode == 'cv2':
        img = cv2.imread(file, -1) / 10000.  # /10000.
    elif mode == 'gdal':
        img = gdal.Open(file).ReadAsArray() / 10000.  # /10000.
    else:
        raise ValueError("imgread: unknown mode %r (expected 'gdal' or 'cv2')" % mode)
    return img
24 |
25 |
26 | ###weight caculate tools######################################################
def weight_caculate(data):
    """Map an absolute reflectance difference to a log-scale weight.

    The difference is rescaled to the stored integer range (*10000) and
    offset slightly above 1 so the logarithm is always positive.
    """
    rescaled = abs(data) * 10000 + 1.00001
    return torch.log(rescaled)
29 |
30 |
def caculate_weight(l1m1, m1m2):
    """Combine the spectral and temporal difference terms into one weight.

    *l1m1* is the fine-vs-coarse difference at t1 (atmosphere / sensor
    term); *m1m2* is the coarse t1-vs-t2 difference (change-over-time term).
    """
    spectral = weight_caculate(l1m1)
    temporal = weight_caculate(m1m2)
    return spectral * temporal
37 |
38 |
39 | ###space distance caculate tool################################################
def indexdistance(window):
    """Spatial distance-weight matrix for a single moving window.

    Returns an array of shape (window[1], window[0]) where the centre pixel
    has weight 1 and the weight grows with Euclidean distance from the
    centre, normalised by the window half-width.
    """
    half_x = (window[0] - 1) // 2
    half_y = (window[1] - 1) // 2
    xs, ys = np.meshgrid(np.arange(window[0]), np.arange(window[1]))
    radial = ((xs - half_x) ** 2 + (ys - half_y) ** 2) ** 0.5
    return 1 + radial / half_x
46 |
47 |
48 | ###threshold select tool######################################################
def weight_bythreshold(weight, data, threshold):
    """Mark entries of *weight* as 1 wherever *data* is within *threshold*.

    Mutates *weight* in place and returns it.
    """
    within = data <= threshold
    weight[within] = 1
    return weight
53 |
54 |
def weight_bythreshold_allbands(weight, l1m1, m1m2, thresholdmax):
    """Build a single-band 0/1 mask that is 1 only where ALL bands pass.

    A position passes in one band if either difference is within its
    threshold; the per-band flags are averaged across bands and only
    positions passing in every band keep the value 1.
    """
    weight[l1m1 <= thresholdmax[0]] = 1
    weight[m1m2 <= thresholdmax[1]] = 1
    fraction = weight.sum(dim=0, keepdim=True) / weight.shape[0]
    return (fraction == 1).to(fraction.dtype)
62 |
63 |
64 | ###initial similar pixels tools################################################
def spectral_similar_threshold(clusters, NIR, red):
    """Initial similar-pixel thresholds from band standard deviations.

    Per the STARFM formulation, the threshold is 2*sigma/clusters for each
    of the NIR and red bands; returns the (NIR, red) pair.
    """
    def band_threshold(band):
        return band.std() * 2 / clusters

    return (band_threshold(NIR), band_threshold(red))
69 |
70 |
def caculate_similar(l1, threshold, window):
    """Flag pixels spectrally similar to each moving-window centre pixel.

    Unfolds *l1* into sliding windows, compares every in-window pixel
    against the window's centre value, and returns a 0/1 float mask of the
    same unfolded shape where 1 marks a difference within *threshold*.
    """
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    cols = nn.functional.unfold(l1, window)
    center = (cols.size()[1] - 1) // 2
    diff = abs(cols - cols[:, center:center + 1, :])
    mask = torch.zeros(cols.shape, dtype=torch.float32).to(dev)
    return weight_bythreshold(mask, diff, threshold)
80 |
81 |
def classifier(l1):
    """Placeholder for a similar-pixel classifier; intentionally unused."""
    return
85 |
86 |
87 | ###similar pixels filter tools#################################################
def allband_arrayindex(arraylist, indexarray, rawindexshape):
    """Crop each band of every image in *arraylist* via fancy indexing.

    Parameters
    ----------
    arraylist : list of tensors
        Each shaped (1, bands, H, W).
    indexarray : pair of index arrays
        As produced by ``np.meshgrid``; selects the sub-region to extract
        from every band.
    rawindexshape : tuple
        Target shape (1, bands, rows, cols) of each cropped output.

    Returns
    -------
    list of tensors, one per input, each of shape *rawindexshape*.

    Fix: allocate the output on the same device as its source tensor
    instead of the original unconditional ``.cuda()``, which crashed on
    CPU-only machines (every other function in this module selects its
    device dynamically).
    """
    shape = arraylist[0].shape
    datalist = []
    for array in arraylist:
        # Same device as the input (CPU or GPU) — no hard CUDA dependency.
        newarray = torch.zeros(rawindexshape, dtype=torch.float32, device=array.device)
        for band in range(shape[1]):
            newarray[0, band] = array[0, band][indexarray]
        datalist.append(newarray)
    return datalist
97 |
98 |
def similar_filter(datalist, sital, sitam):
    """Per-pixel thresholds used to filter the candidate similar pixels.

    Combines the per-pixel max band difference with the sensor uncertainty
    terms *sital* (fine sensor) and *sitam* (coarse sensor).  Note the
    t1-vs-t2 term adds ``sitam`` with itself because both images in that
    difference come from the coarse sensor.

    Returns the pair (fine-vs-coarse threshold, coarse-vs-coarse threshold).
    """
    l1, m1, m2 = datalist
    noise_lm = (sital ** 2 + sitam ** 2) ** 0.5
    noise_mm = (sitam ** 2 + sitam ** 2) ** 0.5
    diff_lm = nn.functional.unfold(abs(l1 - m1), (1, 1)).max(1)[0]
    diff_mm = nn.functional.unfold(abs(m2 - m1), (1, 1)).max(1)[0]
    return (diff_lm + noise_lm, diff_mm + noise_mm)
107 |
108 |
109 | ###starfm for onepart##########################################################
def starfm_onepart(datalist, similar, thresholdmax, window, outshape, dist):
    """Run the STARFM prediction for one image tile.

    Parameters
    ----------
    datalist : [l1, m1, m2]
        Padded tensors of shape (1, bands, H, W): the fine image at t1 and
        the coarse images at t1 and t2 for this tile.
    similar : tensor
        0/1 mask of spectrally similar pixels from ``caculate_similar``,
        shaped like the unfolded windows.
    thresholdmax : pair of tensors
        Per-pixel thresholds from ``similar_filter``.
    window : tuple
        Moving-window size, e.g. (31, 31).
    outshape : tuple
        (rows, cols) of the un-padded output tile.
    dist : tensor
        Unfolded spatial distance weights from ``indexdistance``.

    Returns
    -------
    tensor of shape (1, bands, outshape[0], outshape[1]) — the predicted
    fine tile at t2.
    """
    #####param and data
    [l1, m1, m2] = datalist
    bandsize = l1.shape[1]
    outshape = outshape  # no-op self-assignment, kept verbatim
    blocksize = outshape[0] * outshape[1]
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    #####img to col
    # Unfold each image into sliding windows, one column per output pixel.
    l1 = nn.functional.unfold(l1, window)
    m1 = nn.functional.unfold(m1, window)
    m2 = nn.functional.unfold(m2, window)
    # Reshape to (bands, window_area, pixels) so weights apply per band.
    l1 = l1.view(bandsize, -1, blocksize)
    m1 = m1.view(bandsize, -1, blocksize)
    m2 = m2.view(bandsize, -1, blocksize)
    l1m1 = abs(l1 - m1)
    m1m2 = abs(m2 - m1)
    #####caculate weights
    # time and space weight: inverse of (log-difference product * distance)
    w = caculate_weight(l1m1, m1m2)
    w = 1 / (w * dist)
    # similar pixels: 1:by threshold 2:by classifier
    wmask = torch.zeros(l1.shape, dtype=torch.float32).to(device)

    # filter similar pixels for each band: (bandsize,windowsize,blocksize)
    # wmasknew=weight_bythreshold(wmask,l1m1,thresholdmax[0])
    # wmasknew=weight_bythreshold(wmasknew,m1m2,thresholdmax[1])

    # filter similar pixels for all bands: (1,windowsize,blocksize)
    wmasknew = weight_bythreshold_allbands(wmask, l1m1, m1m2, thresholdmax)
    # mask: keep only pixels passing both the threshold and similarity tests
    w = w * wmasknew * similar
    # normili: normalise weights to sum to 1 within each window
    # NOTE(review): if a window has no surviving pixel, w.sum(1) is 0 and
    # this division yields NaN — presumably prevented by the non-zero
    # padding constants upstream; confirm.
    w = w / (w.sum(1).view(w.shape[0], 1, w.shape[2]))
    #####predicte and trans
    # predicte l2: fine(t1) + coarse change (t2 - t1), weighted per window
    l2 = (l1 + m2 - m1) * w
    l2 = l2.sum(1).reshape(1, bandsize, l2.shape[2])
    # col to img
    l2 = nn.functional.fold(l2.view(1, -1, blocksize), outshape, (1, 1))
    return l2
150 |
151 |
152 | ###starfm for allpart#########################################################
def starfm_main(l1r, m1r, m2r,
                param={'part_shape': (140, 140),
                       'window_size': (31, 31),
                       'clusters': 5,
                       'NIRindex': 3, 'redindex': 2,
                       'sital': 0.001, 'sitam': 0.001}):
    """Predict the fine image at t2 with STARFM, tiling the scene in parts.

    Parameters
    ----------
    l1r : tensor (1, bands, H, W)
        Fine (Landsat-like) image at t1.
    m1r : tensor (1, bands, H, W)
        Coarse (MODIS-like) image at t1.
    m2r : tensor (1, bands, H, W)
        Coarse image at t2, the prediction date.
    param : dict
        Algorithm settings: tile size, moving-window size, cluster count
        for the similarity threshold, NIR/red band indices and the sensor
        uncertainty terms.  (Mutable default, but it is never mutated.)

    Returns
    -------
    tensor (1, bands, H, W) — the predicted fine image at t2.

    Fix: the "Split into ..." progress print previously reported
    ``rows * rows`` parts and repeated the row count in the column slot;
    it now reports ``rows * cols`` and the true column count.
    """
    # get start time
    time_start = time.time()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # read parameters
    parts_shape = param['part_shape']
    window = param['window_size']
    clusters = param['clusters']
    NIRindex = param['NIRindex']
    redindex = param['redindex']
    sital = param['sital']
    sitam = param['sitam']
    # caculate initial similar pixels threshold from NIR/red band statistics
    threshold = spectral_similar_threshold(clusters, l1r[:, NIRindex:NIRindex + 1], l1r[:, redindex:redindex + 1])
    print('similar threshold (NIR,red)', threshold)
    ####shape
    imageshape = (l1r.shape[1], l1r.shape[2], l1r.shape[3])
    print('datashape:', imageshape)
    row = imageshape[1] // parts_shape[0] + 1
    col = imageshape[2] // parts_shape[1] + 1
    padrow = window[0] // 2
    padcol = window[1] // 2
    #####padding constant for conv; STARFM uses inverse distance weights (1/w),
    # so pad with distinct non-zero constants to avoid 0 and NaN (1/0).
    constant1 = 10
    constant2 = 20
    constant3 = 30
    l1 = torch.nn.functional.pad(l1r, (padrow, padcol, padrow, padcol), 'constant', constant1)
    m1 = torch.nn.functional.pad(m1r, (padrow, padcol, padrow, padcol), 'constant', constant2)
    m2 = torch.nn.functional.pad(m2r, (padrow, padcol, padrow, padcol), 'constant', constant3)
    # split parts, get index and run for every part
    row_part = np.array_split(np.arange(imageshape[1]), row, axis=0)
    col_part = np.array_split(np.arange(imageshape[2]), col, axis=0)
    # Fixed: report rows*cols parts and the real column count.
    print('Split into {} parts,row number: {},col number: {}'.format(len(row_part) * len(col_part), len(row_part),
                                                                     len(col_part)))
    dist = nn.functional.unfold(
        torch.tensor(indexdistance(window), dtype=torch.float32).reshape(1, 1, window[0], window[1]), window).to(device)

    for rnumber, row_index in tqdm(enumerate(row_part), desc=f'生成图像中', total=len(row_part)):
        for cnumber, col_index in enumerate(col_part):
            ####run for part: (rnumber,cnumber)
            ####output index (into the un-padded images)
            rawindex = np.meshgrid(row_index, col_index)
            ####output shape
            rawindexshape = (col_index.shape[0], row_index.shape[0])
            ####the real parts_index, for reading the padded data
            row_pad = np.arange(row_index[0], row_index[len(row_index) - 1] + window[0])
            col_pad = np.arange(col_index[0], col_index[len(col_index) - 1] + window[1])
            padindex = np.meshgrid(row_pad, col_pad)
            padindexshape = (col_pad.shape[0], row_pad.shape[0])
            ####caculate initial similar pixels on the NIR and red bands
            NIR_similar = caculate_similar(l1[0, NIRindex][padindex].view(1, 1, padindexshape[0], padindexshape[1]),
                                           threshold[0], window)
            red_similar = caculate_similar(l1[0, redindex][padindex].view(1, 1, padindexshape[0], padindexshape[1]),
                                           threshold[1], window)
            similar = NIR_similar * red_similar
            ####caculate threshold used for similar_pixels_filter
            thresholdmax = similar_filter(
                allband_arrayindex([l1r, m1r, m2r], rawindex, (1, imageshape[0], rawindexshape[0], rawindexshape[1])),
                sital, sitam)
            ####Splicing each col at rnumber-th row
            if cnumber == 0:
                rowdata = starfm_onepart(
                    allband_arrayindex([l1, m1, m2], padindex, (1, imageshape[0], padindexshape[0], padindexshape[1])),
                    similar, thresholdmax, window, rawindexshape, dist
                )

            else:
                rowdata = torch.cat((rowdata,
                                     starfm_onepart(allband_arrayindex([l1, m1, m2], padindex, (
                                         1, imageshape[0], padindexshape[0], padindexshape[1])),
                                                    similar, thresholdmax, window, rawindexshape, dist)), 2)
            ####Splicing each row
            if rnumber == 0:
                l2_fake = rowdata
            else:
                l2_fake = torch.cat((l2_fake, rowdata), 3)

    # Tiles are assembled transposed; swap back to (..., H, W).
    l2_fake = l2_fake.transpose(3, 2)
    # time cost
    time_end = time.time()
    print('now over,use time {:.4f}'.format(time_end - time_start))
    return l2_fake
241 |
242 |
def trans(datafile):
    """Rescale reflectance back to the stored integer range, in place.

    Multiplies every element of *datafile* by 10000 — the inverse of the
    ``/10000.`` applied in ``imgread`` — with a single vectorized in-place
    operation instead of the original O(H*W*C) triple Python loop.

    Parameters
    ----------
    datafile : np.ndarray
        Image array of shape (H, W, bands); modified in place.

    Returns
    -------
    np.ndarray
        The same (mutated) array, for convenience.
    """
    datafile *= 10000
    return datafile
251 |
252 |
def starfm(paths, root, name):
    """Run STARFM for one scene and write the predicted image to disk.

    Parameters
    ----------
    paths : sequence of 4 file paths
        Ordered (m1, l1, m2, l2_gt): coarse t1, fine t1, coarse t2 and the
        fine ground truth at t2 (used only as the georeference template).
    root : str
        Output directory for the prediction.
    name : str
        Scene label used to build the output filename.
    """
    l1file = paths[1]
    l2file = paths[3]
    m1file = paths[0]
    m2file = paths[2]

    # Algorithm settings for this dataset (band indices are examples; the
    # exact spectral response of each band is unknown here).
    param = {'part_shape': (75, 75),
             'window_size': (31, 31),
             'clusters': 5,
             'NIRindex': 1, 'redindex': 0,
             'sital': 0.001, 'sitam': 0.001}

    # Load inputs as numpy arrays scaled to [0, 1].
    l1 = imgread(l1file)
    m1 = imgread(m1file)
    m2 = imgread(m2file)
    l2_gt = imgread(l2file)

    # numpy (bands, H, W) -> torch (1, bands, H, W) on the available device.
    bands, height, width = l1.shape
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    l1r = torch.tensor(l1.reshape(1, bands, height, width), dtype=torch.float32).to(device)
    m1r = torch.tensor(m1.reshape(1, bands, height, width), dtype=torch.float32).to(device)
    m2r = torch.tensor(m2.reshape(1, bands, height, width), dtype=torch.float32).to(device)

    # Fuse: tensor input -> tensor output.
    l2_fake = starfm_main(l1r, m1r, m2r, param)
    print(f'l2_fake.shape:{l2_fake.shape}')

    # Back to numpy, dropping the batch dimension.
    if device.type == 'cuda':
        l2_fake = l2_fake[0].cpu().numpy()
    else:
        l2_fake = l2_fake[0].numpy()

    # (bands, H, W) -> (H, W, bands) for writing.
    l2_fake = l2_fake.transpose(1, 2, 0)

    # Rescale to the stored integer range and write out, copying the
    # georeference of the ground-truth file.
    trans(l2_fake)
    targetfile_name = f"PRED_{name}.tif"
    writetif(l2_fake, os.path.join(root, targetfile_name), l2file)

    return
322 |
323 |
def writetif(dataset, target_file, reference_file):
    """Write an (H, W, bands) array to a GeoTIFF, copying georeferencing.

    Parameters
    ----------
    dataset : np.ndarray
        Image of shape (H, W, bands) to write.
    target_file : str
        Output GeoTIFF path.
    reference_file : str
        Existing raster whose projection, geotransform and band data type
        are copied onto the output.

    Cleanups: removed the unused ``band1`` local, the dead commented-out
    GDAL calls and the pointless ``del dataset``.
    """
    reference = gdal.Open(reference_file, gdalconst.GA_ReadOnly)
    band_count = dataset.shape[2]
    print("波段数:", band_count)
    target = gdal.GetDriverByName("GTiff").Create(target_file, xsize=dataset.shape[1],
                                                  ysize=dataset.shape[0],
                                                  bands=band_count,
                                                  eType=reference.GetRasterBand(1).DataType)
    # Copy spatial metadata so the prediction overlays the reference exactly.
    target.SetProjection(reference.GetProjection())
    target.SetGeoTransform(list(reference.GetGeoTransform()))
    # GDAL band indices are 1-based.
    for index in tqdm(range(1, band_count + 1), desc="写入中"):
        out_band = target.GetRasterBand(index)
        out_band.WriteArray(dataset[:, :, index - 1])
        out_band.FlushCache()
        out_band.ComputeBandStats(False)
    print("写入完成")
347 |
348 |
# Output directory for the predicted fine images (LGC dataset).
save_path = r"/home/zbl/RunLog/STARFM/LGC/"
if __name__ == '__main__':
    # DataHelper supplies, per scene, the four input image paths plus a name
    # used to label the prediction file; run STARFM on each scene in turn.
    list_dirs, names = DataHelper.getDataLoader(option="LGC")
    for i in range(len(list_dirs)):
        starfm(list_dirs[i], save_path, names[i])
354 |
--------------------------------------------------------------------------------
/evaluatPointSwin.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | from pathlib import Path
4 | import numpy as np
5 | import torch
6 | from osgeo import gdal_array
7 | from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
8 | from math import sqrt
9 |
10 | from skimage.metrics import structural_similarity as compare_ssim
11 |
12 | from sewar import sam, rmse
13 |
14 | from Config import ConfigForEvaluation, ConfigForEvaluationForSwin
15 |
16 |
def uiqi(im1, im2, block_size=64, return_map=False):
    """Universal Image Quality Index (global, not block-wise).

    For multi-band input returns one value per band; for a single 2-D band
    returns the scalar index (correlation * luminance * contrast terms).
    ``block_size`` and ``return_map`` are kept for interface compatibility
    but do not affect this global computation.
    """
    if im1.shape[0] == 6:  # (bands, H, W) -> (H, W, bands)
        im1 = im1.transpose(1, 2, 0)
        im2 = im2.transpose(1, 2, 0)
    if len(im1.shape) == 3:
        per_band = [uiqi(im1[:, :, b], im2[:, :, b], block_size, return_map=return_map)
                    for b in range(im1.shape[2])]
        return np.array(per_band)
    sx = np.std(im1, ddof=1)
    sy = np.std(im2, ddof=1)
    sxy = np.sum((im1 - np.mean(im1)) * (im2 - np.mean(im2))) / (im1.shape[0] * im1.shape[1] - 1)
    mx = np.mean(im1)
    my = np.mean(im2)
    correlation = sxy / (sx * sy)
    luminance = 2 * mx * my / (mx ** 2 + my ** 2)
    contrast = 2 * sx * sy / (sx ** 2 + sy ** 2)
    return correlation * luminance * contrast
34 |
35 |
def calculate_ergas(real_images, predicted_images):
    """Compute the ERGAS (relative dimensionless global error) score.

    Averages, over channels, the squared per-channel RMSE normalised by the
    squared channel mean, then takes the square root and multiplies by the
    scale factor 6 used by this codebase.

    Parameters
    ----------
    real_images : iterable of 2-D arrays
        Ground-truth channels.
    predicted_images : iterable of 2-D arrays
        Predicted channels, same layout.

    Returns
    -------
    float — the scaled ERGAS value (lower is better).
    """
    channel_terms = []
    for truth, pred in zip(real_images, predicted_images):
        channel_rmse = rmse(truth, pred)
        # Guard against division by zero on an all-black channel.
        mean_squared = max(np.mean(truth) ** 2, 1e-100)
        channel_terms.append((channel_rmse ** 2) / mean_squared)
    average = sum(channel_terms) / len(real_images)
    return np.sqrt(average) * 6
73 |
74 |
def evaluate(y_true, y_pred, func):
    """Apply metric *func* to each leading-axis slice (band) of the inputs.

    2-D inputs are promoted to a single-band stack first.  Returns the list
    of per-band metric values.
    """
    assert y_true.shape == y_pred.shape
    if y_true.ndim == 2:
        y_true = y_true[np.newaxis, :]
        y_pred = y_pred[np.newaxis, :]
    return [func(truth, pred) for truth, pred in zip(y_true, y_pred)]
84 |
85 |
def rmse_loss(y_true, y_pred):
    """Per-band root-mean-squared error between two image stacks."""
    def band_rmse(truth, pred):
        return sqrt(mean_squared_error(truth.ravel(), pred.ravel()))

    return evaluate(y_true, y_pred, band_rmse)
89 |
90 |
def ssim(y_true, y_pred, data_range=1):
    """Per-band structural similarity (SSIM) between two image stacks."""
    def band_ssim(truth, pred):
        return compare_ssim(truth, pred, data_range=data_range)

    return evaluate(y_true, y_pred, band_ssim)
94 |
95 |
# Evaluation configuration built at import time: dataset choice ("LGC") and
# the directory holding the predicted images to score.
# NOTE(review): hard-coded run path — presumably edited per experiment; confirm.
config = ConfigForEvaluationForSwin("LGC",
                                    save_dir_name="/home/zbl/datasets/STFusion/RunLog/FinePainterNet/LGC/2024-03-17/test/")
98 |
99 |
def getMean(data):
    """Arithmetic mean of a non-empty sized sequence."""
    total = 0.0
    for value in data:
        total += value
    return total / len(data)
102 |
103 |
def cc(real_image, predicted_image):
    """Pearson correlation coefficient between two images, per channel.

    Parameters
    ----------
    real_image : np.ndarray
        Ground truth, shape (channels, height, width).
    predicted_image : np.ndarray
        Prediction, same shape as *real_image*.

    Returns
    -------
    np.ndarray — one correlation coefficient per channel.

    Raises
    ------
    ValueError
        If the two images do not share the same shape.
    """
    if real_image.shape != predicted_image.shape:
        raise ValueError("The shapes of real_image and predicted_image must be the same.")

    coefficients = []
    for truth, pred in zip(real_image, predicted_image):
        mu_t = np.mean(truth)
        mu_p = np.mean(pred)
        covariance = np.sum((truth - mu_t) * (pred - mu_p))
        spread = np.sqrt(np.sum(np.square(truth - mu_t)) * np.sum(np.square(pred - mu_p)))
        # Tiny epsilon keeps the division finite for constant channels.
        coefficients.append(covariance / (spread + 1e-100))

    return np.array(coefficients)
140 |
141 |
def trans_sam(real_image, predicted_image):
    """Spectral Angle Mapper in degrees (sewar's ``sam`` returns radians).

    Inputs are (channels, H, W) and are moved to channel-last before the
    call, as ``sam`` expects.
    """
    hwc_real = real_image.transpose(1, 2, 0)
    hwc_pred = predicted_image.transpose(1, 2, 0)
    return sam(hwc_real, hwc_pred) * 180 / np.pi
144 |
145 |
if __name__ == '__main__':

    # Batch-evaluation loop kept for reference: it iterates every predicted
    # image listed in `config` and scores it against its reference.
    # for idx, img in enumerate(config.predict_img_names):
    #     predict_dir = os.path.join(config.predict_dir, img)
    #     ground_truth_dir = os.path.join(config.ground_truth_dir, config.ref_img_names[idx])
    #     print(f"--------------[{idx + 1}/{len(config.predict_img_names)}]IMAGE_NAME:{img}----------------------------")
    #     ix = gdal_array.LoadFile(predict_dir).astype(np.int32)
    #     iy = gdal_array.LoadFile(ground_truth_dir).astype(np.int32)
    #     if config.choice == 'CIA':
    #         ix[iy == 0] = 0
    #     scale_factor = 0.0001
    #     xx = ix * scale_factor
    #     yy = iy * scale_factor
    #     print('RMSE', rmse_loss(yy, xx))
    #     print('SSIM', ssim(yy, xx))
    #     print('UIQI', uiqi(xx, yy))
    #     print('CC', cc(yy, xx))
    #     print('SAM', trans_sam(iy, ix))  # 在原论文中,只有sam是真实数据比的,其他指标都是放缩后再比的
    #     print('ERGAS', calculate_ergas(yy, xx))

    # predict_dir = "/home/zbl/codeLab/remotePython/RemoteSensingLab/fake.tif"
    # ground_truth_dir = "/home/zbl/datasets_paper/CIA-swinSTFM/val/2002_005_0105-2002_012_0112/20020112_TM.tif"
    # Single-pair evaluation: score one fused image against its ground truth.
    predict_dir = "/home/zbl/codeLab/remotePython/RemoteSensingLab/fake.tif"
    ground_truth_dir = "/home/zbl/datasets_paper/CIA-MLFF-GAN/val/2002_076_0317-2002_092_0402/20020402_TM.tif"
    ix = gdal_array.LoadFile(predict_dir).astype(np.int32)
    iy = gdal_array.LoadFile(ground_truth_dir).astype(np.int32)
    # if config.choice == 'CIA':
    #     ix[iy == 0] = 0
    # Rescale the stored integers back to reflectance before scoring.
    scale_factor = 0.0001
    xx = ix * scale_factor
    yy = iy * scale_factor
    print('RMSE', rmse_loss(yy, xx))
    print('SSIM', ssim(yy, xx))
    print('UIQI', uiqi(xx, yy))
    print('CC', cc(yy, xx))
    print('SAM', trans_sam(iy, ix))  # per the original paper, only SAM is computed on the raw data; the other metrics use rescaled values
    print('ERGAS', calculate_ergas(yy, xx))
183 |
--------------------------------------------------------------------------------