├── GF126 ├── build_pyramid.py ├── extract_by_shp.py ├── ortho.py ├── pansharpen.py ├── preprocess_main_GF126.py ├── preprocess_main_GF2.py └── unpackage.py ├── GF3 ├── main.py └── readme.md └── README.md /GF126/build_pyramid.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Jun 20 15:19:22 2022 4 | 5 | @author: DELL 6 | """ 7 | 8 | from osgeo import gdal 9 | 10 | def build_pyramid(file_name): 11 | dataset = gdal.Open(file_name) 12 | dataset.BuildOverviews(overviewlist=[2, 4 ,8, 16]) 13 | del dataset 14 | -------------------------------------------------------------------------------- /GF126/extract_by_shp.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Jun 21 15:58:47 2022 4 | 5 | @author: DELL 6 | """ 7 | import fiona 8 | import rasterio 9 | from rasterio.mask import mask 10 | from build_pyramid import build_pyramid 11 | import os 12 | 13 | def extract_by_shp(raster_path, shp_path, out_raster_path): 14 | 15 | print("提取...") 16 | with fiona.open(shp_path, "r", encoding='utf-8') as shapefile: 17 | # 获取所有要素feature的形状geometry 18 | geoms = [feature["geometry"] for feature in shapefile] 19 | 20 | # print(geoms) 21 | # 裁剪 22 | with rasterio.open(raster_path) as src: 23 | out_image, out_transform = mask(src, geoms, crop=True) 24 | out_meta = src.meta.copy() 25 | 26 | # 更新输出文件的元数据 27 | out_meta.update({"driver": "GTiff", 28 | "height": out_image.shape[1], 29 | "width": out_image.shape[2], 30 | "transform": out_transform}) 31 | 32 | # 保存 33 | with rasterio.open(out_raster_path, "w", **out_meta) as dest: 34 | dest.write(out_image) 35 | 36 | print("创建金字塔") 37 | # 创建金字塔 38 | build_pyramid(out_raster_path) 39 | 40 | -------------------------------------------------------------------------------- /GF126/ortho.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Jun 19 15:30:42 2022 4 | 5 | @author: DELL 6 | ref: https://blog.csdn.net/weixin_43762038/article/details/123500331 7 | https://zhuanlan.zhihu.com/p/430397978 8 | https://www.pythonf.cn/read/61251 9 | 10 | """ 11 | 12 | import os 13 | from osgeo import gdal, osr 14 | 15 | 16 | # 正射校正 17 | def ortho(file_name, dem_name, res, out_file_name): 18 | 19 | dataset = gdal.Open(file_name, gdal.GA_ReadOnly) 20 | 21 | # 是否北半球 22 | is_north = 1 if os.path.basename(file_name).split('_')[3][0] == 'N' else 0 23 | # 计算UTM区号 24 | zone = str(int(float(os.path.basename(file_name).split('_')[2][1:])/6) + 31) 25 | zone = int('326' + zone) if is_north else int('327' + zone) 26 | 27 | dstSRS = osr.SpatialReference() 28 | dstSRS.ImportFromEPSG(zone) 29 | 30 | # dstSRS = 'EPSG:4326' 31 | 32 | tmp_ds = gdal.Warp(out_file_name, dataset, format = 'GTiff', 33 | xRes = res, yRes = res, dstSRS = dstSRS, 34 | rpc = True, resampleAlg=gdal.GRIORA_Bilinear, 35 | transformerOptions=["RPC_DEM="+dem_name]) 36 | dataset = tds = None 37 | 38 | -------------------------------------------------------------------------------- /GF126/pansharpen.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Jun 19 20:17:46 2022 4 | 5 | @author: DELL 6 | ref:Lib/site-packages/osgeo/utils 7 | """ 8 | 9 | import os 10 | import os.path 11 | import sys 12 | from osgeo import gdal 13 | 14 | 15 | def DoesDriverHandleExtension(drv, ext): 16 | exts = drv.GetMetadataItem(gdal.DMD_EXTENSIONS) 17 | return exts is not None and exts.lower().find(ext.lower()) >= 0 18 | 19 | 20 | def GetExtension(filename): 21 | ext = os.path.splitext(filename)[1] 22 | if ext.startswith('.'): 23 | ext = ext[1:] 24 | return ext 25 | 26 | 27 | def GetOutputDriversFor(filename): 28 | drv_list = [] 29 | ext = GetExtension(filename) 30 | for i in range(gdal.GetDriverCount()): 31 | drv = gdal.GetDriver(i) 32 | if (drv.GetMetadataItem(gdal.DCAP_CREATE) is not None or 33 | drv.GetMetadataItem(gdal.DCAP_CREATECOPY) is not None) and \ 34 | drv.GetMetadataItem(gdal.DCAP_RASTER) is not None: 35 | if ext and DoesDriverHandleExtension(drv, ext): 36 | drv_list.append(drv.ShortName) 37 | else: 38 | prefix = drv.GetMetadataItem(gdal.DMD_CONNECTION_PREFIX) 39 | if prefix is not None and filename.lower().startswith(prefix.lower()): 40 | drv_list.append(drv.ShortName) 41 | 42 | # GMT is registered before netCDF for opening reasons, but we want 43 | # netCDF to be used by default for output. 44 | if ext.lower() == 'nc' and not drv_list and \ 45 | drv_list[0].upper() == 'GMT' and drv_list[1].upper() == 'NETCDF': 46 | drv_list = ['NETCDF', 'GMT'] 47 | 48 | return drv_list 49 | 50 | 51 | def GetOutputDriverFor(filename): 52 | drv_list = GetOutputDriversFor(filename) 53 | ext = GetExtension(filename) 54 | if not drv_list: 55 | if not ext: 56 | return 'GTiff' 57 | else: 58 | raise Exception("Cannot guess driver for %s" % filename) 59 | elif len(drv_list) > 1: 60 | print("Several drivers matching %s extension. Using %s" % (ext if ext else '', drv_list[0])) 61 | return drv_list[0] 62 | 63 | 64 | def Usage(): 65 | print('Usage: gdal_pansharpen [--help-general] pan_dataset {spectral_dataset[,band=num]}+ out_dataset') 66 | print(' [-of format] [-b band]* [-w weight]*') 67 | print(' [-r {nearest,bilinear,cubic,cubicspline,lanczos,average}]') 68 | print(' [-threads {ALL_CPUS|number}] [-bitdepth val] [-nodata val]') 69 | print(' [-spat_adjust {union,intersection,none,nonewithoutwarning}]') 70 | print(' [-verbose_vrt] [-co NAME=VALUE]* [-q]') 71 | print('') 72 | print('Create a dataset resulting from a pansharpening operation.') 73 | return -1 74 | 75 | 76 | def gdal_pansharpen(argv): 77 | 78 | argv = gdal.GeneralCmdLineProcessor(argv) 79 | if argv is None: 80 | return -1 81 | 82 | pan_name = None 83 | last_name = None 84 | spectral_ds = [] 85 | spectral_bands = [] 86 | out_name = None 87 | bands = [] 88 | weights = [] 89 | frmt = None 90 | creation_options = [] 91 | callback = gdal.TermProgress_nocb 92 | resampling = None 93 | spat_adjust = None 94 | verbose_vrt = False 95 | num_threads = None 96 | bitdepth = None 97 | nodata = None 98 | 99 | i = 1 100 | argc = len(argv) 101 | while i < argc: 102 | if (argv[i] == '-of' or argv[i] == '-f') and i < len(argv) - 1: 103 | frmt = argv[i + 1] 104 | i = i + 1 105 | elif argv[i] == '-r' and i < len(argv) - 1: 106 | resampling = argv[i + 1] 107 | i = i + 1 108 | elif argv[i] == '-spat_adjust' and i < len(argv) - 1: 109 | spat_adjust = argv[i + 1] 110 | i = i + 1 111 | elif argv[i] == '-b' and i < len(argv) - 1: 112 | bands.append(int(argv[i + 1])) 113 | i = i + 1 114 | elif argv[i] == '-w' and i < len(argv) - 1: 115 | weights.append(float(argv[i + 1])) 116 | i = i + 1 117 | elif argv[i] == '-co' and i < len(argv) - 1: 118 | creation_options.append(argv[i + 1]) 119 | i = i + 1 120 | elif argv[i] == '-threads' and i < len(argv) - 1: 121 | num_threads = argv[i + 1] 122 | i = i + 1 123 | elif argv[i] == '-bitdepth' and i < len(argv) - 1: 124 | bitdepth = argv[i + 1] 125 | i = i + 1 126 | elif argv[i] == '-nodata' and i < len(argv) - 1: 127 | nodata = argv[i + 1] 128 | i = i + 1 129 | elif argv[i] == '-q': 130 | callback = None 131 | elif argv[i] == '-verbose_vrt': 132 | verbose_vrt = True 133 | elif argv[i][0] == '-': 134 | sys.stderr.write('Unrecognized option : %s\n' % argv[i]) 135 | return Usage() 136 | elif pan_name is None: 137 | pan_name = argv[i] 138 | pan_ds = gdal.Open(pan_name) 139 | if pan_ds is None: 140 | return 1 141 | else: 142 | # print(last_name) 143 | if last_name is not None: 144 | pos = last_name.find(',band=') 145 | # print(last_name) 146 | if pos > 0: 147 | spectral_name = last_name[0:pos] 148 | ds = gdal.Open(spectral_name) 149 | if ds is None: 150 | return 1 151 | band_num = int(last_name[pos + len(',band='):]) 152 | band = ds.GetRasterBand(band_num) 153 | spectral_ds.append(ds) 154 | spectral_bands.append(band) 155 | else: 156 | spectral_name = last_name 157 | ds = gdal.Open(spectral_name) 158 | if ds is None: 159 | return 1 160 | for j in range(ds.RasterCount): 161 | spectral_ds.append(ds) 162 | spectral_bands.append(ds.GetRasterBand(j + 1)) 163 | 164 | last_name = argv[i] 165 | # print(last_name) 166 | 167 | i = i + 1 168 | 169 | # print(spectral_name) 170 | if pan_name is None or not spectral_bands: 171 | return Usage() 172 | out_name = last_name 173 | # print(out_name) 174 | if frmt is None: 175 | frmt = GetOutputDriverFor(out_name) 176 | 177 | if not bands: 178 | bands = [j + 1 for j in range(len(spectral_bands))] 179 | else: 180 | for band in bands: 181 | if band < 0 or band > len(spectral_bands): 182 | print('Invalid band number in -b: %d' % band) 183 | return 1 184 | 185 | if weights and len(weights) != len(spectral_bands): 186 | print('There must be as many -w values specified as input spectral bands') 187 | return 1 188 | 189 | vrt_xml = """\n""" 190 | if bands != [j + 1 for j in range(len(spectral_bands))]: 191 | for i, band in enumerate(bands): 192 | sband = spectral_bands[band - 1] 193 | datatype = gdal.GetDataTypeName(sband.DataType) 194 | colorname = gdal.GetColorInterpretationName(sband.GetColorInterpretation()) 195 | vrt_xml += """ 196 | %s 197 | \n""" % (datatype, i + 1, colorname) 198 | 199 | vrt_xml += """ \n""" 200 | 201 | if weights: 202 | vrt_xml += """ \n""" 203 | vrt_xml += """ """ 204 | for i, weight in enumerate(weights): 205 | if i > 0: 206 | vrt_xml += "," 207 | vrt_xml += "%.16g" % weight 208 | vrt_xml += "\n" 209 | vrt_xml += """ \n""" 210 | 211 | if resampling is not None: 212 | vrt_xml += ' %s\n' % resampling 213 | 214 | if num_threads is not None: 215 | vrt_xml += ' %s\n' % num_threads 216 | 217 | if bitdepth is not None: 218 | vrt_xml += ' %s\n' % bitdepth 219 | 220 | if nodata is not None: 221 | vrt_xml += ' %s\n' % nodata 222 | 223 | if spat_adjust is not None: 224 | vrt_xml += ' %s\n' % spat_adjust 225 | 226 | pan_relative = '0' 227 | if frmt.upper() == 'VRT': 228 | if not os.path.isabs(pan_name): 229 | pan_relative = '1' 230 | pan_name = os.path.relpath(pan_name, os.path.dirname(out_name)) 231 | 232 | vrt_xml += """ 233 | %s 234 | 1 235 | \n""" % (pan_relative, pan_name) 236 | 237 | for i, sband in enumerate(spectral_bands): 238 | dstband = '' 239 | for j, band in enumerate(bands): 240 | if i + 1 == band: 241 | dstband = ' dstBand="%d"' % (j + 1) 242 | break 243 | 244 | ms_relative = '0' 245 | ms_name = spectral_ds[i].GetDescription() 246 | if frmt.upper() == 'VRT': 247 | if not os.path.isabs(ms_name): 248 | ms_relative = '1' 249 | ms_name = os.path.relpath(ms_name, os.path.dirname(out_name)) 250 | 251 | vrt_xml += """ 252 | %s 253 | %d 254 | \n""" % (dstband, ms_relative, ms_name, sband.GetBand()) 255 | 256 | vrt_xml += """ \n""" 257 | vrt_xml += """\n""" 258 | 259 | if frmt.upper() == 'VRT': 260 | f = gdal.VSIFOpenL(out_name, 'wb') 261 | if f is None: 262 | print('Cannot create %s' % out_name) 263 | return 1 264 | gdal.VSIFWriteL(vrt_xml, 1, len(vrt_xml), f) 265 | gdal.VSIFCloseL(f) 266 | if verbose_vrt: 267 | vrt_ds = gdal.Open(out_name, gdal.GA_Update) 268 | vrt_ds.SetMetadata(vrt_ds.GetMetadata()) 269 | else: 270 | vrt_ds = gdal.Open(out_name) 271 | if vrt_ds is None: 272 | return 1 273 | 274 | return 0 275 | 276 | vrt_ds = gdal.Open(vrt_xml) 277 | out_ds = gdal.GetDriverByName(frmt).CreateCopy(out_name, vrt_ds, 0, creation_options, callback=callback) 278 | if out_ds is None: 279 | return 1 280 | return 0 281 | 282 | 283 | 284 | # if __name__ == '__main__': 285 | # pan_path = r"E:\WangZhenQing\FGMS-Dataset\GF2_原始数据集\大宁县\GF2_PMS1_E110.6_N36.3_20200727_L1A0004953367\GF2_PMS1_E110.6_N36.3_20200727_L1A0004953367-PAN1_ortho_dem.tiff" 286 | # mss_path = r"E:\WangZhenQing\FGMS-Dataset\GF2_原始数据集\大宁县\GF2_PMS1_E110.6_N36.3_20200727_L1A0004953367\GF2_PMS1_E110.6_N36.3_20200727_L1A0004953367-MSS1_ortho_dem.tiff" 287 | # pansharpen_path = pan_path.replace("PAN1_ortho_dem.tiff", "pansharpen.tiff") 288 | # gdal_pansharpen(["pass",pan_path,mss_path,pansharpen_path]) 289 | -------------------------------------------------------------------------------- /GF126/preprocess_main_GF126.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Jun 20 09:42:06 2022 4 | 5 | @author: DELL 6 | """ 7 | 8 | 9 | import os 10 | import time 11 | import glob 12 | from osgeo import gdal 13 | from ortho import ortho 14 | from unpackage import unpackage 15 | from pansharpen import gdal_pansharpen 16 | from build_pyramid import build_pyramid 17 | import warnings 18 | warnings.filterwarnings('ignore') 19 | 20 | gdal.UseExceptions() 21 | 22 | def preprocess(dem_path, tar_path): 23 | 24 | print("开始解压...") 25 | tar_unpackage_dir = unpackage(tar_path) 26 | 27 | print("开始正射校正与融合...") 28 | # try: 29 | # 全色数据正射校正 30 | pan_path = glob.glob(tar_unpackage_dir+"/*PAN*.tiff")[0] 31 | pan_ortho_path = pan_path.replace(".tiff", "_ortho.tiff") 32 | 33 | if "GF1" in pan_path: 34 | pan_res = 2 35 | elif "GF2" in pan_path: 36 | pan_res = 1 37 | elif "GF6" in pan_path: 38 | pan_res = 2 39 | else: 40 | print("error!") 41 | 42 | print(os.path.basename(pan_path),"正射校正...") 43 | ortho(pan_path, dem_path, pan_res, pan_ortho_path) 44 | 45 | # 多光谱数据正射校正 46 | try: 47 | mss_path = glob.glob(tar_unpackage_dir+"/*MSS*.tiff")[0] 48 | except: 49 | mss_path = glob.glob(tar_unpackage_dir+"/*MUX*.tiff")[0] 50 | mss_ortho_path = mss_path.replace(".tiff", "_ortho.tiff") 51 | mss_res = pan_res * 4 52 | print(os.path.basename(mss_path),"正射校正...") 53 | ortho(mss_path, dem_path, mss_res, mss_ortho_path) 54 | 55 | # 融合 56 | print("融合...") 57 | pansharpen_path = pan_ortho_path.split("PAN")[0]+"pansharpen.tiff" 58 | gdal_pansharpen(["pass",pan_ortho_path,mss_ortho_path,pansharpen_path]) 59 | 60 | tar_unpackage_dir_paths = glob.glob(tar_unpackage_dir+"/*") 61 | for tar_unpackage_dir_path in tar_unpackage_dir_paths: 62 | if tar_unpackage_dir_path==pansharpen_path: 63 | pass 64 | else: 65 | os.remove(tar_unpackage_dir_path) 66 | 67 | print("创建金字塔...") 68 | build_pyramid(pansharpen_path) 69 | 70 | print("该景图像处理完成, 并删除压缩包.") 71 | # except: 72 | # print(f"{tar_unpackage_dir} is error!") 73 | 74 | if __name__ == '__main__': 75 | 76 | print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) 77 | # 采用envi自带的dem 78 | dem_path = r"C:\setup\Exelis\ENVI53\data\GMTED2010.jp2" 79 | # 一堆压缩包所在主文件夹 80 | tar_dir = "H:/HYF" 81 | tar_paths = [] 82 | # 假设最多5级文件夹嵌套 83 | for i in range(5): 84 | nest_dir = "/*" * i 85 | tar_paths += glob.glob(f"{tar_dir}{nest_dir}/*.tar.gz") 86 | 87 | 88 | print(f"文件共有{len(tar_paths)}个.") 89 | for tar_index, tar_path in enumerate(tar_paths): 90 | print(f"{tar_index+1}/{len(tar_paths)}") 91 | preprocess(dem_path, tar_path) 92 | # 删除压缩包 93 | os.remove(tar_path) 94 | print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) 95 | -------------------------------------------------------------------------------- /GF126/preprocess_main_GF2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Jun 20 09:42:06 2022 4 | 5 | @author: DELL 6 | """ 7 | 8 | import glob 9 | import os 10 | from unpackage import unpackage 11 | from ortho import ortho 12 | from pansharpen import gdal_pansharpen 13 | from osgeo import gdal 14 | from build_pyramid import build_pyramid 15 | import warnings 16 | warnings.filterwarnings('ignore') 17 | 18 | gdal.UseExceptions() 19 | 20 | def preprocess(dem_path, tar_dir): 21 | 22 | tar_paths = glob.glob(tar_dir+"/*.tar.gz") 23 | tar_unpackage_dirs = [] 24 | print("开始解压...") 25 | for tar_index, tar_path in enumerate(tar_paths): 26 | print(f"{tar_index+1}/{len(tar_paths)}") 27 | print(os.path.basename(tar_path)) 28 | tar_unpackage_dir = unpackage(tar_path) 29 | tar_unpackage_dirs.append(tar_unpackage_dir) 30 | 31 | print("开始正射校正与融合...") 32 | for tar_unpackage_index, tar_unpackage_dir in enumerate(tar_unpackage_dirs): 33 | print(f"{tar_unpackage_index+1}/{len(tar_unpackage_dirs)}") 34 | 35 | # 全色数据正射校正 36 | pan_path = glob.glob(tar_unpackage_dir+"/*PAN*.tiff")[0] 37 | pan_ortho_path = pan_path.replace(".tiff", "_ortho.tiff") 38 | pan_res = 0.8 39 | print(os.path.basename(pan_path),"正射校正...") 40 | ortho(pan_path, dem_path, pan_res, pan_ortho_path) 41 | 42 | # 多光谱数据正射校正 43 | mss_path = glob.glob(tar_unpackage_dir+"/*MSS*.tiff")[0] 44 | mss_ortho_path = mss_path.replace(".tiff", "_ortho.tiff") 45 | mss_res = 3.2 46 | print(os.path.basename(mss_path),"正射校正...") 47 | ortho(mss_path, dem_path, mss_res, mss_ortho_path) 48 | 49 | # 融合 50 | print("融合...") 51 | pansharpen_path = pan_ortho_path.split("PAN")[0]+"pansharpen.tiff" 52 | gdal_pansharpen(["pass",pan_ortho_path,mss_ortho_path,pansharpen_path]) 53 | print("创建金字塔...") 54 | build_pyramid(pansharpen_path) 55 | 56 | if __name__ == '__main__': 57 | 58 | # 采用envi自带的dem 59 | dem_path = r"C:\setup\Exelis\ENVI53\data\GMTED2010.jp2" 60 | # 一堆压缩包所在文件夹 61 | tar_dir = r"E:\WangZhenQing\GF2" 62 | preprocess(dem_path, tar_dir) 63 | -------------------------------------------------------------------------------- /GF126/unpackage.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Jun 20 09:38:38 2022 4 | 5 | @author: DELL 6 | """ 7 | import tarfile 8 | 9 | # 解压 10 | def unpackage(file_name): 11 | 12 | # 提取解压文件夹名 13 | if ".tar.gz" in file_name: 14 | out_dir = file_name.split(".tar.gz")[0] 15 | else: 16 | out_dir = file_name.split(".")[0] 17 | # 进行解压 18 | with tarfile.open(file_name) as file: 19 | file.extractall(path = out_dir) 20 | return out_dir 21 | -------------------------------------------------------------------------------- /GF3/main.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon May 15 15:59:13 2023 4 | 5 | @author: DELL 6 | """ 7 | 8 | import os 9 | import re 10 | import glob 11 | import warnings 12 | import numpy as np 13 | import xml.etree.ElementTree as ET 14 | 15 | from osgeo import gdal, osr 16 | 17 | warnings.filterwarnings ('ignore') 18 | 19 | # 获取影像元信息xml文件名、1级影像tiff文件名 20 | def get_meta_image_path(data_dir): 21 | meta_path = glob.glob(data_dir+"/*.meta.xml")[0] 22 | 23 | HH_paths = glob.glob(data_dir+"/*_HH_*.tiff") 24 | if len(HH_paths) == 0: 25 | HH_path = None 26 | else: 27 | HH_path = HH_paths[0] 28 | HV_paths = glob.glob(data_dir+"/*_HV_*.tiff") 29 | if len(HV_paths) == 0: 30 | HV_path = None 31 | else: 32 | HV_path = HV_paths[0] 33 | VH_paths = glob.glob(data_dir+"/*_VH_*.tiff") 34 | if len(VH_paths) == 0: 35 | VH_path = None 36 | else: 37 | VH_path = VH_paths[0] 38 | VV_paths = glob.glob(data_dir+"/*_VV_*.tiff") 39 | if len(VV_paths) == 0: 40 | VV_path = None 41 | else: 42 | VV_path = VV_paths[0] 43 | 44 | image_paths = [HH_path, HV_path, VH_path, VV_path] 45 | return meta_path, image_paths 46 | 47 | # 获取影像rpc文件名(有理多项式系数,rational polynomail cofficient) 48 | def get_rpc_path(data_dir): 49 | # rpc文件后缀可能为rpb或者rpc 50 | HH_rpcs = glob.glob(data_dir+"/*_HH_*.rpb") + glob.glob(data_dir+"/*_HH_*.rpc") 51 | if len(HH_rpcs) == 0: 52 | HH_rpc = None 53 | else: 54 | HH_rpc = HH_rpcs[0] 55 | HV_rpcs = glob.glob(data_dir+"/*_HV_*.rpb") + glob.glob(data_dir+"/*_HV_*.rpc") 56 | if len(HV_rpcs) == 0: 57 | HV_rpc = None 58 | else: 59 | HV_rpc = HV_rpcs[0] 60 | VH_rpcs = glob.glob(data_dir+"/*_VH_*.rpb") + glob.glob(data_dir+"/*_VH_*.rpc") 61 | if len(VH_rpcs) == 0: 62 | VH_rpc = None 63 | else: 64 | VH_rpc = VH_rpcs[0] 65 | VV_rpcs = glob.glob(data_dir+"/*_VV_*.rpb") + glob.glob(data_dir+"/*_VV_*.rpc") 66 | if len(VV_rpcs) == 0: 67 | VV_rpc = None 68 | else: 69 | VV_rpc = VV_rpcs[0] 70 | 71 | rpc_paths = [HH_rpc, HV_rpc, VH_rpc, VV_rpc] 72 | return rpc_paths 73 | 74 | # 从元数据中获取归一化峰值qualifyValue与定标常数Calibration 75 | def get_qualifyValue_calibrations(meta_path): 76 | tree = ET.parse(meta_path) 77 | 78 | HH_qualifyValue = tree.findall("imageinfo")[0].find("QualifyValue").find("HH").text 79 | HV_qualifyValue = tree.findall("imageinfo")[0].find("QualifyValue").find("HV").text 80 | VH_qualifyValue = tree.findall("imageinfo")[0].find("QualifyValue").find("VH").text 81 | VV_qualifyValue = tree.findall("imageinfo")[0].find("QualifyValue").find("VV").text 82 | qualifyValues = [HH_qualifyValue, HV_qualifyValue, VH_qualifyValue, VV_qualifyValue] 83 | 84 | HH_calibrationConst = tree.findall("processinfo")[0].find("CalibrationConst").find("HH").text 85 | HV_calibrationConst = tree.findall("processinfo")[0].find("CalibrationConst").find("HV").text 86 | VH_calibrationConst = tree.findall("processinfo")[0].find("CalibrationConst").find("VH").text 87 | VV_calibrationConst = tree.findall("processinfo")[0].find("CalibrationConst").find("VV").text 88 | calibrationConsts = [HH_calibrationConst, HV_calibrationConst, VH_calibrationConst, VV_calibrationConst] 89 | 90 | return qualifyValues, calibrationConsts 91 | 92 | # 从元数据中获取标称分辨率信息/m 93 | def get_resolution(meta_path): 94 | tree = ET.parse(meta_path) 95 | 96 | resolution = float(tree.findall("productinfo")[0].find("NominalResolution").text) 97 | 98 | return resolution 99 | 100 | # 图像写入 101 | def imwrite(data, save_path, geotrans=(0,0,0,0,0,0), proj=""): 102 | 103 | if 'int8' in data.dtype.name: 104 | datatype = gdal.GDT_Byte 105 | elif 'int16' in data.dtype.name: 106 | datatype = gdal.GDT_UInt16 107 | else: 108 | datatype = gdal.GDT_Float32 109 | if len(data.shape) == 3: 110 | im_bands, im_height, im_width = data.shape 111 | elif len(data.shape) == 2: 112 | data = np.array([data]) 113 | im_bands, im_height, im_width = data.shape 114 | 115 | # 创建文件 116 | driver = gdal.GetDriverByName("GTiff") 117 | dataset = driver.Create(save_path, int(im_width), int(im_height), int(im_bands), datatype) 118 | if (dataset != None): 119 | dataset.SetGeoTransform(geotrans) # 写入仿射变换参数 120 | dataset.SetProjection(proj) # 写入投影 121 | for i in range(im_bands): 122 | dataset.GetRasterBand(i + 1).WriteArray(data[i]) 123 | del dataset 124 | 125 | # 读取rpc文件 126 | def read_rpb(rpb_path): 127 | with open(rpb_path, 'r') as f: 128 | buff = f.read() 129 | 130 | # Name the url of the reference:http://geotiff.maptools.org/rpc_prop.html 131 | ERR_BIAS1 = 'errBias' # Error - deviation. RMS error for all points in the image (in m/horizontal axis) (-1.0, if unknown) 132 | ERR_BIAS2 = ';' 133 | 134 | ERR_RAND1 = 'errRand' # Error - random. RMS random error in meters for each horizontal axis of each point in the image (-1.0 if unknown) 135 | ERR_RAND2 = ';' 136 | 137 | LINE_OFF1 = 'lineOffset' 138 | LINE_OFF2 = ';' 139 | 140 | SAMP_OFF1 = 'sampOffset' 141 | SAMP_OFF2 = ';' 142 | 143 | LAT_OFF1 = 'latOffset' 144 | LAT_OFF2 = ';' 145 | 146 | LONG_OFF1 = 'longOffset' 147 | LONG_OFF2 = ';' 148 | 149 | HEIGHT_OFF1 = 'heightOffset' 150 | HEIGHT_OFF2 = ';' 151 | 152 | LINE_SCALE1 = 'lineScale' 153 | LINE_SCALE2 = ';' 154 | 155 | SAMP_SCALE1 = 'sampScale' 156 | SAMP_SCALE2 = ';' 157 | 158 | LAT_SCALE1 = 'latScale' 159 | LAT_SCALE2 = ';' 160 | 161 | LONG_SCALE1 = 'longScale' 162 | LONG_SCALE2 = ';' 163 | 164 | HEIGHT_SCALE1 = 'heightScale' 165 | HEIGHT_SCALE2 = ';' 166 | 167 | LINE_NUM_COEFF1 = 'lineNumCoef' 168 | LINE_NUM_COEFF2 = ';' 169 | 170 | LINE_DEN_COEFF1 = 'lineDenCoef' 171 | LINE_DEN_COEFF2 = ';' 172 | 173 | SAMP_NUM_COEFF1 = 'sampNumCoef' 174 | SAMP_NUM_COEFF2 = ';' 175 | 176 | SAMP_DEN_COEFF1 = 'sampDenCoef' 177 | SAMP_DEN_COEFF2 = ';' 178 | 179 | # Regularized extraction values 180 | pat_ERR_BIAS = re.compile(ERR_BIAS1 + '(.*?)' + ERR_BIAS2, re.S) 181 | result_ERR_BIAS = pat_ERR_BIAS.findall(buff) 182 | ERR_BIAS = result_ERR_BIAS[0] 183 | ERR_BIAS = ERR_BIAS.replace(" ", "") 184 | 185 | pat_ERR_RAND = re.compile(ERR_RAND1 + '(.*?)' + ERR_RAND2, re.S) 186 | result_ERR_RAND = pat_ERR_RAND.findall(buff) 187 | ERR_RAND = result_ERR_RAND[0] 188 | ERR_RAND = ERR_RAND.replace(" ", "") 189 | 190 | pat_LINE_OFF = re.compile(LINE_OFF1 + '(.*?)' + LINE_OFF2, re.S) 191 | result_LINE_OFF = pat_LINE_OFF.findall(buff) 192 | LINE_OFF = result_LINE_OFF[0] 193 | LINE_OFF = LINE_OFF.replace(" ", "") 194 | 195 | pat_SAMP_OFF = re.compile(SAMP_OFF1 + '(.*?)' + SAMP_OFF2, re.S) 196 | result_SAMP_OFF = pat_SAMP_OFF.findall(buff) 197 | SAMP_OFF = result_SAMP_OFF[0] 198 | SAMP_OFF = SAMP_OFF.replace(" ", "") 199 | 200 | pat_LAT_OFF = re.compile(LAT_OFF1 + '(.*?)' + LAT_OFF2, re.S) 201 | result_LAT_OFF = pat_LAT_OFF.findall(buff) 202 | LAT_OFF = result_LAT_OFF[0] 203 | LAT_OFF = LAT_OFF.replace(" ", "") 204 | 205 | pat_LONG_OFF = re.compile(LONG_OFF1 + '(.*?)' + LONG_OFF2, re.S) 206 | result_LONG_OFF = pat_LONG_OFF.findall(buff) 207 | LONG_OFF = result_LONG_OFF[0] 208 | LONG_OFF = LONG_OFF.replace(" ", "") 209 | 210 | pat_HEIGHT_OFF = re.compile(HEIGHT_OFF1 + '(.*?)' + HEIGHT_OFF2, re.S) 211 | result_HEIGHT_OFF = pat_HEIGHT_OFF.findall(buff) 212 | HEIGHT_OFF = result_HEIGHT_OFF[0] 213 | HEIGHT_OFF = HEIGHT_OFF.replace(" ", "") 214 | 215 | pat_LINE_SCALE = re.compile(LINE_SCALE1 + '(.*?)' + LINE_SCALE2, re.S) 216 | result_LINE_SCALE = pat_LINE_SCALE.findall(buff) 217 | LINE_SCALE = result_LINE_SCALE[0] 218 | LINE_SCALE = LINE_SCALE.replace(" ", "") 219 | 220 | pat_SAMP_SCALE = re.compile(SAMP_SCALE1 + '(.*?)' + SAMP_SCALE2, re.S) 221 | result_SAMP_SCALE = pat_SAMP_SCALE.findall(buff) 222 | SAMP_SCALE = result_SAMP_SCALE[0] 223 | SAMP_SCALE = SAMP_SCALE.replace(" ", "") 224 | 225 | pat_LAT_SCALE = re.compile(LAT_SCALE1 + '(.*?)' + LAT_SCALE2, re.S) 226 | result_LAT_SCALE = pat_LAT_SCALE.findall(buff) 227 | LAT_SCALE = result_LAT_SCALE[0] 228 | LAT_SCALE = LAT_SCALE.replace(" ", "") 229 | 230 | pat_LONG_SCALE = re.compile(LONG_SCALE1 + '(.*?)' + LONG_SCALE2, re.S) 231 | result_LONG_SCALE = pat_LONG_SCALE.findall(buff) 232 | LONG_SCALE = result_LONG_SCALE[0] 233 | LONG_SCALE = LONG_SCALE.replace(" ", "") 234 | 235 | pat_HEIGHT_SCALE = re.compile(HEIGHT_SCALE1 + '(.*?)' + HEIGHT_SCALE2, re.S) 236 | result_HEIGHT_SCALE = pat_HEIGHT_SCALE.findall(buff) 237 | HEIGHT_SCALE = result_HEIGHT_SCALE[0] 238 | HEIGHT_SCALE = HEIGHT_SCALE.replace(" ", "") 239 | 240 | pat_LINE_NUM_COEFF = re.compile(LINE_NUM_COEFF1 + '(.*?)' + LINE_NUM_COEFF2, re.S) 241 | result_LINE_NUM_COEFF = pat_LINE_NUM_COEFF.findall(buff) 242 | LINE_NUM_COEFF = result_LINE_NUM_COEFF[0] 243 | LINE_NUM_COEFF3 = LINE_NUM_COEFF 244 | # LINE_NUM_COEFF3 = LINE_NUM_COEFF3.strip('()') 245 | # LINE_NUM_COEFF3 = LINE_NUM_COEFF3.strip('()') 246 | LINE_NUM_COEFF3 = LINE_NUM_COEFF3.replace(" ", "") 247 | LINE_NUM_COEFF3 = LINE_NUM_COEFF3.replace('(', '') 248 | LINE_NUM_COEFF3 = LINE_NUM_COEFF3.replace(')', '') 249 | LINE_NUM_COEFF3 = LINE_NUM_COEFF3.replace('\n', '') 250 | LINE_NUM_COEFF3 = LINE_NUM_COEFF3.replace('\t', '') 251 | LINE_NUM_COEFF3 = LINE_NUM_COEFF3.replace(',', ' ') 252 | 253 | pat_LINE_DEN_COEFF = re.compile(LINE_DEN_COEFF1 + '(.*?)' + LINE_DEN_COEFF2, re.S) 254 | result_LINE_DEN_COEFF = pat_LINE_DEN_COEFF.findall(buff) 255 | LINE_DEN_COEFF = result_LINE_DEN_COEFF[0] 256 | LINE_DEN_COEFF3 = LINE_DEN_COEFF 257 | LINE_DEN_COEFF3 = LINE_DEN_COEFF3.replace(" ", "") 258 | LINE_DEN_COEFF3 = LINE_DEN_COEFF3.replace('(', '') 259 | LINE_DEN_COEFF3 = LINE_DEN_COEFF3.replace(')', '') 260 | LINE_DEN_COEFF3 = LINE_DEN_COEFF3.replace('\n', '') 261 | LINE_DEN_COEFF3 = LINE_DEN_COEFF3.replace('\t', '') 262 | LINE_DEN_COEFF3 = LINE_DEN_COEFF3.replace(',', ' ') 263 | 264 | pat_SAMP_NUM_COEFF = re.compile(SAMP_NUM_COEFF1 + '(.*?)' + SAMP_NUM_COEFF2, re.S) 265 | result_SAMP_NUM_COEFF = pat_SAMP_NUM_COEFF.findall(buff) 266 | SAMP_NUM_COEFF = result_SAMP_NUM_COEFF[0] 267 | SAMP_NUM_COEFF3 = SAMP_NUM_COEFF 268 | SAMP_NUM_COEFF3 = SAMP_NUM_COEFF3.replace(" ", "") 269 | 270 | SAMP_NUM_COEFF3 = SAMP_NUM_COEFF3.replace('(', '') 271 | SAMP_NUM_COEFF3 = SAMP_NUM_COEFF3.replace(')', '') 272 | SAMP_NUM_COEFF3 = SAMP_NUM_COEFF3.replace('\n', '') 273 | SAMP_NUM_COEFF3 = SAMP_NUM_COEFF3.replace('\t', '') 274 | SAMP_NUM_COEFF3 = SAMP_NUM_COEFF3.replace(',', ' ') 275 | 276 | pat_SAMP_DEN_COEFF = re.compile(SAMP_DEN_COEFF1 + '(.*?)' + SAMP_DEN_COEFF2, re.S) 277 | result_SAMP_DEN_COEFF = pat_SAMP_DEN_COEFF.findall(buff) 278 | SAMP_DEN_COEFF = result_SAMP_DEN_COEFF[0] 279 | SAMP_DEN_COEFF3 = SAMP_DEN_COEFF 280 | SAMP_DEN_COEFF3 = SAMP_DEN_COEFF3.replace(" ", "") 281 | 282 | SAMP_DEN_COEFF3 = SAMP_DEN_COEFF3.replace('(', '') 283 | SAMP_DEN_COEFF3 = SAMP_DEN_COEFF3.replace(')', '') 284 | SAMP_DEN_COEFF3 = SAMP_DEN_COEFF3.replace('\n', '') 285 | SAMP_DEN_COEFF3 = SAMP_DEN_COEFF3.replace('\t', '') 286 | SAMP_DEN_COEFF3 = SAMP_DEN_COEFF3.replace(',', ' ') 287 | 288 | rpc = ['ERR_BIAS'+ERR_BIAS, 'ERR_RAND'+ERR_RAND, 'LINE_OFF'+LINE_OFF, 289 | 'SAMP_OFF'+SAMP_OFF, 'LAT_OFF'+LAT_OFF, 'LONG_OFF'+LONG_OFF, 290 | 'HEIGHT_OFF'+HEIGHT_OFF, 'LINE_SCALE'+LINE_SCALE, 'SAMP_SCALE'+SAMP_SCALE, 291 | 'LAT_SCALE'+LAT_SCALE, 'LONG_SCALE'+LONG_SCALE, 'HEIGHT_SCALE'+HEIGHT_SCALE, 292 | 'LINE_NUM_COEFF'+LINE_NUM_COEFF3,'LINE_DEN_COEFF'+LINE_DEN_COEFF3, 293 | 'SAMP_NUM_COEFF'+SAMP_NUM_COEFF3,'SAMP_DEN_COEFF'+SAMP_DEN_COEFF3] 294 | #rpc = ['ERR BIAS=' + ERR_BIAS, 'ERR RAND=' + ERR_RAND, 'LINE OFF=' + LINE_OFF, 'SAMP OFF=' + SAMP_OFF,'LAT OFF=' + LAT_OFF, 'LONG OFF=' + LONG_OFF, 'HEIGHT OFF=' + HEIGHT_OFF, 'LINE SCALE=' + LINE_SCALE,'SAMP SCALE=' + SAMP_SCALE, 'LAT SCALE=' + LAT_SCALE, 'LONG SCALE=' + LONG_SCALE,'HEIGHT SCALE=' + HEIGHT_SCALE, 'LINE NUM COEFF=' + LINE_NUM_COEFF3, 'LINE DEN COEFF=' + LINE_DEN_COEFF3,'SAMP NUM COEFF=' + SAMP_NUM_COEFF3, 'SAMP DEN COEFF=' + SAMP_DEN_COEFF3] 295 | return rpc 296 | 297 | # 使用 RPC 的几何校正 298 | def geometric_correction(image_L1B_path, rpc_path, resolution, dem_path, output_dir): 299 | image_L2_name = os.path.basename(image_L1B_path).replace('L1B', 'L2') 300 | image_L2_path = os.path.join(output_dir, image_L2_name) 301 | 302 | # 读取rpc并嵌入dataset 303 | rpc = read_rpb(rpc_path) 304 | dataset = gdal.Open(image_L1B_path) 305 | dataset.SetMetadata(rpc, 'RPC') 306 | 307 | # 是否北半球 308 | is_north = 1 if os.path.basename(image_L2_path).split('_')[5][0] == 'N' else 0 309 | # 计算UTM区号 310 | zone = str(int(float(os.path.basename(image_L2_path).split('_')[4][1:])/6) + 31) 311 | zone = int('326' + zone) if is_north else int('327' + zone) 312 | dstSRS = osr.SpatialReference() 313 | dstSRS.ImportFromEPSG(zone) 314 | 315 | gdal.Warp(image_L2_path, dataset, dstSRS=dstSRS, 316 | xRes=resolution, yRes=resolution, rpc=True, 317 | transformerOptions=["RPC_DEM="+dem_path]) 318 | del dataset 319 | 320 | # GF3号L1A级数据生产出L2级 321 | def GF3_L1A_2_L2(image_path, qualifyValue1A, calibrationConst, rpc_path, resolution, dem_path, output_dir): 322 | image_name = os.path.basename(image_path) 323 | 324 | # 获取L1A单视复数产品值 325 | image = gdal.Open(image_path) 326 | IQ = image.ReadAsArray() 327 | 328 | # 获取复数中的实部I与虚部Q 329 | I = np.array(IQ[0, :, :], dtype='float32') 330 | Q = np.array(IQ[1, :, :], dtype='float32') 331 | 332 | # 多视处理(由复数数据得到强度数据) 333 | # 计算强度P与振幅A 334 | P = (I ** 2 + Q ** 2) 335 | 336 | # 辐射定标 337 | backscatter = 10 * np.log10(P * (qualifyValue1A / 32767) ** 2) - calibrationConst 338 | 339 | # # 高分三号卫星的等效噪声系数为-25dB 340 | # # 后向散射系数最小值对应于SAR传感器的等效噪声系数 341 | # # 考虑到部分GF3元文件中定标系数为空不可用,计算得到的后向散射系数为相对值,所以等效噪声系数暂不使用 342 | # equivalent_noise = -25 343 | 344 | # log(0)=-inf ,将inf赋值为最小值 345 | backscatter[np.isinf(-backscatter)] = np.nan 346 | backscatter[np.isnan(-backscatter)] = np.nanmin(backscatter) 347 | 348 | image_L1B_name = image_name.replace('L1A', 'L1B') 349 | image_L1B_path = os.path.join(output_dir, image_L1B_name) 350 | imwrite(backscatter, image_L1B_path) 351 | 352 | # 几何校正 353 | geometric_correction(image_L1B_path, rpc_path, resolution, dem_path, output_dir) 354 | 355 | # 对不同极化方式数据进行L2级生成 356 | def GF3_L1A_2_L2_batch(meta_path, image_paths, rpc_paths, resolution, dem_path, output_dir): 357 | qualifyValues, calibrationConsts = get_qualifyValue_calibrations(meta_path) 358 | for image_path, rpc_path, qualifyValue1A, calibrationConst in zip(image_paths, rpc_paths, qualifyValues, calibrationConsts): 359 | if image_path != None: 360 | image_name = os.path.basename(image_path) 361 | print(f"正在处理{image_name}.") 362 | if qualifyValue1A == "NULL": 363 | qualifyValue1A = np.nan 364 | else: 365 | qualifyValue1A = float(qualifyValue1A) 366 | if calibrationConst == "NULL": 367 | calibrationConst = 0 368 | print("注意: 元文件中定标系数缺失,以0作为定标系数计算后向散射系数相对值.") 369 | else: 370 | calibrationConst = float(calibrationConst) 371 | GF3_L1A_2_L2(image_path, qualifyValue1A, calibrationConst, rpc_path, resolution, dem_path, output_dir) 372 | 373 | if __name__ == '__main__': 374 | 375 | dem_path = r"C:\setup\Exelis\ENVI53\data\GMTED2010.jp2" 376 | data_dir = r"E:\WangZhenQing\GF3_preprocess\GF3_KAS_FSII_030839_E113.5_N22.7_20220619_L1A_HHHV_L10006536542" 377 | output_dir = data_dir+'_output' 378 | 379 | if not os.path.exists(output_dir): 380 | os.mkdir(output_dir) 381 | 382 | meta_path, image_paths = get_meta_image_path(data_dir) 383 | resolution = get_resolution(meta_path) 384 | rpc_paths = get_rpc_path(data_dir) 385 | 386 | GF3_L1A_2_L2_batch(meta_path, image_paths, rpc_paths, resolution, dem_path, output_dir) -------------------------------------------------------------------------------- /GF3/readme.md: -------------------------------------------------------------------------------- 1 | 2 | ## python+gdal实现GF3预处理(多视处理+辐射定标+地理编码) 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 说明 2 | 3 | 对于高分1/2/6号数据,本文仅仅实现了正射校正、全色和多光谱的融合以及创建金字塔。如果需要的是高精度的定量反演,还需要进行辐射定标和大气校正。 4 | 5 | 数据来源为中国资源卫星应用中心陆地观测卫星数据服务平台。 6 | 7 | GF126/preprocess_main_GF2.py可实现GF2预处理 8 | 9 | GF126/preprocess_main_GF126.py可实现GF126预处理 10 | 11 | GF3/main.py可实现GF3预处理 12 | 13 | # 知乎文字链接 14 | 15 | [python+gdal高分二号影像批量预处理(正射校正+融合+金字塔创建)](https://zhuanlan.zhihu.com/p/531311366) 16 | 17 | --------------------------------------------------------------------------------