├── LICENSE
├── README.md
├── .gitignore
├── glas_analysis.py
├── filter_glas.py
└── glas_proc.py

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 David Shean

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# icesat_tools
Scripts for processing NASA ICESat and ICESat-2 satellite laser altimetry data

## ICESat GLAS

GLAH14 products are available from NSIDC:
- [https://nsidc.org/data/GLAH14/versions/34](https://nsidc.org/data/GLAH14/versions/34)
- [http://nsidc.org/data/docs/daac/glas_altimetry/data-dictionary-glah14.html](http://nsidc.org/data/docs/daac/glas_altimetry/data-dictionary-glah14.html)

### Initial download
Mirror the full GLAH14 collection (the `mirror` command is run within the `lftp` session):

`lftp ftp://n5eil01u.ecs.nsidc.org/DP5/GLAS/`

`mirror --parallel=16 GLAH14.034`

### Processing
- `glas_proc.py` - clip points to a specified extent (e.g., CONUS), filter using internal quality flags/values, and filter against a reference DEM

### Filtering
- `filter_glas.py` - extract points that match the extent of an existing raster and analyze statistics for the elevation differences, in preparation for pc_align co-registration with the ICESat points as reference

### Analysis
- `glas_analysis.py` - sample analysis workflow for Africa

### To do
- Update to geopandas
- Add crossover analysis
- Better interpolation
- Remove hardcoded paths, better documentation
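
### Loading output in geopandas
The filtered CSV output can be loaded directly into geopandas (the planned migration target above). A minimal sketch, assuming the `GLAH14_*_refdemfilt.csv` column layout written by `glas_proc.py`:

```python
import pandas as pd
import geopandas as gpd

# np.savetxt prefixes the header row with '# ', so clean up the column names
df = pd.read_csv('GLAH14_hma_refdemfilt.csv', skipinitialspace=True)
df.columns = [c.strip().lstrip('#').strip() for c in df.columns]

# Build point geometries from the lon/lat columns (WGS84)
gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df['lon'], df['lat']), crs='EPSG:4326')
gdf.to_file('GLAH14_hma_refdemfilt.gpkg', driver='GPKG')
```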
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
--------------------------------------------------------------------------------
/glas_analysis.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python

#Sample analysis workflow for Africa (Chad)
#Clusters points by acquisition time, then grids each cluster

import numpy as np
from pygeotools.lib import timelib, geolib, iolib
from osgeo import osr

glas_csv_fn = 'GLAH14_chad_refdemfilt.csv'
srtm_fn = 'chad_nasadem_hgt_merge_hgt_adj_proj_hs_az315.tif'
srtm_ds = iolib.fn_getds(srtm_fn)

# dt_ordinal, dt_YYYYMMDD, lat, lon, z_WGS84, z_refdem_med_WGS84, z_refdem_nmad
glas_pts = np.loadtxt(glas_csv_fn, delimiter=',', skiprows=1)

#Project lon/lat to UTM 33N
srs = osr.SpatialReference()
srs.ImportFromEPSG(32633)
x, y, z = geolib.cT_helper(glas_pts[:,3], glas_pts[:,2], glas_pts[:,4], geolib.wgs_srs, srs)

#x, y, z, dt_ordinal
#pt_array = glas_pts[:,[3,2,4,0]]
pt_array = np.array([x, y, z, glas_pts[:,0]]).T

#Cluster into campaigns separated by time gaps larger than dt_thresh (days)
dt_thresh = 16.0
d = np.diff(pt_array[:,3])
b = np.nonzero(d > dt_thresh)[0] + 1
#Note: use pt_array.shape[0] (not d.shape[0]) as the final boundary, so the last point is included
b = np.hstack((0, b, pt_array.shape[0]))
f_list = []
dt_list = []
for i in range(len(b)-1):
    f = pt_array[b[i]:b[i+1]]
    min_dt = timelib.o2dt(f[:,3].min())
    max_dt = timelib.o2dt(f[:,3].max())
    mid_dt = timelib.o2dt(f[:,3].min() + (f[:,3].max() - f[:,3].min())/2.0)
    mean_dt = timelib.o2dt(f[:,3].mean())
    med_dt = timelib.o2dt(np.median(f[:,3]))
    f_list.append(f)
    dt_list.append([min_dt, max_dt, mid_dt, mean_dt, med_dt])

#Grid each cluster at the specified resolution, writing one GTiff per statistic
statlist = ('median', 'mean', 'count', 'std')
res = 300
for n,f in enumerate(f_list):
    mean_dt = dt_list[n][3]
    for stat in statlist:
        g, ds = geolib.block_stats_grid_gen(f[:,0], f[:,1], f[:,2], res=res, srs=srs, stat=stat)
        out_fn = mean_dt.strftime('%Y%m%d_%H%M_')+stat+'_'+str(res)+'m.tif'
        iolib.writeGTiff(g, out_fn, ds)

#Make stacks
#Create function to wrap ASP point2dem for interpolation
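
#A minimal sketch (untested) of the point2dem wrapper noted above, assuming
#NASA Ames Stereo Pipeline's point2dem is on PATH and a plain x,y,z csv in the
#projected srs, e.g., np.savetxt('pts_utm.csv', pt_array[:,0:3], delimiter=',')
#Flag names follow the ASP documentation; verify with `point2dem --help`
def point2dem_wrapper(csv_fn, res=300, proj4='+proj=utm +zone=33 +datum=WGS84'):
    import subprocess
    cmd = ['point2dem',
           '--csv-format', '1:easting 2:northing 3:height_above_datum',
           '--csv-proj4', proj4,
           '--tr', str(res),
           '-o', csv_fn.rsplit('.', 1)[0],
           csv_fn]
    #Writes <prefix>-DEM.tif alongside the input csv
    subprocess.check_call(cmd)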
--------------------------------------------------------------------------------
/filter_glas.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python

#Filter preprocessed ICESat-1 GLAS points for a given input raster

#First run glas_proc.py - see usage
#Also expects *DEM_32m_ref.tif output from dem_mask.py

#Then run filter_glas.py for a single DEM fn or a list of DEM fn
#parallel --jobs 16 --delay 1 '~/src/demcoreg/demcoreg/filter_glas.py {}' ::: */dem*/*DEM_32m.tif

import sys
import os

import numpy as np
from osgeo import gdal
from pygeotools.lib import geolib, iolib, malib, timelib

import matplotlib.pyplot as plt

from imview.lib import gmtColormap, pltlib
cpt_rainbow = gmtColormap.get_rainbow()

site = 'hma'

#Minimum number of points required to write out _ref.csv
min_pts = 100

#Maximum value of surface slope (degrees) to use
max_slope = 20.

pt_srs = geolib.wgs_srs
#Column indices in the input csv: time (dt_ordinal), lon, lat, z
tcol = 0
xcol = 3
ycol = 2
zcol = 4

#Padding in pixels for sample radius
#Since we're likely dealing with 32-m products here, can just use pad=1
pad = 1
#pad = 'glas'

glas_dir = '/nobackupp8/deshean/icesat_glas'
#ext = 'GLAH14_%s_refdemfilt_lulcfilt' % site
ext = 'GLAH14_%s_refdemfilt' % site
glas_npz_fn = os.path.join(glas_dir, ext+'.npz')

#Cache the csv as a compressed npz for faster repeat loads
if not os.path.exists(glas_npz_fn):
    glas_csv_fn = os.path.splitext(glas_npz_fn)[0]+'.csv'
    print("Loading csv: %s" % glas_csv_fn)
    glas_pts = np.loadtxt(glas_csv_fn, delimiter=',', skiprows=1)
    print("Saving npz: %s" % glas_npz_fn)
    np.savez_compressed(glas_npz_fn, glas_pts)
else:
    #This takes ~5 seconds to load ~9M records with 8 fields
    print("Loading npz: %s" % glas_npz_fn)
    glas_pts = np.load(glas_npz_fn)['arr_0']

dem_fn_list = sys.argv[1:]
for n,dem_fn in enumerate(dem_fn_list):
    print("%i of %i" % (n+1, len(dem_fn_list)))
    #Lat/lon extent filter
    print("Loading DEM: %s" % dem_fn)
    dem_ds = gdal.Open(dem_fn)
    dem_ma = iolib.ds_getma(dem_ds)
    dem_extent_wgs84 = geolib.ds_extent(dem_ds, t_srs=pt_srs)
    xmin, ymin, xmax, ymax = dem_extent_wgs84
    print("Applying spatial filter")
    x = glas_pts[:,xcol]
    y = glas_pts[:,ycol]
    idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))
    if idx.nonzero()[0].size == 0:
        print("No points after spatial filter")
        continue

    print("Sampling DEM at masked point locations")
    glas_pts_fltr = glas_pts[idx]

    print("Writing out %i points after spatial filter" % glas_pts_fltr.shape[0])
    out_csv_fn = os.path.splitext(dem_fn)[0]+'_%s.csv' % ext

    # dt_ordinal, dt_YYYYMMDD, lat, lon, z_WGS84
    fmt = '%0.8f, %i, %0.6f, %0.6f, %0.2f'
    if glas_pts_fltr.shape[1] == 7:
        # dt_ordinal, dt_YYYYMMDD, lat, lon, z_WGS84, z_refdem_med_WGS84, z_refdem_nmad
        fmt += ', %0.2f, %0.2f'
    elif glas_pts_fltr.shape[1] == 8:
        # dt_ordinal, dt_YYYYMMDD, lat, lon, z_WGS84, z_refdem_med_WGS84, z_refdem_nmad, lulc
        fmt += ', %0.2f, %0.2f, %i'
    np.savetxt(out_csv_fn, glas_pts_fltr, fmt=fmt, delimiter=',')

    x_fltr = glas_pts_fltr[:,xcol]
    y_fltr = glas_pts_fltr[:,ycol]
    z_fltr = glas_pts_fltr[:,zcol]

    #Use the masked (e.g., bareground) DEM if available, otherwise fall back to the full DEM
    dem_mask_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
    if os.path.exists(dem_mask_fn):
        print("Loading Masked DEM: %s" % dem_mask_fn)
        dem_mask_ds = gdal.Open(dem_mask_fn)
        dem_mask = iolib.ds_getma(dem_mask_ds)
    else:
        dem_mask_ds = dem_ds
        dem_mask = dem_ma

    #Convert input lon/lat coordinates to map, then pixel coordinates
    mX_fltr, mY_fltr, mZ = geolib.cT_helper(x_fltr, y_fltr, 0, pt_srs, geolib.get_ds_srs(dem_mask_ds))
    pX_fltr, pY_fltr = geolib.mapToPixel(mX_fltr, mY_fltr, dem_mask_ds.GetGeoTransform())
    pX_fltr = np.atleast_1d(pX_fltr)
    pY_fltr = np.atleast_1d(pY_fltr)

    #Sample raster
    #This returns median and mad for ICESat footprint
    samp = geolib.sample(dem_mask_ds, mX_fltr, mY_fltr, pad=pad)
    samp_idx = ~(np.ma.getmaskarray(samp[:,0]))
    npts = samp_idx.nonzero()[0].size
    if npts < min_pts:
        print("Not enough points after sampling valid pixels, post bareground mask (%i < %i)" % (npts, min_pts))
        continue

    if True:
        print("Applying slope filter, masking points with slope > %0.1f" % max_slope)
        slope_ds = geolib.gdaldem_mem_ds(dem_mask_ds, processing='slope', returnma=False)
        slope_samp = geolib.sample(slope_ds, mX_fltr, mY_fltr, pad=pad)
        slope_samp_idx = (slope_samp[:,0] <= max_slope).data
        samp_idx = np.logical_and(slope_samp_idx, samp_idx)

        npts = samp_idx.nonzero()[0].size
        if npts < min_pts:
            print("Not enough points after %0.1f deg slope mask (%i < %i)" % (max_slope, npts, min_pts))
            continue

    glas_pts_fltr_mask = glas_pts_fltr[samp_idx]

    if os.path.exists(dem_mask_fn):
        print("Writing out %i points after mask" % glas_pts_fltr_mask.shape[0])
        out_csv_fn_mask = os.path.splitext(out_csv_fn)[0]+'_ref.csv'
        #Could add DEM samp columns here
        np.savetxt(out_csv_fn_mask, glas_pts_fltr_mask, fmt=fmt, delimiter=',')

    x_fltr_mask = glas_pts_fltr_mask[:,xcol]
    y_fltr_mask = glas_pts_fltr_mask[:,ycol]
    z_fltr_mask = glas_pts_fltr_mask[:,zcol]
    mX_fltr_mask, mY_fltr_mask, mZ = geolib.cT_helper(x_fltr_mask, y_fltr_mask, 0, pt_srs, geolib.get_ds_srs(dem_mask_ds))
    pX_fltr_mask, pY_fltr_mask = geolib.mapToPixel(mX_fltr_mask, mY_fltr_mask, dem_mask_ds.GetGeoTransform())
    pX_fltr_mask = np.atleast_1d(pX_fltr_mask)
    pY_fltr_mask = np.atleast_1d(pY_fltr_mask)

    #Elevation differences: ICESat GCP minus sampled DEM median
    dz = z_fltr_mask - samp[samp_idx,0]
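
    #Quick summary of the offsets before co-registration (an added check, not
    #in the original script): robust median and NMAD of the GCP-DEM differences
    dz_med = np.ma.median(dz)
    dz_nmad = 1.4826*np.ma.median(np.abs(dz - dz_med))
    print("dz median: %0.2f m, NMAD: %0.2f m (n=%i)" % (dz_med, dz_nmad, dz.count()))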

    if True:
        print("Creating plot of %i output points" % x_fltr.shape[0])
        fig_kw = {'figsize':(10,7.5)}
        fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharex=True, sharey=True, **fig_kw)

        #Plot DEM color shaded relief
        hs_ma = geolib.gdaldem_wrapper(dem_fn)
        hs_clim = malib.calcperc(hs_ma, perc=(0.5, 99.5))
        dem_clim = malib.calcperc(dem_ma)
        ax1.imshow(hs_ma, cmap='gray', clim=hs_clim)
        im1 = ax1.imshow(dem_ma, cmap=cpt_rainbow, clim=dem_clim, alpha=0.5)
        cbar = pltlib.add_cbar(ax1, im1, label='DEM Elev. (m WGS84)')

        #Plot all color points over shaded relief
        im2 = ax2.imshow(hs_ma, cmap='gray', clim=hs_clim, alpha=0.5)
        #Plot all points in black
        sc2 = ax2.scatter(pX_fltr, pY_fltr, s=0.5, c='k', edgecolors='none')
        #Plot valid in color
        c = z_fltr_mask
        sc2 = ax2.scatter(pX_fltr_mask, pY_fltr_mask, s=0.5, c=c, cmap=cpt_rainbow, vmin=dem_clim[0], vmax=dem_clim[1], edgecolors='none')
        cbar = pltlib.add_cbar(ax2, sc2, label='Pt Elev. (m WGS84)')

        #Plot time
        c = glas_pts_fltr[:,tcol]
        c_decyear = timelib.np_dt2decyear(timelib.np_o2dt(c))
        c = c_decyear
        #vmin = c.min()
        #vmax = c.max()
        #Hardcoded decimal-year limits spanning the ICESat mission (2003-2009)
        vmin = 2003.14085699
        vmax = 2009.77587047
        #vmin = 20030220
        #vmax = 20091011
        im3 = ax3.imshow(hs_ma, cmap='gray', clim=hs_clim, alpha=0.5)
        sc3 = ax3.scatter(pX_fltr, pY_fltr, s=1, c=c, vmin=vmin, vmax=vmax, edgecolors='none')
        #cbar = pltlib.add_cbar(ax3, sc3, label='Pt Year', cbar_kwargs={'format':'%0.2f'})
        cbar = pltlib.add_cbar(ax3, sc3, label='Pt Year')

        #Plot dz with symmetric color limits about zero
        c = dz
        vmin, vmax = malib.calcperc(c, perc=(5, 95))
        absmax = np.max(np.abs([vmin, vmax]))
        vmin = -absmax
        vmax = absmax
        im4 = ax4.imshow(hs_ma, cmap='gray', clim=hs_clim, alpha=0.5)
        sc4 = ax4.scatter(pX_fltr_mask, pY_fltr_mask, s=2, c=c, cmap='RdYlBu', vmin=vmin, vmax=vmax, edgecolors='none')
        cbar = pltlib.add_cbar(ax4, sc4, label='GCP - DEM (m)')

        for ax in (ax1, ax2, ax3, ax4):
            ax.xaxis.set_visible(False)
            ax.yaxis.set_visible(False)
            #Note: 'box-forced' requires older matplotlib; use 'box' on >=2.2
            ax.set_aspect('equal', 'box-forced')

        title = '%s \n %i valid points (%i initial)' % (os.path.splitext(os.path.split(dem_fn)[1])[0], pX_fltr_mask.shape[0], pX_fltr.shape[0])
        fig.suptitle(title)
        fig.tight_layout()
        #This adjusts subplots to fit suptitle
        plt.subplots_adjust(top=0.92)
        fig_fn = os.path.splitext(out_csv_fn)[0]+'.png'
        print("Saving figure: %s" % fig_fn)
        plt.savefig(fig_fn, dpi=300, bbox_inches='tight', pad_inches=0)
        plt.close(fig)
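
#Next step (a sketch, not performed by this script): co-register each DEM to
#the filtered ICESat points with ASP pc_align, using the *_ref.csv written
#above as the reference cloud (filenames below are illustrative). Column
#indices follow the csv layout written here (1:dt_ordinal 2:dt_YYYYMMDD 3:lat
#4:lon 5:z_WGS84); verify flags with `pc_align --help`
#pc_align --max-displacement 10 --csv-format '3:lat 4:lon 5:height_above_datum' \
#    dem_32m_refdemfilt_ref.csv dem_32m.tif -o dem_align/dem_align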
--------------------------------------------------------------------------------
/glas_proc.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python

#David Shean
#dshean@gmail.com

#Utility to process ICESat-1 GLAS products, filter and clip to specified bounding box
#Input is HDF5 GLAH14
#https://nsidc.org/data/GLAH14/versions/34
#http://nsidc.org/data/docs/daac/glas_altimetry/data-dictionary-glah14.html

import os, sys
from datetime import datetime, timedelta
import argparse

import h5py
import numpy as np
from osgeo import gdal

from pygeotools.lib import timelib, geolib, iolib, malib, filtlib

#This is needed for LULC products
import dem_mask

#Before running, download all GLAH14 products
#lftp ftp://n5eil01u.ecs.nsidc.org/DP5/GLAS/
#mirror --parallel=16 GLAH14.034

"""
cd GLAH14.034
lfs setstripe -c 32 .
for site in conus hma
do
    parallel --progress --delay 1 -j 32 "~/src/demcoreg/demcoreg/glas_proc.py {} $site" ::: */*.H5
    #Combine output
    for ext in ${site}.csv ${site}_refdemfilt.csv ${site}_refdemfilt_lulcfilt.csv
    do
        first=$(ls */*$ext | head -1)
        head -1 $first > GLAH14_$ext
        cat */*$ext | sort -n | grep -v lat >> GLAH14_$ext
    done
done
"""

#Clip to glacier polygons
#clipsrc=/Volumes/d/hma/rgi/rgi_hma_aea_110kmbuffer_wgs84.shp
#vrt=GLAH14_tllz_hma_lulcfilt_demfilt.vrt
#ogr2ogr -progress -overwrite -clipsrc $clipsrc ${vrt%.*}_clip.shp $vrt

def getparser():
    parser = argparse.ArgumentParser(description="Process and filter ICESat GLAS points")
    parser.add_argument('fn', type=str, help='GLAH14 HDF5 filename')
    site_choices = list(geolib.site_dict.keys())
    parser.add_argument('sitename', type=str, choices=site_choices, help='Site name')
    #parser.add_argument('--rockfilter', action='store_true', help='Only output points over exposed rock using NLCD or bareground')
    parser.add_argument('-extent', type=str, default=None, help='Specify output spatial extent ("xmin xmax ymin ymax"). Otherwise, use default specified for sitename in pygeotools/lib/geolib')
    parser.add_argument('-refdem_fn', type=str, default=None, help='Specify alternative reference DEM for filtering. Otherwise use NED or SRTM')
    return parser

def main():
    parser = getparser()
    args = parser.parse_args()

    fn = args.fn
    sitename = args.sitename
    #User-specified output extent
    #Note: minimally checked, untested
    if args.extent is not None:
        extent = [float(i) for i in args.extent.split()]
    else:
        extent = (geolib.site_dict[sitename]).extent
    if args.refdem_fn is not None:
        refdem_fn = args.refdem_fn
    else:
        refdem_fn = (geolib.site_dict[sitename]).refdem_fn

    #Max elevation difference between shot and sampled DEM
    max_z_DEM_diff = 200
    #Max elevation std for sampled DEM values in padded window around shot
    max_DEMhiresArElv_std = 50.0

    f = h5py.File(fn, 'r')
    t = f.get('Data_40HZ/Time/d_UTCTime_40')[:]

    #GLAS times are seconds since the J2000 epoch (2000-01-01 12:00:00 UTC)
    #Shift to the UNIX epoch (1970-01-01 00:00:00 UTC) for conversion below
    #pyt0 = datetime(1, 1, 1, 0, 0)
    #utct0 = datetime(1970, 1, 1, 0, 0)
    #t0 = datetime(2000, 1, 1, 12, 0, 0)
    #offset_s = (t0 - utct0).total_seconds()
    offset_s = 946728000.0
    t += offset_s
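    #Worked check of the hardcoded offset (added note): 1970-01-01 to
    #2000-01-01 is 10957 days = 946684800 s; adding the 12 h (43200 s) to the
    #J2000 epoch gives 946684800 + 43200 = 946728000 s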
    dt = timelib.np_utc2dt(t)
    dt_o = timelib.dt2o(dt)
    #dts = timelib.np_print_dt(dt)
    #dt_decyear = timelib.np_dt2decyear(dt)
    dt_int = np.array([ts.strftime('%Y%m%d') for ts in dt], dtype=np.int64)

    #GLAH14 uses DBL_MAX (1.7976931348623157e+308) as the invalid/fill value
    lat = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lat')[:], 1.7976931348623157e+308)
    lon = np.ma.masked_equal(f.get('Data_40HZ/Geolocation/d_lon')[:], 1.7976931348623157e+308)
    lon = geolib.lon360to180(lon)
    z = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_elev')[:], 1.7976931348623157e+308)

    print('Input: %i' % z.count())

    #Now spatial filter - should do this up front
    x = lon
    y = lat
    xmin, xmax, ymin, ymax = extent
    #This is True if point is within extent
    valid_idx = ((x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax))

    #Prepare output array
    #out = np.ma.vstack([dt_decyear, dt_o, dt_int, lat, lon, z]).T
    out = np.ma.vstack([dt_o, dt_int, lat, lon, z]).T
    #Create a mask to ensure all values are valid for each point
    mask = ~(np.any(np.ma.getmaskarray(out), axis=1))
    mask *= valid_idx

    out = out[mask]
    valid_idx = ~(np.any(np.ma.getmaskarray(out), axis=1))

    #Column indices: lat=2, lon=3, z=4
    xcol = 3
    ycol = 2
    zcol = 4

    if out.shape[0] == 0:
        sys.exit("No points within specified extent\n")
    else:
        print("Spatial filter: %i" % out.shape[0])

    #out_fmt = ['%0.8f', '%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
    #out_hdr = ['dt_decyear, dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']
    out_fmt = ['%0.8f', '%i', '%0.6f', '%0.6f', '%0.2f']
    out_hdr = ['dt_ordinal', 'dt_YYYYMMDD', 'lat', 'lon', 'z_WGS84']

    """
    ICESat-1 filters
    """
    #Saturation Correction Flag
    #These are 0 to 5: not_saturated, inconsequential, applicable, not_computed, not_applicable
    sat_corr_flg = f.get('Data_40HZ/Quality/sat_corr_flg')[mask]
    #valid_idx *= (sat_corr_flg < 2)

    #Correction to elevation for saturated waveforms
    #Notes suggest this might not be desirable over land
    satElevCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_satElevCorr')[mask], 1.7976931348623157e+308)
    #z[sat_corr_flg < 3] += satElevCorr.filled(0.0)[sat_corr_flg < 3]
    out[:,zcol] += satElevCorr.filled(0.0)

    #Correction to elevation based on post-flight analysis for biases determined for each campaign
    ElevBiasCorr = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Corrections/d_ElevBiasCorr')[mask], 1.7976931348623157e+308)
    out[:,zcol] += ElevBiasCorr.filled(0.0)

    #Surface elevation (T/P ellipsoid) minus surface elevation (WGS84 ellipsoid)
    #Approximately 0.7 m, so WGS84 is lower; need to subtract from d_elev
    deltaEllip = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_deltaEllip')[mask], 1.7976931348623157e+308)
    out[:,zcol] -= deltaEllip

    #True (1) for valid, False (0) for invalid
    valid_idx *= ~(np.ma.getmaskarray(out[:,zcol]))
    print("z corrections: %i" % valid_idx.nonzero()[0].size)

    if False:
        #Reflectivity, not corrected for atmospheric effects
        reflctUC = np.ma.masked_equal(f.get('Data_40HZ/Reflectivity/d_reflctUC')[mask], 1.7976931348623157e+308)
        #This was minimum used for ice sheets
        min_reflctUC = 0.025
        valid_idx *= (reflctUC > min_reflctUC).data
        print("reflctUC: %i" % valid_idx.nonzero()[0].size)

    if False:
        #The standard deviation of the difference between the functional fit and the received echo
        #using alternate parameters. It is taken directly from GLA05 parameter d_wfFitSDev_1
        LandVar = np.ma.masked_equal(f.get('Data_40HZ/Elevation_Surfaces/d_LandVar')[mask], 1.7976931348623157e+308)
        #This was max used for ice sheets
        max_LandVar = 0.04
        valid_idx *= (LandVar < max_LandVar).data
        print("LandVar: %i" % valid_idx.nonzero()[0].size)

    if True:
        #Flag indicating whether the elevations on this record should be used
        #0 = valid, 1 = not valid
        elev_use_flg = f.get('Data_40HZ/Quality/elev_use_flg')[mask].astype(bool)
        valid_idx *= ~elev_use_flg
        print("elev_use_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        #Cloud contamination; indicates if gain > flag value, suggesting probable cloud contamination
        elv_cloud_flg = f.get('Data_40HZ/Elevation_Flags/elv_cloud_flg')[mask].astype(bool)
        valid_idx *= ~elv_cloud_flg
        print("elv_cloud_flg: %i" % valid_idx.nonzero()[0].size)
    if False:
        #Full-resolution 1064 nm quality flag; values 0-12 indicate cloud detected, 15 is clear
        FRir_qa_flg = f.get('Data_40HZ/Atmosphere/FRir_qa_flg')[mask]
        valid_idx *= (FRir_qa_flg == 15).data
        print("FRir_qa_flg: %i" % valid_idx.nonzero()[0].size)

    if False:
        #This is elevation extracted from SRTM30
        DEM_elv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEM_elv')[mask], 1.7976931348623157e+308)
        z_DEM_diff = np.abs(out[:,zcol] - DEM_elv)
        valid_idx *= (z_DEM_diff < max_z_DEM_diff).data
        print("z_DEM_diff: %i" % valid_idx.nonzero()[0].size)

    #d_DEMhiresArElv is a 9-element array of high-resolution DEM values
    #The array index corresponds to the position of the DEM value relative to the spot; (5) is the footprint center
    DEMhiresArElv = np.ma.masked_equal(f.get('Data_40HZ/Geophysical/d_DEMhiresArElv')[mask], 1.7976931348623157e+308)
    DEMhiresArElv_std = np.ma.std(DEMhiresArElv, axis=1)
    valid_idx *= (DEMhiresArElv_std < max_DEMhiresArElv_std).data
    print("max_DEMhiresArElv_std: %i" % valid_idx.nonzero()[0].size)
    #Compute slope

    #Apply cumulative filter to output
    out = out[valid_idx]

    out_fn = os.path.splitext(fn)[0]+'_%s.csv' % sitename
    print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
    out_fmt_str = ', '.join(out_fmt)
    out_hdr_str = ', '.join(out_hdr)
    np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
    iolib.writevrt(out_fn, x='lon', y='lat')

    #Extract our own DEM values - should be better than default GLAS reference DEM stats
    if True:
        print("Loading reference DEM: %s" % refdem_fn)
        dem_ds = gdal.Open(refdem_fn)
        print("Converting coords for DEM")
        dem_mX, dem_mY = geolib.ds_cT(dem_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
        print("Sampling")
        dem_samp = geolib.sample(dem_ds, dem_mX, dem_mY, pad='glas')
        abs_dem_z_diff = np.abs(out[:,zcol] - dem_samp[:,0])

        #Reset the filter index here; out was already subset above, so the old valid_idx no longer matches its length
        valid_idx = ~(np.ma.getmaskarray(abs_dem_z_diff))
        print("Valid DEM extract: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (abs_dem_z_diff < max_z_DEM_diff).data
        print("Valid abs DEM diff: %i" % valid_idx.nonzero()[0].size)
        valid_idx *= (dem_samp[:,1] < max_DEMhiresArElv_std).data
        print("Valid DEM mad: %i" % valid_idx.nonzero()[0].size)

        if valid_idx.nonzero()[0].size == 0:
            sys.exit("No valid points remain")

        out = np.ma.hstack([out, dem_samp])
        out_fmt.extend(['%0.2f', '%0.2f'])
        out_hdr.extend(['z_refdem_med_WGS84', 'z_refdem_nmad'])

        #Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0]+'_refdemfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')
    #This will sample land-use/land-cover or percent bareground products
    #Can be used to isolate points over exposed rock
    #if args.rockfilter:
    if True:
        #This should automatically identify appropriate LULC source based on refdem extent
        lulc_source = dem_mask.get_lulc_source(dem_ds)
        #Looks like NED extends beyond NLCD, force use of NLCD for conus
        #if sitename == 'conus':
        #    lulc_source = 'nlcd'
        lulc_ds = dem_mask.get_lulc_ds_full(dem_ds, lulc_source)
        print("Converting coords for LULC")
        lulc_mX, lulc_mY = geolib.ds_cT(lulc_ds, out[:,xcol], out[:,ycol], geolib.wgs_srs)
        print("Sampling LULC: %s" % lulc_source)
        #Note: want to make sure we're not interpolating integer values for NLCD
        #Should be safe with pad=0; even with pad>0, should take median, not mean
        lulc_samp = geolib.sample(lulc_ds, lulc_mX, lulc_mY, pad=0)
        l = lulc_samp[:,0].data
        if lulc_source == 'nlcd':
            #This passes rock (NLCD class 31, barren) and ice (class 12, perennial ice/snow) pixels
            valid_idx = np.logical_or((l==31),(l==12))
        elif lulc_source == 'bareground':
            #This preserves pixels with bareground percentage >=85%
            minperc = 85
            valid_idx = (l >= minperc)
        else:
            print("Unknown LULC source")
        print("LULC: %i" % valid_idx.nonzero()[0].size)
        if l.ndim == 1:
            l = l[:,np.newaxis]
        out = np.ma.hstack([out, l])
        out_fmt.append('%i')
        out_hdr.append('lulc')

        #Apply cumulative filter to output
        out = out[valid_idx]

        out_fn = os.path.splitext(out_fn)[0]+'_lulcfilt.csv'
        print("Writing out %i records to: %s\n" % (out.shape[0], out_fn))
        out_fmt_str = ', '.join(out_fmt)
        out_hdr_str = ', '.join(out_hdr)
        np.savetxt(out_fn, out, fmt=out_fmt_str, delimiter=',', header=out_hdr_str)
        iolib.writevrt(out_fn, x='lon', y='lat')

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------