├── MANIFEST.in
├── requirements.txt
├── README_files
│   ├── README_6_2.png
│   ├── README_7_2.png
│   ├── README_9_2.png
│   └── README_13_0.png
├── environment.yaml
├── topo_descriptors
│   ├── config
│   │   └── topo_descriptors.conf
│   ├── __init__.py
│   ├── helpers.py
│   └── topo.py
├── AUTHORS.rst
├── test
│   ├── test_helpers.py
│   └── test_topo.py
├── HISTORY.rst
├── .github
│   └── workflows
│       ├── python-publish.yml
│       └── python-package.yml
├── LICENSE
├── .gitignore
├── setup.py
├── scripts
│   └── compute_topo_descriptors.py
└── README.md

--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
include LICENSE README.md HISTORY.rst AUTHORS.rst

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
dask
netcdf4
numba
numpy
pandas
scipy
utm
xarray
yaconfigobject

--------------------------------------------------------------------------------
/README_files/README_6_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MeteoSwiss/topo-descriptors/HEAD/README_files/README_6_2.png

--------------------------------------------------------------------------------
/README_files/README_7_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MeteoSwiss/topo-descriptors/HEAD/README_files/README_7_2.png

--------------------------------------------------------------------------------
/README_files/README_9_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MeteoSwiss/topo-descriptors/HEAD/README_files/README_9_2.png

--------------------------------------------------------------------------------
/README_files/README_13_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MeteoSwiss/topo-descriptors/HEAD/README_files/README_13_0.png

--------------------------------------------------------------------------------
/environment.yaml:
--------------------------------------------------------------------------------
name: topo
channels:
  - conda-forge
  - defaults
dependencies:
  - python=3.10
  - gdal
  - jupyter

--------------------------------------------------------------------------------
/topo_descriptors/config/topo_descriptors.conf:
--------------------------------------------------------------------------------
# Values lower than or equal to min_elevation are filtered out
min_elevation: -100

# The number of standard deviations per unit scale (i.e. scale = scale_std * sigma)
scale_std: 4

--------------------------------------------------------------------------------
/AUTHORS.rst:
--------------------------------------------------------------------------------
=======
Credits
=======

Contributors
------------

* Mathieu Schaer
* Daniele Nerini
* Francesco Zanetta

--------------------------------------------------------------------------------
/test/test_helpers.py:
--------------------------------------------------------------------------------
import numpy as np

import topo_descriptors.helpers as hlp


def test_round_up_to_odd():
    inputs = np.arange(0.1, 10, 0.7)
    outputs = hlp.round_up_to_odd(inputs)
    expected = [1, 1, 1, 3, 3, 3, 5, 5, 5, 7, 7, 7, 9, 9, 9]
    assert outputs.dtype == np.int64
    assert all([a == b for a, b in zip(outputs, expected)])

--------------------------------------------------------------------------------
/topo_descriptors/__init__.py:
--------------------------------------------------------------------------------
""" Initializations """

from pkg_resources import DistributionNotFound, get_distribution
from yaconfigobject import Config

try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed
    __version__ = ""

__author__ = """Daniele Nerini"""
__email__ = "daniele.nerini@meteoswiss.ch"

CFG = Config(name="topo_descriptors.conf")

--------------------------------------------------------------------------------
/HISTORY.rst:
--------------------------------------------------------------------------------
=======
History
=======

0.3.0 (2024-01-15)
------------------

* Move from DataArray to Dataset for DEM object to allow transferring global attributes.
* Add units as variable attributes.
* Output slope in units of [degree] instead of [m / pixel].
* Fix bug in slope calculation.
* Remove parallelization of scales with multiprocessing for valley and ridge.

0.2.1 (2022-10-19)
------------------

* Fix bug in the scale to pixel conversion in case of WGS84 grids.

0.2.0 (2021-06-12)
------------------

* Add Sx descriptor.
* Add STD descriptor.

0.1.2 (2021-05-14)
------------------

* First working release on PyPI.

--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

name: Upload Python Package

on:
  release:
    types: [published]

jobs:
  deploy:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.x'
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install setuptools wheel twine
    - name: Build and publish
      env:
        PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
      run: |
        python setup.py sdist bdist_wheel
        twine upload dist/* --verbose --skip-existing -u __token__ -p "$PYPI_TOKEN"

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
BSD 3-Clause License

Copyright (c) 2021, MeteoSwiss

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
# This workflow will install the Python package, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Python package

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ['3.8', '3.9', '3.10']

    steps:
    - uses: actions/checkout@v3
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v4
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        python -m pip install flake8 pytest
        pip install -r requirements.txt
        pip install .
    - name: Test import
      run: |
        python -c "import topo_descriptors"
    - name: Lint with flake8
      run: |
        # stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
    - name: Test with pytest
      run: |
        python -m pytest test/

--------------------------------------------------------------------------------
/test/test_topo.py:
--------------------------------------------------------------------------------
import numpy as np

from topo_descriptors import topo


def test_sx_distance():

    radius = 150.0
    dx = 50.0
    dy = 40.0

    output = topo._sx_distance(radius, dx, dy)
    expected_first_row = np.array(
        [
            256.1249695,
            219.31712199,
            188.67962264,
            167.63054614,
            160.0,
            167.63054614,
            188.67962264,
            219.31712199,
            256.1249695,
        ]
    )

    assert np.all(np.isclose(output[0, :], expected_first_row))
    assert output.dtype == np.float64


def test_sx_bresenhamlines():

    start = np.array([[8, 9], [17, 22]])
    end = np.array([15, 15])
    output = topo._sx_bresenhamlines(start, end)
    expected = np.array(
        [
            [9, 10],
            [10, 11],
            [11, 12],
            [12, 12],
            [13, 13],
            [14, 14],
            [17, 21],
            [16, 20],
            [16, 19],
            [16, 18],
            [16, 17],
            [15, 16],
        ]
    )

    assert np.all(output == expected)
    assert output.dtype == np.int64


def test_sx_source_idx_delta():

    azimuths = np.array([3.0, 4.0, 5.0, 6.0])
    radius = 500
    dx = 20
    dy = 30
    output = topo._sx_source_idx_delta(azimuths, radius, dx, dy)
    expected = np.array([[17, 1], [17, 2], [17, 2], [17, 3]])

    assert np.all(output == expected)
    assert output.dtype == np.int64

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
.pytest_cache

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/**/_build/
doc/build

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# dotenv
.env

# virtualenv
.venv
venv/
ENV/

# vscode project settings
.vscode

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject

# Komodo Edit settings
*.komodoproject

# swap files
*.swp

# CI tools
/codeship.aes
/deploy/*.env
/tmp/*

# Pipenv stuff
Pipfile
Pipfile.lock

# personal stuff

--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2021, MeteoSwiss, the authors

from setuptools import setup, find_packages

requirements = [
    "netcdf4",
    "numba",
    "dask",
    "numpy",
    "pandas",
    "scipy",
    "utm",
    "xarray",
    "yaconfigobject",
]

setup_requirements = [
    "setuptools_scm",
]

test_requirements = [
    "pytest",
]

extras = {
    "test": test_requirements,
}

packages = find_packages(include=["topo_descriptors"])

package_dir = {}

package_data = {}

setup(
    name="topo-descriptors",
    packages=packages,
    use_scm_version=True,
    author="Mathieu Schaer",
    author_email="mathieu.schaer@meteoswiss.ch",
    maintainer="Daniele Nerini",
    maintainer_email="daniele.nerini@meteoswiss.ch",
    description="A library to compute DEM-based topographical descriptors.",
    long_description=open("README.md").read() + "\n\n" + open("HISTORY.rst").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/MeteoSwiss/topo-descriptors",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Scientific/Engineering",
        "Operating System :: OS Independent",
    ],
    license="BSD-3-Clause license",
    keywords="topo_descriptors",
    entry_points={},
    py_modules=["topo-descriptors"],
    include_package_data=True,
    install_requires=requirements,
    package_dir=package_dir,
    package_data=package_data,
    setup_requires=setup_requirements,
    tests_require=test_requirements,
    extras_require=extras,
)

--------------------------------------------------------------------------------
/scripts/compute_topo_descriptors.py:
--------------------------------------------------------------------------------
"""
Example script on how to compute spatial descriptors from a DEM file.
"""

import logging

import topo_descriptors.topo as tp
import topo_descriptors.helpers as hlp

logger = logging.getLogger(__name__)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logging.captureWarnings(True)

    # get the DEM
    path_dem = "DEM.nc"
    dem_ds = hlp.get_dem_netcdf(path_dem)
    ind_nans, dem_ds = hlp.fill_na(dem_ds)

    # define the target domain
    domain = {"x": slice(255000, 965000), "y": slice(-160000, 480000)}

    # define the convolution scales in meters
    scales_meters = [
        100,
        300,
        500,
        1000,
        2000,
        4000,
        6000,
        10000,
        20000,
        30000,
        60000,
        100000,
    ]

    # Launch computations and save output

    # smoothed DEM
    tp.compute_dem(dem_ds, scales_meters, ind_nans=ind_nans, crop=domain)

    # raw TPI
    tp.compute_tpi(
        dem_ds, scales_meters, smth_factors=None, ind_nans=ind_nans, crop=domain
    )

    # TPI with prior smoothing
    tp.compute_tpi(
        dem_ds, scales_meters, smth_factors=1, ind_nans=ind_nans, crop=domain
    )

    # Gradients with symmetric kernels
    tp.compute_gradient(
        dem_ds, scales_meters, sig_ratios=1, ind_nans=ind_nans, crop=domain
    )

    # Standard deviation of surface
    tp.compute_std(dem_ds, scales_meters, ind_nans=ind_nans, crop=domain)

    # Valley Index with prior smoothing
    tp.compute_valley_ridge(
        dem_ds,
        scales_meters[3:],
        mode="valley",
        flat_list=[0, 0.2, 0.4],
        smth_factors=0.5,
        ind_nans=ind_nans,
        crop=domain,
    )

    # Ridge Index with prior smoothing
    tp.compute_valley_ridge(
        dem_ds,
        scales_meters[3:],
        mode="ridge",
        flat_list=[0, 0.15, 0.3],
        smth_factors=0.5,
        ind_nans=ind_nans,
        crop=domain,
    )

    # Sx for one azimuth
    tp.compute_sx(
        dem_ds,
        0,
        1000,
        crop=domain,
    )

--------------------------------------------------------------------------------
/topo_descriptors/helpers.py:
--------------------------------------------------------------------------------
import datetime as dt
import functools
import logging
import time
from pathlib import Path

import numpy as np
import xarray as xr
import utm

from topo_descriptors import CFG


logger = logging.getLogger(__name__)


def get_dem_netcdf(path_dem):
    """Load the DEM into an xarray Dataset and filter NaNs

    Parameters
    ----------
    path_dem: string
        absolute or relative path to the DEM netCDF file.

    Returns
    -------
    xarray Dataset with the DEM values.
    """

    dem_ds = xr.open_dataset(path_dem).astype(np.float32).squeeze(drop=True)
    return dem_ds.where(dem_ds > CFG.min_elevation)


def to_netcdf(array, dem_ds, name, crop=None, outdir=".", units=None):
    """Save an array of topographic descriptors in NetCDF. It has the same coordinates
    and attributes as the input DEM and a specified name.

    Parameters
    ----------
    array : array to be saved as netCDF
    dem_ds : xarray Dataset
        Original DEM dataset from which coordinates and attributes are copied.
    name : string
        Name for the array
    crop (optional) : dict
        The array is cropped to the given extent before being saved. Keys should
        be coordinates labels as in coords and values should be slices of [min,max]
        extent. Default is None.
    outdir (optional) : string
        The path to the output directory. Save to working directory by default.
    units (optional) : string
        Units of the topographic descriptor array added as variable attribute.
    """

    name = str.upper(name)
    outdir = Path(outdir)
    ds = xr.Dataset(
        {name: (get_da(dem_ds).dims, array)}, coords=dem_ds.coords, attrs=dem_ds.attrs
    ).sel(crop)
    if units is not None:
        ds[name].attrs.update(units=units)

    filename = f"topo_{name}.nc"
    ds.to_netcdf(outdir / filename)
    logger.info(f"saved: {outdir / filename}")


def scale_to_pixel(scales, dem_ds):
    """Convert distances in meters to the closest odd number of pixels based on
    the DEM resolution.

    Parameters
    ----------
    scales : list of scalars
        Scales in meters on which we want to compute the topographic descriptor.
        Corresponds to the size of the squared kernel used to compute it.
    dem_ds : xarray Dataset representing the DEM and its grid coordinates.
        Coordinates must be projected or in WGS84 and named 'x', 'y'. In the latter case,
        they are reprojected to UTM to derive the average resolution in meters.

    Returns
    -------
    list of int :
        Contain the corresponding scales in pixel size.
    dict with two 1-D or 2-D arrays :
        Resolution in meters of each DEM grid points in the x and y directions.
    """
    check_dem(dem_ds)
    x_coords, y_coords = dem_ds["x"].values, dem_ds["y"].values
    epsg_code = dem_ds.attrs["crs"].lower()
    if "epsg:4326" in epsg_code:
        logger.debug(
            "Reprojecting coordinates from WGS84 to UTM to obtain units of meters"
        )
        x_coords, y_coords = np.meshgrid(x_coords, y_coords)
        x_coords, y_coords, _, _ = utm.from_latlon(y_coords, x_coords)
        x_coords, y_coords = x_coords.astype(np.float32), y_coords.astype(np.float32)

    n_dims = len(x_coords.shape)
    x_res = np.gradient(x_coords, axis=n_dims - 1)
    y_res = np.gradient(y_coords, axis=0)
    mean_res = np.mean(np.abs([x_res.mean(), y_res.mean()]))
    logger.debug(f"Estimated resolution: {mean_res:.0f} meters.")

    return round_up_to_odd(np.array(scales) / mean_res), {"x": x_res, "y": y_res}
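
# Illustrative example (assuming a grid of roughly 30 m resolution, such as the
# SRTM DEM used in the README): a 500 m scale maps to 17 pixels, since
# 500 / 30 is about 16.7 and round_up_to_odd(16.7) == 17.
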
def round_up_to_odd(f):
    """Round float to the nearest odd integer."""

    return np.asarray(np.round((f - 1) / 2) * 2 + 1, dtype=np.int64)


def get_sigmas(smth_factors, scales_pxl):
    """Return scales expressed in standard deviations for gaussian filters.

    Parameters
    ----------
    smth_factors : list of scalars or None elements or a combination of both.
        Factors by which the scales in pixel must be multiplied. None or zeros
        results in None in the output.
    scales_pxl : list of int
        Scales expressed in pixels.

    Returns
    -------
    list of scalars/None elements representing scales in standard deviations.
    """

    sigmas = (
        [fact if fact else np.nan for fact in smth_factors] * scales_pxl / CFG.scale_std
    )

    return [None if np.isnan(sigma) else sigma for sigma in sigmas]
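
# Illustrative examples (the first reproduces test/test_helpers.py):
# round_up_to_odd(np.arange(0.1, 10, 0.7)) -> [1, 1, 1, 3, 3, 3, 5, 5, 5, 7, 7, 7, 9, 9, 9]
# get_sigmas([1, None], np.array([9, 9])) -> [2.25, None], since scale_std is 4.
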
def fill_na(dem_ds):
    """Get the indices of NaNs and interpolate them.

    Parameters
    ----------
    dem_ds : xarray Dataset containing the elevation data.

    Returns
    -------
    ind_nans : tuple of two 1D arrays
        Contains the row / column indices of the NaNs in the original dem.
    xarray Dataset with interpolated NaNs in x direction using "nearest" method.
    """

    ind_nans = np.where(np.isnan(get_da(dem_ds)))
    return ind_nans, dem_ds.interpolate_na(
        dim="x", method="nearest", fill_value="extrapolate"
    )


def timer(func):
    @functools.wraps(func)
    def wrapper_timer(*args, **kwargs):
        t_start = time.monotonic()
        value = func(*args, **kwargs)
        t_elapsed = str(dt.timedelta(seconds=time.monotonic() - t_start)).split(".", 2)[
            0
        ]
        logger.info(f"Computed in {t_elapsed} (HH:mm:ss)")
        return value

    return wrapper_timer


def check_dem(dem):
    """
    Check that the input dem conforms to the data model, namely:
    - instance of xarray.Dataset
    - 2D field
    - y and x dimensions
    - crs attribute specifying an EPSG code.
    """
    if not isinstance(dem, xr.Dataset):
        raise ValueError("dem must be a xr.Dataset")
    if dem[list(dem)[0]].dims != ("y", "x"):
        raise ValueError("dem dimensions must be ('y', 'x')")
    if "crs" not in dem.attrs:
        raise KeyError("missing 'crs' (case sensitive) attribute in dem")
    if "epsg:" not in dem.attrs["crs"].lower():
        raise ValueError(
            "missing 'epsg:' (case insensitive) key in the 'crs' attribute"
        )


def get_da(dem_ds):
    """
    Return the xarray.DataArray with DEM values without knowing its name.
    """

    return dem_ds[list(dem_ds)[0]]

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# topo-descriptors

A Python library to compute DEM-based topographical descriptors.

## Usage

Let's install `topo-descriptors` with a few additional packages that will help us
to run a simple example (remember to use a virtual environment):


```python
%pip install topo-descriptors elevation rioxarray matplotlib --quiet
```

    Note: you may need to restart the kernel to use updated packages.


The [elevation](https://github.com/bopen/elevation) package is a Python library that
provides easy access to global elevation data. Here we are going to clip the SRTM 30m
DEM around the Basodino region in southern Switzerland, around 46.4N 8.5E:


```python
!eio clip -o Basodino-30m-DEM.tif --bounds 8.2 46.30 8.6 46.55
```

    make: Entering directory '/prod/gve/home/mts/.cache/elevation/SRTM1'
    make: Nothing to be done for 'download'.
    make: Leaving directory '/prod/gve/home/mts/.cache/elevation/SRTM1'
    make: Entering directory '/prod/gve/home/mts/.cache/elevation/SRTM1'
    make: Nothing to be done for 'all'.
    make: Leaving directory '/prod/gve/home/mts/.cache/elevation/SRTM1'
    make: Entering directory '/prod/gve/home/mts/.cache/elevation/SRTM1'
    cp SRTM1.vrt SRTM1.d73260d0233a450ab8ca8ce05b9b46c6.vrt
    make: Leaving directory '/prod/gve/home/mts/.cache/elevation/SRTM1'
    make: Entering directory '/prod/gve/home/mts/.cache/elevation/SRTM1'
    gdal_translate -q -co TILED=YES -co COMPRESS=DEFLATE -co ZLEVEL=9 -co PREDICTOR=2 -projwin 8.2 46.55 8.6 46.3 SRTM1.d73260d0233a450ab8ca8ce05b9b46c6.vrt /prod/gve/home/mts/git/topo-descriptors/Basodino-30m-DEM.tif
    rm -f SRTM1.d73260d0233a450ab8ca8ce05b9b46c6.vrt
    make: Leaving directory '/prod/gve/home/mts/.cache/elevation/SRTM1'


```python
import logging

logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
```

Now in Python we can easily import the
`Basodino-30m-DEM.tif` file generated above:


```python
from topo_descriptors.helpers import get_dem_netcdf, scale_to_pixel

dem_ds = get_dem_netcdf("Basodino-30m-DEM.tif")
varname = list(dem_ds)[0]
dem_ds.attrs.update(crs="epsg:4326")
dem_ds[varname].plot(robust=True)
```

    2023-12-20 17:53:55,120 yaconfigobject INFO Loading /prod/gve/home/mts/git/topo-descriptors/topo_descriptors/config/topo_descriptors.conf.
    2023-12-20 17:53:55,121 yaconfigobject INFO Loading configuration file: /prod/gve/home/mts/git/topo-descriptors/topo_descriptors/config/topo_descriptors.conf


![png](README_files/README_6_2.png)


```python
from topo_descriptors import topo

scale_meters = 500
scale_pixel, __ = scale_to_pixel(scale_meters, dem_ds)
topo.tpi(dem_ds[varname], scale_pixel).plot(vmin=-100, vmax=100, cmap="bwr")
```

    2023-12-20 17:53:58,736 topo_descriptors.helpers INFO Computed in 0:00:00 (HH:mm:ss)


![png](README_files/README_7_2.png)


The Sx is used to describe the horizon in a given direction and spatial scale.
In the example below we compute the Sx for a 0° azimuth (i.e., looking North)
and a radius of 500 meters.


```python
import xarray as xr

sx_500m = topo.sx(dem_ds, azimuth=0, radius=500)
xr.DataArray(sx_500m, coords=dem_ds.coords).plot.imshow()
```

    2023-12-20 17:54:06,246 topo_descriptors.helpers INFO Computed in 0:00:06 (HH:mm:ss)


![png](README_files/README_9_2.png)


Other topographical descriptors are available, such as slope, aspect, derivatives,
and more. As an example, below we show how to compute a range of descriptors for two
distinct spatial scales (200 and 2000 meters).

```python
from pathlib import Path

output_dir = Path("out/")
output_dir.mkdir(exist_ok=True)

scales_meters = [200, 2000]
domain = {"x": slice(8.25, 8.55), "y": slice(46.50, 46.35)}

topo.compute_gradient(dem_ds, scales_meters, sig_ratios=1, crop=domain, outdir=output_dir)
topo.compute_std(dem_ds, scales_meters, crop=domain, outdir=output_dir)
topo.compute_tpi(dem_ds, scales_meters, crop=domain, outdir=output_dir)
topo.compute_sx(dem_ds, azimuth=0, radius=scales_meters[0], crop=domain, outdir=output_dir)
topo.compute_sx(dem_ds, azimuth=0, radius=scales_meters[1], crop=domain, outdir=output_dir)
```

    2023-12-20 17:54:06,586 topo_descriptors.topo INFO ***Starting gradients computation for scales [200, 2000] meters***
    2023-12-20 17:54:06,870 topo_descriptors.topo INFO Computing scale 200 meters with sigma ratio 1 ...
    2023-12-20 17:54:06,920 topo_descriptors.helpers INFO Computed in 0:00:00 (HH:mm:ss)
    2023-12-20 17:54:06,945 topo_descriptors.helpers INFO saved: out/topo_WE_DERIVATIVE_200M_SIGRATIO1.nc
    2023-12-20 17:54:06,958 topo_descriptors.helpers INFO saved: out/topo_SN_DERIVATIVE_200M_SIGRATIO1.nc
    2023-12-20 17:54:06,970 topo_descriptors.helpers INFO saved: out/topo_SLOPE_200M_SIGRATIO1.nc
    2023-12-20 17:54:06,982 topo_descriptors.helpers INFO saved: out/topo_ASPECT_200M_SIGRATIO1.nc
    2023-12-20 17:54:06,983 topo_descriptors.topo INFO Computing scale 2000 meters with sigma ratio 1 ...
    2023-12-20 17:54:07,229 topo_descriptors.helpers INFO Computed in 0:00:00 (HH:mm:ss)
    2023-12-20 17:54:07,242 topo_descriptors.helpers INFO saved: out/topo_WE_DERIVATIVE_2000M_SIGRATIO1.nc
    2023-12-20 17:54:07,260 topo_descriptors.helpers INFO saved: out/topo_SN_DERIVATIVE_2000M_SIGRATIO1.nc
    2023-12-20 17:54:07,272 topo_descriptors.helpers INFO saved: out/topo_SLOPE_2000M_SIGRATIO1.nc
    2023-12-20 17:54:07,284 topo_descriptors.helpers INFO saved: out/topo_ASPECT_2000M_SIGRATIO1.nc
    2023-12-20 17:54:07,286 topo_descriptors.topo INFO ***Starting STD computation for scales [200, 2000] meters***
    2023-12-20 17:54:07,569 topo_descriptors.topo INFO Computing scale 200 meters with smoothing factor None ...
    2023-12-20 17:54:07,696 topo_descriptors.helpers INFO Computed in 0:00:00 (HH:mm:ss)
    2023-12-20 17:54:07,717 topo_descriptors.helpers INFO saved: out/topo_STD_200M.nc
    2023-12-20 17:54:07,719 topo_descriptors.topo INFO Computing scale 2000 meters with smoothing factor None ...
    2023-12-20 17:54:07,853 topo_descriptors.helpers INFO Computed in 0:00:00 (HH:mm:ss)
    2023-12-20 17:54:07,871 topo_descriptors.helpers INFO saved: out/topo_STD_2000M.nc
    2023-12-20 17:54:07,873 topo_descriptors.topo INFO ***Starting TPI computation for scales [200, 2000] meters***
    2023-12-20 17:54:08,141 topo_descriptors.topo INFO Computing scale 200 meters with smoothing factor None ...
    2023-12-20 17:54:08,182 topo_descriptors.helpers INFO Computed in 0:00:00 (HH:mm:ss)
    2023-12-20 17:54:08,196 topo_descriptors.helpers INFO saved: out/topo_TPI_200M.nc
    2023-12-20 17:54:08,197 topo_descriptors.topo INFO Computing scale 2000 meters with smoothing factor None ...
    2023-12-20 17:54:08,235 topo_descriptors.helpers INFO Computed in 0:00:00 (HH:mm:ss)
    2023-12-20 17:54:08,255 topo_descriptors.helpers INFO saved: out/topo_TPI_2000M.nc
    2023-12-20 17:54:08,257 topo_descriptors.topo INFO ***Starting Sx computation for azimuth 0 degrees and radius 200 meters***
    2023-12-20 17:54:09,313 topo_descriptors.helpers INFO Computed in 0:00:01 (HH:mm:ss)
    2023-12-20 17:54:09,326 topo_descriptors.helpers INFO saved: out/topo_SX_RADIUS200_AZIMUTH0.nc
    2023-12-20 17:54:09,328 topo_descriptors.topo INFO ***Starting Sx computation for azimuth 0 degrees and radius 2000 meters***
    2023-12-20 17:54:15,750 topo_descriptors.helpers INFO Computed in 0:00:06 (HH:mm:ss)
    2023-12-20 17:54:15,764 topo_descriptors.helpers INFO saved: out/topo_SX_RADIUS2000_AZIMUTH0.nc


Above, the output was written directly to disk, while in the cell below we show how
to easily import the results and visualize them using xarray.


```python
ds = xr.open_mfdataset(str(output_dir / "topo_*.nc"))
min_max = ds.quantile(q=[0.05, 0.95])
ds = (ds - min_max.isel(quantile=0)) / (
    min_max.isel(quantile=1) - min_max.isel(quantile=0)
)
ds.to_array().plot.imshow(
    col="variable",
    col_wrap=len(scales_meters),
    robust=True,
    add_colorbar=False,
    vmin=0,
    vmax=1,
)
ds.close()
```


![png](README_files/README_13_0.png)


## Build the README

To use this Jupyter Notebook to compile the markdown version for GitHub, first install
the conda environment using the `environment.yaml` file:

```shell
conda env create -f environment.yaml
conda activate topo
```

Then generate the `README.md` by running:

```shell
jupyter nbconvert --execute --to markdown README.ipynb
```

The associated figures are saved in the `README_files/` folder.

--------------------------------------------------------------------------------
/topo_descriptors/topo.py:
--------------------------------------------------------------------------------
import logging

import numpy as np
import numpy.ma as ma
import xarray as xr
import dask.array as da
from numba import njit, prange
from scipy import ndimage, signal

import topo_descriptors.helpers as hlp
from topo_descriptors import CFG

logger = logging.getLogger(__name__)


def compute_dem(dem_ds, scales, ind_nans=[], crop=None, outdir="."):
    """Wrapper to 'dem' function to launch computations for all scales and save
    outputs as netCDF files.

    Parameters
    ----------
    dem_ds : xarray Dataset representing the DEM and its grid coordinates.
    scales : scalar or list of scalars
        Scale(s) in meters at which we want to smooth the DEM.
        Corresponds to the diameter of the kernel used to compute it.
    ind_nans (optional) : tuple of two 1D arrays
        Contains the (row, column) indices of the NaNs in the original DEM to be
        reassigned after computations. NaNs in the original DEM should be
        interpolated prior to computations as they propagate in convolutions with
        the fast Fourier transform method (scipy.signal.convolve).
    crop (optional) : dict
        If specified the outputs are cropped to the given extent. Keys should be
        the coordinates labels of dem_ds and values should be slices of [min,max]
        extent. Default is None.
    outdir (optional) : string
        The path to the output directory. Save to working directory by default.

    See also
    --------
    dem
    """

    hlp.check_dem(dem_ds)
    logger.info(f"***Starting dem computation for scales {scales} meters***")
    if not hasattr(scales, "__iter__"):
        scales = [scales]

    scales_pxl, res_meters = hlp.scale_to_pixel(scales, dem_ds)
    sigmas = scales_pxl / CFG.scale_std
    dem_val = hlp.get_da(dem_ds).values
    units = "m"

    for idx, sigma in enumerate(sigmas):
        logger.info(f"Computing scale {scales[idx]} meters")
        name = _dem_name(scales[idx])
        array = dem(dem_val, sigma)
        array[ind_nans] = np.nan
        hlp.to_netcdf(array, dem_ds, name, crop, outdir, units)
        del array


def dem(dem, sigma):
    """Smooth the digital elevation model with a standard deviation of sigma.

    Parameters
    ----------
    dem : array representing the DEM.
    sigma : scalar
        The DEM is smoothed with a gaussian filter of standard deviation sigma
        (in pixel size).

    Returns
    -------
    array with smoothed dem

    See also
    --------
    scipy.ndimage.gaussian_filter
    """
    return ndimage.gaussian_filter(dem, sigma)


def _dem_name(scale):
    """Return names for the arrays in output of the dem function"""
    return f"DEM_{scale}M"
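
# Illustrative link between scale and smoothing, assuming a ~50 m grid: a
# 2000 m scale gives scale_pxl = round_up_to_odd(2000 / 50) = 41 pixels and
# sigma = 41 / 4 = 10.25, since scale_std is set to 4 in topo_descriptors.conf.
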
def compute_tpi(dem_ds, scales, smth_factors=None, ind_nans=[], crop=None, outdir="."):
    """Wrapper to 'tpi' function to launch computations for all scales and save
    outputs as netCDF files.

    Parameters
    ----------
    dem_ds : xarray Dataset representing the DEM and its grid coordinates.
    scales : scalar or list of scalars
        Scale(s) in meters on which we want to compute the TPI.
        Corresponds to the diameter of the kernel used to compute it.
    smth_factors (optional) : scalar or None or list with a combination of both.
        Fraction(s) of the scale(s) at which the DEM is smoothed first (with a
        gaussian filter). If None (default), no prior smoothing is performed.
        If a scalar, the same fraction is used to determine the smoothing scale
        of all specified scales. If a list, must match the length of arg 'scales'.
    ind_nans (optional) : tuple of two 1D arrays
        Contains the (row, column) indices of the NaNs in the original DEM to be
        reassigned after computations. NaNs in the original DEM should be
        interpolated prior to computations as they propagate in convolutions with
        the fast Fourier transform method (scipy.signal.convolve).
    crop (optional) : dict
        If specified the outputs are cropped to the given extent. Keys should be
        the coordinates labels of dem_ds and values should be slices of [min,max]
        extent. Default is None.
    outdir (optional) : string
        The path to the output directory. Save to working directory by default.

    See also
    --------
    tpi, circular_kernel
    """

    hlp.check_dem(dem_ds)
    logger.info(f"***Starting TPI computation for scales {scales} meters***")
    if not hasattr(scales, "__iter__"):
        scales = [scales]
    if not hasattr(smth_factors, "__iter__"):
        smth_factors = [smth_factors] * len(scales)

    scales_pxl, _ = hlp.scale_to_pixel(scales, dem_ds)
    sigmas = hlp.get_sigmas(smth_factors, scales_pxl)
    dem_val = hlp.get_da(dem_ds).values
    units = "m"

    for idx, scale_pxl in enumerate(scales_pxl):
        logger.info(
            f"Computing scale {scales[idx]} meters with smoothing factor"
            f" {smth_factors[idx]} ..."
        )
        name = _tpi_name(scales[idx], smth_factors[idx])
        array = tpi(dem_val, scale_pxl, sigma=sigmas[idx])
        array[ind_nans] = np.nan
        hlp.to_netcdf(array, dem_ds, name, crop, outdir, units)
        del array


@hlp.timer
def tpi(dem, size, sigma=None):
    """Compute the TPI over a digital elevation model. The TPI represents
    the elevation difference of a pixel relative to its neighbors.

    Parameters
    ----------
    dem : array representing the DEM.
    size : int
        Size of the kernel for the convolution. Represents the diameter (i.e. scale)
        in pixels at which the TPI is computed.
    sigma (optional) : scalar
        If provided, the DEM is first smoothed with a gaussian filter of standard
        deviation sigma (in pixel size).

    Returns
    -------
    array with TPI values

    See also
    --------
    scipy.signal.convolve, scipy.ndimage.gaussian_filter
    """

    kernel = circular_kernel(size)
    # exclude mid point from the kernel
    kernel[int(size / 2), int(size / 2)] = 0

    if sigma:
        dem = ndimage.gaussian_filter(dem, sigma)

    conv_fn = lambda a: signal.convolve(a, kernel, mode="same")

    # if the DEM is dask-backed, convolve blockwise with an overlap
    if isinstance(dem.data, da.Array):
        conv = da.map_overlap(conv_fn, dem.data, depth=size * 2, boundary="none")
    else:
        conv = conv_fn(dem)
    return dem - conv / np.sum(kernel)


def _tpi_name(scale, smth_factor):
    """Return name for the array in output of the tpi function"""

    add = f"_SMTHFACT{smth_factor:.3g}" if smth_factor else ""
    return f"TPI_{scale}M{add}"


def circular_kernel(size):
    """Generate a circular kernel.

    Parameters
    ----------
    size : int
        Size of the circular kernel (its diameter). For size < 5, the kernel is
        a square instead of a circle.

    Returns
    -------
    2-D array with kernel values
    """

    middle = int(size / 2)
    if size < 5:
        kernel = np.ones((size, size), dtype=np.float32)
    else:
        xx, yy = np.mgrid[:size, :size]
        circle = (xx - middle) ** 2 + (yy - middle) ** 2
        kernel = np.asarray(circle <= (middle**2), dtype=np.float32)

    return kernel
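
# Illustrative example: circular_kernel(5) returns
# [[0., 0., 1., 0., 0.],
#  [0., 1., 1., 1., 0.],
#  [1., 1., 1., 1., 1.],
#  [0., 1., 1., 1., 0.],
#  [0., 0., 1., 0., 0.]]
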
def compute_std(dem_ds, scales, smth_factors=None, ind_nans=[], crop=None, outdir="."):
    """Wrapper to 'std' function to launch computations for all scales and save
    outputs as netCDF files.

    Parameters
    ----------
    dem_ds : xarray Dataset representing the DEM and its grid coordinates.
    scales : scalar or list of scalars
        Scale(s) in meters on which we want to compute the STD.
        Corresponds to the diameter of the kernel used to compute it.
    smth_factors (optional) : scalar or None or list with a combination of both.
        Fraction(s) of the scale(s) at which the DEM is smoothed first (with a
        gaussian filter). If None (default), no prior smoothing is performed.
        If a scalar, the same fraction is used to determine the smoothing scale
        of all specified scales. If a list, must match the length of arg 'scales'.
    ind_nans (optional) : tuple of two 1D arrays
        Contains the (row, column) indices of the NaNs in the original DEM to be
        reassigned after computations. NaNs in the original DEM should be
        interpolated prior to computations as they propagate in convolutions with
        the fast Fourier transform method (scipy.signal.convolve).
    crop (optional) : dict
        If specified the outputs are cropped to the given extent. Keys should be
        the coordinates labels of dem_ds and values should be slices of [min,max]
        extent. Default is None.
    outdir (optional) : string
        The path to the output directory. Save to working directory by default.

    See also
    --------
    std, circular_kernel
    """

    hlp.check_dem(dem_ds)
    logger.info(f"***Starting STD computation for scales {scales} meters***")
    if not hasattr(scales, "__iter__"):
        scales = [scales]
    if not hasattr(smth_factors, "__iter__"):
        smth_factors = [smth_factors] * len(scales)

    scales_pxl, _ = hlp.scale_to_pixel(scales, dem_ds)
    sigmas = hlp.get_sigmas(smth_factors, scales_pxl)
    dem_val = hlp.get_da(dem_ds).values
    units = "m"

    for idx, scale_pxl in enumerate(scales_pxl):
        logger.info(
            f"Computing scale {scales[idx]} meters with smoothing factor"
            f" {smth_factors[idx]} ..."
        )
        name = _std_name(scales[idx], smth_factors[idx])
        array = std(dem_val, scale_pxl, sigma=sigmas[idx])
        array[ind_nans] = np.nan
        hlp.to_netcdf(array, dem_ds, name, crop, outdir, units)
        del array


@hlp.timer
def std(dem, size, sigma=None):
    """Compute the standard deviation over a digital elevation model within
    a rolling window.

    Parameters
    ----------
    dem : array representing the DEM.
    size : int
        Size of the kernel for the convolution. Represents the diameter (i.e. scale)
        in pixels at which the std is computed.
    sigma (optional) : scalar
        If provided, the DEM is first smoothed with a gaussian filter of standard
        deviation sigma (in pixel size).

    Returns
    -------
    array with local standard deviation values

    See also
    --------
    scipy.signal.convolve, scipy.ndimage.gaussian_filter
    """
    kernel = circular_kernel(size)
    kernel_sum = np.sum(kernel)
    if sigma:
        dem = ndimage.gaussian_filter(dem, sigma)

    squared_dem = dem.astype("int32") ** 2
    sum_dem = signal.convolve(dem, kernel, mode="same")
    sum_squared_dem = signal.convolve(squared_dem, kernel, mode="same")

    variance = (sum_squared_dem - sum_dem**2 / kernel_sum) / (kernel_sum - 1)
    variance = np.clip(variance, 0, None)  # avoid small negative values

    return np.sqrt(variance)
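
# The rolling standard deviation above relies on the classic two-convolution
# identity: with n = kernel_sum, var = (sum(x**2) - sum(x)**2 / n) / (n - 1),
# evaluated over each circular window.
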
def _std_name(scale, smth_factor):
    """Return name for the array in output of the std function"""

    add = f"_SMTHFACT{smth_factor:.3g}" if smth_factor else ""
    return f"STD_{scale}M{add}"


def compute_valley_ridge(
    dem_ds,
    scales,
    mode,
    flat_list=[0, 0.15, 0.3],
    smth_factors=None,
    ind_nans=[],
    crop=None,
    outdir=".",
):
    """Wrapper to 'valley_ridge' function to launch computations for all scales
    and save outputs as netCDF files.

    Parameters
    ----------
    dem_ds : xarray Dataset representing the DEM and its grid coordinates.
    scales : scalar or list of scalars
        Scale(s) in meters over which we want to compute the valley or the ridge
        index. Corresponds to the size of the squared kernel used to compute it.
    mode : {valley, ridge}
        Whether to compute the valley or ridge index.
    flat_list (optional) : list of floats in [0,1[
        Fractions of flat along the center line of the V-shape kernels. A certain
        amount of flat is used to approximate the shape of glacial valleys.
        Default is [0, 0.15, 0.3].
    smth_factors (optional) : scalar or None or list with a combination of both.
        Fraction(s) of the scale(s) at which the DEM is smoothed first (with a
        gaussian filter). If None (default), no prior smoothing is performed.
        If a scalar, the same fraction is used to determine the smoothing scale
        of all specified scales. If a list, must match the length of arg 'scales'.
    ind_nans (optional) : tuple of two 1D arrays
        Contains the (row, column) indices of the NaNs in the original DEM to be
        reassigned after computations. NaNs in the original DEM should be
        interpolated prior to computations as they propagate in convolutions with
        the fast Fourier transform method (scipy.signal.convolve).
    crop (optional) : dict
        If specified the outputs are cropped to the given extent. Keys should be
        the coordinates labels of dem_ds and values should be slices of [min,max]
        extent. Default is None.
    outdir (optional) : string
        The path to the output directory. Save to working directory by default.

    See also
    --------
    valley_ridge, _valley_kernels, _valley_ridge_names
    """

    hlp.check_dem(dem_ds)
    logger.info(f"***Starting {mode} index computation for scales {scales} meters***")
    if not hasattr(scales, "__iter__"):
        scales = [scales]
    if not hasattr(smth_factors, "__iter__"):
        smth_factors = [smth_factors] * len(scales)

    scales_pxl, _ = hlp.scale_to_pixel(scales, dem_ds)
    sigmas = hlp.get_sigmas(smth_factors, scales_pxl)
    dem_val = hlp.get_da(dem_ds).values
    units = "1"

    for idx, scale_pxl in enumerate(scales_pxl):
        logger.info(
            f"Computing scale {scales[idx]} meters with smoothing factor"
            f" {smth_factors[idx]} ..."
        )
        names = _valley_ridge_names(scales[idx], mode, smth_factors[idx])
        arrays = valley_ridge(dem_val, scale_pxl, mode, flat_list, sigmas[idx])

        for array, name in zip(arrays, names):
            array[ind_nans] = np.nan
            hlp.to_netcdf(array, dem_ds, name, crop, outdir, units)


@hlp.timer
def valley_ridge(dem, size, mode, flat_list=[0, 0.15, 0.3], sigma=None):
    """Compute the valley or ridge index over a digital elevation model.
    The valley/ridge index highlights valleys or ridges at various scales.

    Parameters
    ----------
    dem : array representing the DEM.
    size : int
        Size of the kernel for the convolution. Represents the width (i.e. scale)
        in pixels of the valleys we are trying to highlight.
    mode : {valley, ridge}
        Whether to compute the valley or ridge index.
    flat_list (optional) : list of floats in [0,1[
        Fractions of flat along the center line of the V-shape kernels. A certain
        amount of flat is used to approximate the shape of glacial valleys.
        Default is [0, 0.15, 0.3].
    sigma (optional) : scalar
        If provided, the DEM is first smoothed with a gaussian filter of standard
        deviation sigma (in pixel size).

    Returns
    -------
    list of two arrays :
        First element is the norm and second the direction of the valley or ridge
        index. The direction in degrees is defined from 0° to 179°, increasing
        clockwise. W-E oriented valleys have a direction close to 0° or 180°,
        while S-N oriented valleys have a direction close to 90°.

    See also
    --------
    scipy.signal.convolve, scipy.ndimage.gaussian_filter
    """

    if mode not in ("valley", "ridge"):
        raise ValueError(f"Unknown mode {mode!r}")

    if sigma:
        dem = ndimage.gaussian_filter(dem, sigma)

    dem = (dem - dem.mean()) / dem.std()
    n_y, n_x = dem.shape
    dem = np.broadcast_to(dem, (len(flat_list), n_y, n_x))
    angles = np.arange(0, 180, dtype=np.float32)
    index_norm = np.zeros((n_y, n_x), dtype=np.float32) - np.inf
    index_dir = np.empty((n_y, n_x), dtype=np.float32)

    if mode == "ridge":
        kernels = _ridge_kernels(size, flat_list)
    else:
        kernels = _valley_kernels(size, flat_list)

    for angle in angles:  # 0° = E-W valleys, 90° = S-N valleys
        kernels_rot = _rotate_kernels(kernels, angle)
        conv = signal.convolve(dem, kernels_rot, mode="same")
        conv = np.max(conv, axis=0)
        bool_greater = conv > index_norm
        index_norm[bool_greater] = conv[bool_greater]
        index_dir[bool_greater] = angle
        del bool_greater
        if angle % 20 == 0:
            logger.info(f"angle {int(angle)}/180 finished")

    index_norm = np.ndarray.clip(index_norm, min=0)
    return [index_norm, index_dir]


def _valley_ridge_names(scale, mode, smth_factor):
    """Return names for the arrays in output of the valley_ridge function"""

    add = f"_SMTHFACT{smth_factor:.3g}" if smth_factor else ""
    name_norm = f"{mode}_NORM_{scale}M{add}"
    name_dir = f"{mode}_DIR_{scale}M{add}"

    return [name_norm, name_dir]


def _valley_kernels(size, flat_list):
    """Generate normalized V-shape and U-shape kernels to compute valley index.

    Parameters
    ----------
    size : int
        Size of the kernel.
    flat_list : list of floats in [0,1[
        Fractions of flat along the center line of the V-shape kernels. A certain
        amount of flat is used to approximate the shape of glacial valleys.

    Returns
    -------
    3-D array with 2-D kernels for each specified flat fraction.
    """

    middle = int(np.floor(size / 2))
    kernel_tmp = np.broadcast_to(np.arange(0, middle + 1), (size, middle + 1)).T
    kernel_tmp = np.concatenate(
        (np.flip(kernel_tmp[1:, :], axis=0), kernel_tmp), axis=0
    )
    kernel_tmp = np.asarray(kernel_tmp, dtype=np.float32)
    kernels = np.broadcast_to(kernel_tmp, (len(flat_list), size, size)).copy()

    for ind, flat in enumerate(flat_list):
        halfwidth = int(np.floor(np.floor(size * flat / 2) + 0.5))
        kernels[ind, middle - halfwidth : middle + halfwidth + 1, :] = kernels[
            ind, middle - halfwidth, 0
        ]
    kernels = (kernels - np.mean(kernels, axis=(1, 2), keepdims=True)) / np.std(
        kernels, axis=(1, 2), keepdims=True
    )

    return kernels
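
# Illustrative example: for size=5 and flat=0, the kernel rows (before
# standardization) are constant with values [2, 1, 0, 1, 2] from top to bottom,
# i.e. a V-shaped profile with its minimum along the center line.
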
def _ridge_kernels(size, flat_list):
    """Generate normalized flipped V-shape and U-shape kernels to compute ridge index.

    Parameters
    ----------
    size : int
        Size of the kernel.
    flat_list : list of floats in [0,1[
        Fractions of flat along the center line of the V-shape kernels. A certain
        amount of flat is used to approximate the shape of glacial valleys.

    Returns
    -------
    3-D array with 2-D kernels for each specified flat fraction.
    """

    return _valley_kernels(size, flat_list) * -1


def _rotate_kernels(kernel, angle):
    """Rotate a 3-D kernel in the plane given by the last two axes"""

    kernels_rot = ndimage.rotate(
        kernel, angle, axes=(1, 2), reshape=True, order=2, mode="constant", cval=-9999
    )
    kernels_rot = ma.masked_array(kernels_rot, mask=kernels_rot == -9999)
    kernels_rot = (
        kernels_rot - np.mean(kernels_rot, axis=(1, 2), keepdims=True)
    ) / np.std(kernels_rot, axis=(1, 2), keepdims=True)
    return ma.MaskedArray.filled(kernels_rot, 0).astype(np.float32)


def compute_gradient(dem_ds, scales, sig_ratios=1, ind_nans=[], crop=None, outdir="."):
    """Wrapper to 'gradient' function to launch computations for all scales
    and save outputs as netCDF files.

    Parameters
    ----------
    dem_ds : xarray Dataset representing the DEM and its grid coordinates.
    scales : scalar or list of scalars
        Scale(s) in meters on which we want to compute the gradients.
        Corresponds to the size of the squared kernel used to compute them.
    sig_ratios (optional) : scalar or list of scalars.
        Ratios w.r.t scales to define the smoothing scale in the perpendicular
        direction of the directional derivatives. If a list, must match the length
        of arg 'scales'. Default is 1 (i.e. same smoothing on both directions for
        all scales)
    ind_nans (optional) : tuple of two 1D arrays
        Contains the (row, column) indices of the NaNs in the original DEM to be
        reassigned after computations. NaNs in the original DEM should be
        interpolated prior to computations as they propagate in convolutions.
    crop (optional) : dict
        If specified the outputs are cropped to the given extent. Keys should be
        the coordinates labels of dem_ds and values should be slices of [min,max]
        extent. Default is None.
    outdir (optional) : string
        The path to the output directory. Save to working directory by default.

    See also
    --------
    gradient, sobel, _gradient_names
    """

    hlp.check_dem(dem_ds)
    logger.info(f"***Starting gradients computation for scales {scales} meters***")
    if not hasattr(scales, "__iter__"):
        scales = [scales]
    if not hasattr(sig_ratios, "__iter__"):
        sig_ratios = [sig_ratios] * len(scales)

    scales_pxl, res_meters = hlp.scale_to_pixel(scales, dem_ds)
    sigmas = scales_pxl / CFG.scale_std
    dem_val = hlp.get_da(dem_ds).values
    all_units = ["1", "1", "degree", "degree"]

    for idx, sigma in enumerate(sigmas):
        logger.info(
            f"Computing scale {scales[idx]} meters with sigma ratio "
            f"{sig_ratios[idx]} ..."
        )
        names = _gradient_names(scales[idx], sig_ratios[idx])
        arrays = gradient(
            dem_val,
            sigma,
            res_meters,
            sig_ratio=sig_ratios[idx],
        )

        for array, name, units in zip(arrays, names, all_units):
            array[ind_nans] = np.nan
            hlp.to_netcdf(array, dem_ds, name, crop, outdir, units)

        del arrays
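
# Quick check of the aspect convention used in 'gradient' below (illustrative):
# on a north-facing slope the terrain rises towards the south (dx = 0, dy < 0),
# so (180 + degrees(arctan2(0, -1))) % 360 = 0, i.e. north faces map to 0°.
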
597 | @hlp.timer
598 | def gradient(dem, sigma, res_meters, sig_ratio=1):
599 |     """Compute directional derivatives, slope and aspect over a digital elevation
600 |     model.
601 |
602 |     Parameters
603 |     ----------
604 |     dem : array representing the DEM.
605 |     sigma : scalar
606 |         Standard deviation for the Gaussian filters. This is set at 1/4 of the
607 |         scale at which topo descriptors are computed (i.e. scale = 4*sigma).
608 |     res_meters : dict with two 1-D or 2-D arrays
609 |         Resolution in meters of each DEM grid point in the x and y directions.
610 |         This is the second element returned by the function hlp.scale_to_pixel.
611 |     sig_ratio (optional) : scalar
612 |         Ratio w.r.t. sigma to define the standard deviation of the Gaussian in
613 |         the direction perpendicular to the derivative. Default is 1 (i.e. same
614 |         smoothing in both directions).
615 |
616 |     Returns
617 |     -------
618 |     list of four arrays :
619 |         First element is the W-E derivative, second the S-N derivative, third
620 |         the slope (i.e. magnitude of the gradient) and fourth the aspect (i.e.
621 |         direction of the gradient).
622 |
623 |     See also
624 |     --------
625 |     scipy.ndimage.gaussian_filter, np.gradient, topo_helpers.scale_to_pixel
626 |     """
627 |
628 |     if sigma <= 1:
629 |         dx, dy = sobel(dem)  # for the lowest scales, use a Sobel filter instead
630 |     elif sig_ratio == 1:  # faster solution when sig_ratio is 1
631 |         dy, dx = np.gradient(ndimage.gaussian_filter(dem, sigma))
632 |     else:
633 |         sigma_perp = sigma * sig_ratio
634 |         dx = np.gradient(ndimage.gaussian_filter(dem, (sigma_perp, sigma)), axis=1)
635 |         dy = np.gradient(ndimage.gaussian_filter(dem, (sigma, sigma_perp)), axis=0)
636 |
637 |     _normalize_dxy(dx, dy, res_meters)
638 |
639 |     slope = np.arctan(np.sqrt(dx**2 + dy**2)) * (180 / np.pi)  # in degrees
640 |     aspect = (
641 |         180 + np.degrees(np.arctan2(dx, dy))
642 |     ) % 360  # north faces = 0°, east faces = 90°
643 |
644 |     return [dx, dy, slope, aspect]
645 |
646 |
647 | def _gradient_names(scale, sig_ratio):
648 |     """Return names for the output arrays of the gradient function."""
649 |
650 |     name_dx = f"WE_DERIVATIVE_{scale}M_SIGRATIO{sig_ratio:.3g}"
651 |     name_dy = f"SN_DERIVATIVE_{scale}M_SIGRATIO{sig_ratio:.3g}"
652 |     name_slope = f"SLOPE_{scale}M_SIGRATIO{sig_ratio:.3g}"
653 |     name_aspect = f"ASPECT_{scale}M_SIGRATIO{sig_ratio:.3g}"
654 |
655 |     return [name_dx, name_dy, name_slope, name_aspect]
656 |
657 |
658 | def sobel(dem):
659 |     """Compute directional derivatives based on the Sobel filter. The Sobel
660 |     filter combines a derivative with a smoothing filter. It is defined over a
661 |     3x3 kernel.
662 |
663 |     Parameters
664 |     ----------
665 |     dem : array representing the DEM.
666 |
667 |     Returns
668 |     -------
669 |     dx : 2-D array
670 |         Derivative in the x direction (i.e. along the second axis)
671 |     dy : 2-D array
672 |         Derivative in the y direction (i.e. along the first axis)
673 |
674 |     See also
675 |     --------
676 |     scipy.ndimage.convolve
677 |     """
678 |
679 |     sobel_kernel = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=np.float32)
680 |
681 |     sobel_kernel /= np.sum(np.abs(sobel_kernel))
682 |     dx = ndimage.convolve(dem, sobel_kernel)
683 |     dy = ndimage.convolve(dem, sobel_kernel.T)
684 |
685 |     return dx, dy
686 |
687 |
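The gradient function can be tried end to end without a real DEM; a minimal sketch on a synthetic inclined plane, where the uniform 100 m resolution dictionary mimics what `hlp.scale_to_pixel` would return:

import numpy as np
from topo_descriptors.topo import gradient

# Plane rising 1 m per pixel towards the east, on a 100 m grid.
dem = np.tile(np.arange(100, dtype=np.float32), (100, 1))
res_meters = {"x": np.full(100, 100.0), "y": np.full(100, 100.0)}

dx, dy, slope, aspect = gradient(dem, sigma=2, res_meters=res_meters)

# Away from the borders: dx = 1 m / 100 m = 0.01 and dy = 0, so the slope is
# arctan(0.01) ~ 0.57 degrees and the aspect ~ 270 degrees (the terrain rises
# eastwards, hence the slope faces west).
print(slope[50, 50], aspect[50, 50])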
688 | def _normalize_dxy(dx, dy, res_meters):
689 |     """Normalize directional derivatives, based on the (projected) grid resolution.
690 |     This converts from units of "m / pixel" to "1". Normalization occurs in place.
691 |
692 |     Parameters
693 |     ----------
694 |     dx : 2-D array
695 |         Derivative in the x direction (i.e. along the second axis)
696 |     dy : 2-D array
697 |         Derivative in the y direction (i.e. along the first axis)
698 |     res_meters : dict with two 1-D or 2-D arrays
699 |         Resolution in meters of each DEM grid point in the x and y directions.
700 |         This is the second element returned by the function hlp.scale_to_pixel.
701 |
702 |     See also
703 |     --------
704 |     topo_helpers.scale_to_pixel
705 |     """
706 |
707 |     y_res = res_meters["y"]
708 |     if len(y_res.shape) == 1:
709 |         y_res = y_res[:, np.newaxis]
710 |
711 |     dx /= res_meters["x"]
712 |     dy /= y_res
713 |
714 |
715 | def compute_sx(
716 |     dem_ds,
717 |     azimuth,
718 |     radius,
719 |     height=10.0,
720 |     azimuth_arc=10.0,
721 |     azimuth_steps=15,
722 |     radius_min=0.0,
723 |     crop=None,
724 |     outdir=".",
725 | ):
726 |     """Wrapper to the 'sx' function to launch computations and save
727 |     outputs as netCDF files.
728 |
729 |     Parameters
730 |     ----------
731 |     dem_ds : xarray Dataset representing the DEM and its grid coordinates.
732 |     azimuth : scalar
733 |         Azimuth angle in degrees for the imaginary lines.
734 |     radius : scalar
735 |         Maximum distance in meters for the imaginary lines.
736 |     azimuth_arc (optional) : scalar
737 |         Angle of the circular sector centered around 'azimuth'.
738 |     azimuth_steps (optional) : scalar integer
739 |         Number of lines traced to find pixels within the circular sector.
740 |         A higher number leads to more precise but longer computations.
741 |     radius_min (optional) : scalar
742 |         Minimum value of radius below which pixels are excluded from the imaginary lines.
743 |     height (optional) : scalar
744 |         Parameter that accounts for instrument heights and
745 |         reduces the impact of small proximal terrain perturbations.
746 |     crop (optional) : dict
747 |         If specified, the outputs are cropped to the given extent. Keys should be
748 |         the coordinate labels of dem_ds and values should be slices of [min, max]
749 |         extent. Default is None.
750 |
751 |     See also
752 |     --------
753 |     sx, _sx_distance, _sx_source_idx_delta, _sx_bresenhamlines, _sx_rolling
754 |     """
755 |     hlp.check_dem(dem_ds)
756 |     logger.info(
757 |         f"***Starting Sx computation for azimuth {azimuth} degrees and radius {radius} meters***"
758 |     )
759 |
760 |     array = sx(
761 |         dem_ds,
762 |         azimuth,
763 |         radius,
764 |         height=height,
765 |         azimuth_arc=azimuth_arc,
766 |         azimuth_steps=azimuth_steps,
767 |         radius_min=radius_min,
768 |     )
769 |
770 |     units = "degree"
771 |     name = _sx_name(radius, azimuth)
772 |     hlp.to_netcdf(array, dem_ds, name, crop, outdir, units)
773 |
774 |
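Since the routine handles one azimuth at a time, a full wind-direction analysis is typically a loop over azimuths; a sketch with a hypothetical input file and output directory:

import xarray as xr
import topo_descriptors.topo as tp

dem_ds = xr.open_dataset("dem.nc")  # hypothetical DEM file

# One netCDF output per wind direction, here every 45 degrees, with a
# 500 m search radius.
for azimuth in range(0, 360, 45):
    tp.compute_sx(dem_ds, azimuth=azimuth, radius=500, outdir="./out")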
775 | @hlp.timer
776 | def sx(
777 |     dem_ds,
778 |     azimuth,
779 |     radius,
780 |     height=10.0,
781 |     azimuth_arc=10.0,
782 |     azimuth_steps=15,
783 |     radius_min=0.0,
784 | ):
785 |     """Compute the Sx over a digital elevation model.
786 |
787 |     The Sx represents the maximum slope among all imaginary lines connecting a
788 |     given pixel with all the ones lying in a specific direction and within a
789 |     specified distance (Winstral et al., 2017). The Sx is a proven wind-specific
790 |     terrain parameterization, as it is able to differentiate slopes based
791 |     on given wind directions and to identify sheltered and exposed locations with
792 |     respect to the incoming wind.
793 |     Note that this routine computes one azimuth at a time. It is accelerated with
794 |     Numba's just-in-time compilation, but it is still computationally expensive.
795 |
796 |     Parameters
797 |     ----------
798 |     dem_ds : xarray Dataset representing the DEM and its grid coordinates.
799 |     azimuth : scalar
800 |         Azimuth angle in degrees for the imaginary lines.
801 |     radius : scalar
802 |         Maximum distance in meters for the imaginary lines.
803 |     azimuth_arc (optional) : scalar
804 |         Angle of the circular sector centered around 'azimuth'.
805 |         Set to zero in order to draw a single line.
806 |     azimuth_steps (optional) : scalar integer
807 |         Number of lines traced to find pixels within the circular sector.
808 |         A higher number leads to more precise but longer computations.
809 |         Defaults to 1 when 'azimuth_arc' is 0.
810 |     radius_min (optional) : scalar
811 |         Minimum value of radius below which pixels are excluded from the imaginary lines.
812 |     height (optional) : scalar
813 |         Parameter that accounts for instrument heights and
814 |         reduces the impact of small proximal terrain perturbations.
815 |
816 |     Returns
817 |     -------
818 |     array with Sx values for one azimuth
819 |
820 |     See also
821 |     --------
822 |     _sx_distance, _sx_source_idx_delta, _sx_bresenhamlines, _sx_rolling
823 |     """
824 |
825 |     if not isinstance(dem_ds, xr.Dataset):
826 |         raise TypeError("Argument 'dem_ds' must be an xr.Dataset.")
827 |
828 |     if azimuth_arc == 0:
829 |         azimuth_steps = 1
830 |
831 |     # define all azimuths
832 |     azimuths = np.linspace(
833 |         azimuth - azimuth_arc / 2, azimuth + azimuth_arc / 2, azimuth_steps
834 |     )
835 |
836 |     # grid resolutions
837 |     _, res_meters = hlp.scale_to_pixel(radius, dem_ds)
838 |     dx = res_meters["x"].mean()
839 |     dy = res_meters["y"].mean()
840 |
841 |     # horizontal distance in meters from the center in a window of size 2*radius
842 |     window_distance = _sx_distance(radius, dx, dy)
843 |
844 |     # exclude pixels closer than radius_min
845 |     window_distance[window_distance < radius_min] = np.nan
846 |
847 |     # indices of pixels that lie at distance radius in direction azimuth
848 |     window_center = np.floor(np.array(window_distance.shape) / 2)
849 |     source_delta = _sx_source_idx_delta(azimuths, radius, dx, dy)
850 |     source = (window_center + source_delta).astype(int)
851 |
852 |     # indices of all pixels between the source pixels and the target (center)
853 |     lines_indices = _sx_bresenhamlines(source, window_center)
854 |
855 |     # compute Sx
856 |     sx = _sx_rolling(hlp.get_da(dem_ds).values, window_distance, lines_indices, height)
857 |
858 |     return sx
859 |
860 |
861 | def _sx_distance(radius, dx, dy):
862 |     """Compute distances in meters from the center of a window of half-size 'radius'."""
863 |
864 |     dx_abs = np.abs(dx)
865 |     dy_abs = np.abs(dy)
866 |     radius_pxl = max(radius / dy_abs, radius / dx_abs)
867 |
868 |     # initialize window
869 |     window = 2 * radius_pxl + 1  # must be odd
870 |     center = np.floor(window / 2)
871 |     x = np.arange(window)
872 |     y = np.arange(window)
873 |     x, y = np.meshgrid(x, y)
874 |
875 |     # calculate distances from the center for all points in the window
876 |     distances = np.sqrt((((y - center) * dy) ** 2) + ((x - center) * dx) ** 2)
877 |
878 |     return distances
879 |
880 |
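The window geometry above can be checked in isolation; a minimal sketch calling the private helper with a uniform, hypothetical 100 m grid spacing:

import numpy as np
from topo_descriptors.topo import _sx_distance

# A 500 m radius on a 100 m grid yields an 11x11 window (2 * 5 + 1 pixels)
# holding each pixel's metric distance from the center.
dist = _sx_distance(radius=500, dx=100.0, dy=100.0)
print(dist.shape)   # (11, 11)
print(dist[5, 5])   # 0.0, the center pixel
print(dist[5, 10])  # 500.0, five pixels east of the center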
884 | """ 885 | 886 | azimuths_rad = np.deg2rad(azimuths) 887 | delta_y_idx = np.rint(radius / dy * np.cos(azimuths_rad)) 888 | delta_x_idx = np.rint(radius / dx * np.sin(azimuths_rad)) 889 | 890 | delta = np.column_stack([delta_y_idx, delta_x_idx]) 891 | 892 | return delta.astype(np.int64) 893 | 894 | 895 | def _sx_bresenhamlines(start, end): 896 | """Compute indices of all pixels that lie between two sets of pixels.""" 897 | 898 | max_iter = np.max(np.max(np.abs(end - start), axis=1)) 899 | npts, dim = start.shape 900 | 901 | slope = end - start 902 | scale = np.max(np.abs(slope), axis=1).reshape(-1, 1) 903 | zeroslope = (scale == 0).all(1) 904 | scale[zeroslope] = np.ones(1) 905 | normalizedslope = np.array(slope, dtype=np.double) / scale 906 | normalizedslope[zeroslope] = np.zeros(slope[0].shape) 907 | 908 | # steps to iterate on 909 | stepseq = np.arange(1, max_iter + 1) 910 | stepmat = np.tile(stepseq, (dim, 1)).T 911 | 912 | # some hacks for broadcasting properly 913 | blines = start[:, np.newaxis, :] + normalizedslope[:, np.newaxis, :] * stepmat 914 | 915 | # Approximate to nearest int 916 | blines = np.array(np.rint(blines), dtype=start.dtype) 917 | 918 | # Stop lines before center 919 | bsum = np.abs(blines - end).sum(axis=2) 920 | mask = np.diff(bsum, prepend=bsum[:, 0:1]) <= 0 921 | blines = blines[mask].reshape(-1, start.shape[-1]) 922 | mask = np.all(blines == end, axis=1) 923 | blines = blines[~mask] 924 | 925 | return blines 926 | 927 | 928 | @njit(parallel=True) 929 | def _sx_rolling(dem, distance, blines, height): 930 | """Compute Sx values for the array with a loop over all elements.""" 931 | 932 | window = int(distance.shape[0] / 2) 933 | ny, nx = dem.shape 934 | distance = np.array( 935 | [distance[j, i] for j, i in list(zip(blines[:, 0], blines[:, 1]))] 936 | ) 937 | blines_centered = blines - window 938 | 939 | sx = np.zeros_like(dem) 940 | for j in prange(window, ny - window): 941 | for i in prange(window, nx - window): 942 | j_blines = j + blines_centered[:, 0] 943 | i_blines = i + blines_centered[:, 1] 944 | dem_blines = np.array([dem[j, i] for j, i in list(zip(j_blines, i_blines))]) 945 | 946 | # compute tangent z / distance between P0 and all points 947 | z = dem_blines - (dem[j, i] + height) 948 | elev_angle = np.rad2deg(np.arctan(z / distance)) 949 | 950 | # find the maximum angle in the cone 951 | sx[j, i] = np.nanmax(elev_angle) 952 | 953 | return sx 954 | 955 | 956 | def _sx_name(radius, azimuth): 957 | """Return name for the array in output of the Sx function""" 958 | 959 | add = f"_RADIUS{int(radius)}_AZIMUTH{int(azimuth)}" 960 | return f"SX{add}" 961 | --------------------------------------------------------------------------------