├── .github └── workflows │ ├── python-publish.yml │ └── python-tests.yml ├── .gitignore ├── .readthedocs.yaml ├── LICENSE ├── README.md ├── demo ├── getting_started.ipynb ├── las_623_5718_1_th_2014-2019.lax ├── netzkater_polygons.gpkg └── plotwise_metrics.ipynb ├── docs ├── .gitkeep ├── Makefile ├── make.bat ├── requirements.txt └── source │ ├── conf.py │ ├── index.rst │ ├── metricCalculators.rst │ ├── metrix.rst │ └── readme.rst ├── environment.yml ├── pyproject.toml ├── src └── pyForMetrix │ ├── __init__.py │ ├── __pycache__ │ ├── __init__.cpython-310.pyc │ ├── __init__.cpython-39.pyc │ ├── metrix.cpython-310.pyc │ └── rasterizer.cpython-310.pyc │ ├── metricCalculators │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-310.pyc │ │ ├── publications.cpython-310.pyc │ │ └── types.cpython-310.pyc │ ├── lidRmetrics │ │ ├── HOME.py │ │ ├── Lmoments.py │ │ ├── __init__.py │ │ ├── basic.py │ │ ├── canopydensity.py │ │ ├── dispersion.py │ │ ├── echo.py │ │ ├── interval.py │ │ ├── kde.py │ │ ├── lad.py │ │ ├── percabove.py │ │ ├── percentiles.py │ │ ├── rumple.py │ │ └── voxels.py │ ├── publications.py │ └── types.py │ ├── metrix.py │ ├── normalizer.py │ └── utils │ ├── __init__.py │ ├── rasterizer.py │ └── voxelizer.py └── tests ├── .gitkeep ├── requirements.txt ├── test_plot_metrics.py └── test_raster_metrics.py /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | workflow_dispatch: 15 | 16 | permissions: 17 | contents: read 18 | 19 | jobs: 20 | deploy: 21 | 22 | runs-on: ubuntu-latest 23 | 24 | steps: 25 | - uses: actions/checkout@v3 26 | - name: Set up Python 27 | uses: actions/setup-python@v3 28 | with: 29 | python-version: '3.x' 30 | - name: Install dependencies 31 | run: | 32 | python -m pip install --upgrade pip 33 | pip install build 34 | - name: Build package 35 | run: python -m build 36 | - name: Publish package 37 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 38 | with: 39 | user: __token__ 40 | password: ${{ secrets.PYPI_API_TOKEN }} 41 | -------------------------------------------------------------------------------- /.github/workflows/python-tests.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python 3 | 4 | name: Python tests 5 | 6 | on: 7 | push: 8 | branches: [ "main" ] 9 | pull_request: 10 | branches: [ "main" ] 11 | workflow_dispatch: 12 | 13 | jobs: 14 | build: 15 | 16 | runs-on: ubuntu-latest 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | python-version: ["3.8", "3.9", "3.10"] 21 | 22 | steps: 23 | - uses: actions/checkout@v3 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v3 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | - name: Install dependencies 29 | run: | 30 | python -m pip install --upgrade pip 31 | python -m pip install flake8 pytest 32 | python -m pip install -r tests/requirements.txt 33 | - name: Lint with flake8 34 | run: | 35 | # stop the build if there are Python syntax errors or undefined names 36 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 37 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 38 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 39 | - name: Install package 40 | run: | 41 | python -m pip install . 42 | - name: Test with pytest 43 | run: | 44 | python -m pytest tests 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /dist/ 2 | /demo/*.tif 3 | /demo/*.las 4 | /demo/*.lasx 5 | /demo/*.prj 6 | /demo/*.zip 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | share/python-wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .nox/ 50 | .coverage 51 | .coverage.* 52 | .cache 53 | nosetests.xml 54 | coverage.xml 55 | *.cover 56 | *.py,cover 57 | .hypothesis/ 58 | .pytest_cache/ 59 | cover/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | db.sqlite3 69 | db.sqlite3-journal 70 | 71 | # Flask stuff: 72 | instance/ 73 | .webassets-cache 74 | 75 | # Scrapy stuff: 76 | .scrapy 77 | 78 | # Sphinx documentation 79 | docs/_build/ 80 | 81 | # PyBuilder 82 | .pybuilder/ 83 | target/ 84 | 85 | # Jupyter Notebook 86 | .ipynb_checkpoints 87 | 88 | # IPython 89 | profile_default/ 90 | ipython_config.py 91 | 92 | # pyenv 93 | # For a library or package, you might want to ignore these files since the code is 94 | # intended to run in multiple environments; otherwise, check them in: 95 | # .python-version 96 | 97 | # pipenv 98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 101 | # install all needed dependencies. 102 | #Pipfile.lock 103 | 104 | # poetry 105 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 106 | # This is especially recommended for binary packages to ensure reproducibility, and is more 107 | # commonly ignored for libraries. 108 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 109 | #poetry.lock 110 | 111 | # pdm 112 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 113 | #pdm.lock 114 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 115 | # in version control. 116 | # https://pdm.fming.dev/#use-with-ide 117 | .pdm.toml 118 | 119 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 120 | __pypackages__/ 121 | 122 | # Celery stuff 123 | celerybeat-schedule 124 | celerybeat.pid 125 | 126 | # SageMath parsed files 127 | *.sage.py 128 | 129 | # Environments 130 | .env 131 | .venv 132 | env/ 133 | venv/ 134 | ENV/ 135 | env.bak/ 136 | venv.bak/ 137 | 138 | # Spyder project settings 139 | .spyderproject 140 | .spyproject 141 | 142 | # Rope project settings 143 | .ropeproject 144 | 145 | # mkdocs documentation 146 | /site 147 | 148 | # mypy 149 | .mypy_cache/ 150 | .dmypy.json 151 | dmypy.json 152 | 153 | # Pyre type checker 154 | .pyre/ 155 | 156 | # pytype static type analyzer 157 | .pytype/ 158 | 159 | # Cython debug symbols 160 | cython_debug/ 161 | 162 | # PyCharm 163 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 164 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 165 | # and can be added to the global gitignore or merged into this file. 
166 | 167 | 168 | # User-specific stuff 169 | .idea/**/workspace.xml 170 | .idea/**/tasks.xml 171 | .idea/**/usage.statistics.xml 172 | .idea/**/dictionaries 173 | .idea/**/shelf 174 | 175 | # AWS User-specific 176 | .idea/**/aws.xml 177 | 178 | # Generated files 179 | .idea/**/contentModel.xml 180 | 181 | # Sensitive or high-churn files 182 | .idea/**/dataSources/ 183 | .idea/**/dataSources.ids 184 | .idea/**/dataSources.local.xml 185 | .idea/**/sqlDataSources.xml 186 | .idea/**/dynamic.xml 187 | .idea/**/uiDesigner.xml 188 | .idea/**/dbnavigator.xml 189 | 190 | # Gradle 191 | .idea/**/gradle.xml 192 | .idea/**/libraries 193 | 194 | # Gradle and Maven with auto-import 195 | # When using Gradle or Maven with auto-import, you should exclude module files, 196 | # since they will be recreated, and may cause churn. Uncomment if using 197 | # auto-import. 198 | # .idea/artifacts 199 | # .idea/compiler.xml 200 | # .idea/jarRepositories.xml 201 | # .idea/modules.xml 202 | # .idea/*.iml 203 | # .idea/modules 204 | # *.iml 205 | # *.ipr 206 | 207 | # CMake 208 | cmake-build-*/ 209 | 210 | # Mongo Explorer plugin 211 | .idea/**/mongoSettings.xml 212 | 213 | # File-based project format 214 | *.iws 215 | 216 | # IntelliJ 217 | out/ 218 | 219 | # mpeltonen/sbt-idea plugin 220 | .idea_modules/ 221 | 222 | # JIRA plugin 223 | atlassian-ide-plugin.xml 224 | 225 | # Cursive Clojure plugin 226 | .idea/replstate.xml 227 | 228 | # SonarLint plugin 229 | .idea/sonarlint/ 230 | 231 | # Crashlytics plugin (for Android Studio and IntelliJ) 232 | com_crashlytics_export_strings.xml 233 | crashlytics.properties 234 | crashlytics-build.properties 235 | fabric.properties 236 | 237 | # Editor-based Rest Client 238 | .idea/httpRequests 239 | 240 | # Android studio 3.1+ serialized cache file 241 | .idea/caches/build_file_checksums.ser -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-20.04 11 | tools: 12 | python: "3.9" 13 | # You can also specify other tool versions: 14 | # nodejs: "16" 15 | # rust: "1.55" 16 | # golang: "1.17" 17 | 18 | # Build documentation in the docs/ directory with Sphinx 19 | sphinx: 20 | configuration: docs/source/conf.py 21 | 22 | # If using Sphinx, optionally build your docs in additional formats such as PDF 23 | # formats: 24 | # - pdf 25 | formats: 26 | - pdf 27 | - epub 28 | 29 | # Optionally declare the Python requirements required to build your docs 30 | python: 31 | install: 32 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022 Lukas Winiwarter 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The 
above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pyForMetrix 2 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/lwiniwar/pyForMetrix/HEAD?labpath=demo%2Fgetting_started.ipynb) 3 | [![ReadTheDocs](https://readthedocs.org/projects/pyformetrix/badge/?version=latest)](https://pyformetrix.readthedocs.io/en/latest/) 4 | [![FWF](https://img.shields.io/badge/Funding-FWF-green)](#acknowledgement) 5 | [![Python tests](https://github.com/lwiniwar/pyForMetrix/actions/workflows/python-tests.yml/badge.svg?branch=main)](https://github.com/lwiniwar/pyForMetrix/actions/workflows/python-tests.yml) 6 | 7 | `pyForMetrix` is a Python package to extract metrics commonly used in forestry from laser scanning/LiDAR data. Main functionalities include plot-based and pixel-based metric calculation, as well as handling of large datasets. 8 | 9 | ## Installation 10 | `pyForMetrix` is packaged and delivered via PyPI, and can be installed using **pip**: 11 | 12 | ```bash 13 | python -m pip install pyForMetrix 14 | ``` 15 | 16 | ## Getting started 17 | > Note: You can run this *Getting started* section on binder: 18 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/lwiniwar/pyForMetrix/HEAD?labpath=demo%2Fgetting_started.ipynb) 19 | 20 | First, we need a point cloud dataset. You can use your own or download a sample dataset, e.g. from the City of Vancouver: 21 | https://webtransfer.vancouver.ca/opendata/2018LiDAR/4830E_54560N.zip 22 | 23 | Unzip this file after download to find a `.las` file, which we will use in the following. 24 | 25 | We need to read the point cloud into a numpy array. Depending on the metrics we derive later, 26 | different attributes also have to be loaded. In this example, the 3D point cloud along with classification and 27 | echo number information is required. For reading the file, we use [`laspy`](https://laspy.readthedocs.io/). 28 | 29 | ```python 30 | import numpy as np 31 | import laspy 32 | 33 | inFile = laspy.read(r"4830E_54560N.las") 34 | coords = np.vstack([inFile.x, 35 | inFile.y, 36 | inFile.z]).transpose() 37 | points = { 38 | 'points': coords, 39 | 'echo_number': inFile.return_number, 40 | 'classification': inFile.classification 41 | } 42 | ``` 43 | 44 | After importing the package `pyForMetrix`, we can create a `RasterMetrics` or a `PlotMetrics` object, depending on 45 | the application. Let's first work with `RasterMetrics`, which will calculate the set of metrics for each cell of a 46 | raster overlaid on the point cloud data.
47 | 48 | ```python 49 | from pyForMetrix.metrix import RasterMetrics 50 | rm = RasterMetrics(points, raster_size=25) 51 | ``` 52 | The code above may take some time to run, as the point cloud is rasterized 53 | to the final cells when the `RasterMetrics` object is created. The runtime increases with more points and a smaller raster size. 54 | 55 | We then select which metrics we want to calculate. `pyForMetrix` comes with a number of predefined metrics, conveniently grouped in two collections: `publications`, which collects metrics taken from different publications in the literature, and `types`, which groups metrics by their type. Later, we will see how to create your own metric calculators. For now, we will use the ones presented by Woods et al. (2009): 56 | 57 | ```python 58 | from pyForMetrix.metricCalculators.publications import MCalc_Woods_et_al_2009 59 | mc = MCalc_Woods_et_al_2009() 60 | metrics = rm.calc_custom_metrics(metrics=mc) 61 | ``` 62 | 63 | With the last line, we created an [`xarray`](https://docs.xarray.dev/en/stable/)`.DataArray` object containing the metrics for each pixel: 64 | ```python 65 | print(metrics) 66 | ``` 67 | ``` 68 | 69 | array([[[ 1.19169000e+03, 1.19212000e+03, 1.19236000e+03, ..., 70 | -1.26632802e+00, 7.51640760e-01, 0.00000000e+00], 71 | [ 1.19254700e+03, 1.19255400e+03, 1.19256100e+03, ..., 72 | -2.00000000e+00, 1.00000000e+00, 0.00000000e+00], 73 | ... 74 | ``` 75 | 76 | Using [`rioxarray`](https://corteva.github.io/rioxarray/stable/), we can save the values (here: the `p90` metric, i.e., the 90th height percentile) to a raster file: 77 | 78 | ```python 79 | import rioxarray 80 | metrics.sel(val='p90').rio.to_raster("p90.tif", "COG") 81 | ``` 82 | 83 | ## More examples 84 | ### Multiple metric sets at once 85 | Instead of passing a single `metricCalculator` class to `calc_custom_metrics`, you can call it with a list of `metricCalculator`s: 86 | ````python 87 | from pyForMetrix.metricCalculators.types import MCalc_HeightMetrics, MCalc_DensityMetrics 88 | heightMetrics = MCalc_HeightMetrics() 89 | densityMetrics = MCalc_DensityMetrics() 90 | metrics = rm.calc_custom_metrics(metrics=[heightMetrics, densityMetrics]) 91 | ```` 92 | ### Override percentiles, custom options 93 | Some `metricCalculator`s can be customized, e.g. `MCalc_HeightMetrics` accepts an optional keyword `percentiles`, which 94 | replaces the percentiles calculated by default: 95 | 96 | ````python 97 | heightMetrics = MCalc_HeightMetrics(percentiles=np.array([15, 25, 50, 75, 85, 95, 99])) 98 | ```` 99 | 100 | Similarly, the cell size for the rumple index (e.g. in `MCalc_White_et_al_2015`) or the DSM in `MCalc_Hollaus_et_al_2009` 101 | can be set; these variables are passed as parameters to the `__call__` function.
`calc_custom_metrics` accepts them as a (list of) 102 | additional dictionaries with the settings: 103 | 104 | ````python 105 | from pyForMetrix.metricCalculators.publications import MCalc_White_et_al_2015, MCalc_Hollaus_et_al_2009 106 | whiteMetrics = MCalc_White_et_al_2015() 107 | metrics = rm.calc_custom_metrics(metrics=whiteMetrics, metric_options={'rumple_pixel_size': 0.2}) 108 | ```` 109 | ````python 110 | hollausMetrics = MCalc_Hollaus_et_al_2009() 111 | metrics = rm.calc_custom_metrics(metrics=[whiteMetrics, hollausMetrics], 112 | metric_options=[ 113 | {'rumple_pixel_size': 5}, 114 | {'CHM_pixel_size': 7.5} 115 | ]) 116 | ```` 117 | ### Parallelize metric computation 118 | On computers with multiple cores, processing can be sped up significantly by multiprocessing. To this end, 119 | we provide a function `calc_custom_metrics_parallel`, which takes similar arguments to `calc_custom_metrics` 120 | but runs on multiple cores. Note that the parallelization is carried out over the raster cells, i.e., the multiple 121 | processes treat different subsets of the raster cells. As there is a certain overhead in starting the processes, 122 | a speedup is only expected if there is a large enough number of (a) valid raster cells and (b) metrics that are complex 123 | to compute. Depending on the parameter `multiprocessing_point_threshold`, the function either spawns multiple processes 124 | (in case the input point cloud contains more points than the threshold) or passes the arguments on to `calc_custom_metrics`. A hedged usage sketch is given in the appendix at the end of this README. 125 | 126 | The other parameters are `n_chunks` (default: 16), which is the number of blocks the raster cells are divided into for processing, 127 | and `n_processes` (default: 4), which is the number of concurrent processes. A higher number of `n_chunks` uses less memory, but takes 128 | longer due to the overhead. 129 | 130 | On systems with sufficient memory (RAM > (number of processes) x (max. size of a tile)), it is generally better to parallelize over 131 | input tiles rather than pixels. 132 | 133 | ### Plotwise metric extraction 134 | You can find an example notebook for plotwise metric extraction [here](https://github.com/lwiniwar/pyForMetrix/blob/main/demo/plotwise_metrics.ipynb), or run it directly on Binder: 135 | 136 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/lwiniwar/pyForMetrix/HEAD?labpath=demo%2Fplotwise_metrics.ipynb) 137 | 138 | ## Full / API documentation 139 | The full documentation can be found at [readthedocs](https://pyformetrix.readthedocs.io/en/latest/). 140 | 141 | 142 | ## Dependencies 143 | This package relies on the following packages (installed automatically when using pip). Thank you to all developers making this project possible! 144 | 145 | - [`laspy`](https://laspy.readthedocs.io/) 146 | - [`laxpy`](https://github.com/brycefrank/laxpy) 147 | - [`numpy`](https://numpy.org/) 148 | - [`scipy`](https://scipy.org/) 149 | - [`pandas`](https://pandas.pydata.org/) 150 | - [`tqdm`](https://tqdm.github.io/) 151 | - [`xarray`](https://docs.xarray.dev/en/stable/) 152 | - [`matplotlib`](https://matplotlib.org/) 153 | - [`shapely`](https://shapely.readthedocs.io/en/stable/manual.html) 154 | - [`lmoments3`](https://pypi.org/project/lmoments3/) 155 | - [`deprecated`](https://pypi.org/project/Deprecated/) 156 | 157 | ## Acknowledgement 158 | This package has been developed in the course of the *UncertainTree* project, funded by the Austrian Science Fund ([FWF](https://www.fwf.ac.at/)) [Grant number J 4672-N].
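159 | 160 | ## Appendix: parallelized computation sketch 161 | The following is a minimal, hedged sketch of the workflow described in *Parallelize metric computation* above, reusing the Vancouver sample tile from *Getting started*. `MCalc_HeightMetrics` stands in for any metric calculator, the keyword values are illustrative rather than recommended, and the exact behaviour of `calc_custom_metrics_parallel` should be verified against the [API documentation](https://pyformetrix.readthedocs.io/en/latest/). 162 | 163 | ```python 164 | import numpy as np 165 | import laspy 166 | 167 | from pyForMetrix.metrix import RasterMetrics 168 | from pyForMetrix.metricCalculators.types import MCalc_HeightMetrics 169 | 170 | if __name__ == '__main__':  # guard the entry point, as worker processes may be spawned 171 |     inFile = laspy.read(r"4830E_54560N.las") 172 |     points = { 173 |         'points': np.vstack([inFile.x, inFile.y, inFile.z]).transpose(), 174 |         'echo_number': inFile.return_number, 175 |         'classification': inFile.classification 176 |     } 177 |     rm = RasterMetrics(points, raster_size=25) 178 |     metrics = rm.calc_custom_metrics_parallel( 179 |         metrics=MCalc_HeightMetrics(), 180 |         multiprocessing_point_threshold=500_000,  # fewer points: falls back to calc_custom_metrics 181 |         n_chunks=16,  # number of blocks the raster cells are divided into 182 |         n_processes=4)  # number of concurrent processes 183 |     print(metrics) 184 | ```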
-------------------------------------------------------------------------------- /demo/las_623_5718_1_th_2014-2019.lax: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/demo/las_623_5718_1_th_2014-2019.lax -------------------------------------------------------------------------------- /demo/netzkater_polygons.gpkg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/demo/netzkater_polygons.gpkg -------------------------------------------------------------------------------- /demo/plotwise_metrics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "## Derive plotwise metrics with pyForMetrix" 7 | ], 8 | "metadata": { 9 | "collapsed": false 10 | } 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "source": [ 15 | "In this tutorial, we will derive some LiDAR metrics for forest inventory (FI) plots.\n", 16 | "\n", 17 | "First, we ensure that the required packages are installed:" 18 | ], 19 | "metadata": { 20 | "collapsed": false 21 | } 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 1, 26 | "outputs": [ 27 | { 28 | "name": "stdout", 29 | "output_type": "stream", 30 | "text": [ 31 | "Requirement already satisfied: pyForMetrix in c:\\users\\lukas\\miniconda3\\envs\\fsct\\lib\\site-packages (0.0.1)\n", 32 | "Requirement already satisfied: shapely in c:\\users\\lukas\\miniconda3\\envs\\fsct\\lib\\site-packages (1.8.4)\n" 33 | ] 34 | } 35 | ], 36 | "source": [ 37 | "!python -m pip install pyForMetrix\n", 38 | "!python -m pip install geopandas wget" 39 | ], 40 | "metadata": { 41 | "collapsed": false 42 | } 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "outputs": [], 48 | "source": [], 49 | "metadata": { 50 | "collapsed": false 51 | } 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "source": [ 56 | "We then need to gather some data. In this example, we use data from Germany (Open Geodata Thüringen, ©GDI-Th, [dl-de/by-2-0](http://www.govdata.de/dl-de/by-2-0)). They can be downloaded from the [geodata portal of the State of Thuringia](https://www.geoportal-th.de/de-de/Downloadbereiche/Download-Offene-Geodaten-Th%C3%BCringen) (in German). **Approximate download size: 120 MB**" 57 | ], 58 | "metadata": { 59 | "collapsed": false 60 | } 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 1, 65 | "outputs": [ 66 | { 67 | "name": "stdout", 68 | "output_type": "stream", 69 | "text": [ 70 | "Ready!\n" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "import os, wget, zipfile\n", 76 | "if not os.path.exists('las_623_5718_1_th_2014-2019.laz'):\n", 77 | " if not os.path.exists('data_netzkater.zip'):\n", 78 | " print('Downloading file')\n", 79 | " wget.download('https://geoportal.geoportal-th.de/hoehendaten/LAS/las_2014-2019/las_623_5718_1_th_2014-2019.zip', 'data_netzkater.zip')\n", 80 | " print('Unzipping file')\n", 81 | " zipfile.ZipFile('data_netzkater.zip').extractall('.')\n", 82 | "print('Ready!')" 83 | ], 84 | "metadata": { 85 | "collapsed": false 86 | } 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "source": [ 91 | "We then use `laspy` to load the file. 
As the input point cloud is not normalized by height, we first use a utility function in `pyForMetrix.normalizer` to do that for us.\n" 92 | ], 93 | "metadata": { 94 | "collapsed": false 95 | } 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": 2, 100 | "outputs": [ 101 | { 102 | "name": "stdout", 103 | "output_type": "stream", 104 | "text": [ 105 | "Rasterizing for point normalization...\n" 106 | ] 107 | }, 108 | { 109 | "name": "stderr", 110 | "output_type": "stream", 111 | "text": [ 112 | "Normalizing LiDAR points: 100%|██████████| 40000/40000 [00:03<00:00, 11261.75it/s]\n" 113 | ] 114 | } 115 | ], 116 | "source": [ 117 | "import laspy\n", 118 | "file = laspy.read(r\"las_623_5718_1_th_2014-2019.laz\")\n", 119 | "points = {\n", 120 | " 'points': file.xyz,\n", 121 | " 'classification': file.classification,\n", 122 | " 'scan_angle_rank': file.scan_angle_rank\n", 123 | "}\n", 124 | "\n", 125 | "from pyForMetrix.normalizer import normalize\n", 126 | "points = normalize(points, distance=5)" 127 | ], 128 | "metadata": { 129 | "collapsed": false 130 | } 131 | }, 132 | { 133 | "cell_type": "markdown", 134 | "source": [ 135 | "Now we load some polygons from a vector file. These polygons represent circular areas, for which e.g. forest inventories have been carried out, but any valid polygon shape may be used. Our data is stored in a [GeoPackage](https://www.geopackage.org/) file, but any file supported by [GeoPandas](https://geopandas.org/en/stable/) or [Shapely](https://shapely.readthedocs.io/en/stable/manual.html) will work." 136 | ], 137 | "metadata": { 138 | "collapsed": false 139 | } 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 3, 144 | "outputs": [ 145 | { 146 | "name": "stdout", 147 | "output_type": "stream", 148 | "text": [ 149 | " geometry\n", 150 | "0 POLYGON ((623210.094 5718429.347, 623210.781 5...\n", 151 | "1 POLYGON ((623257.611 5718187.129, 623258.297 5...\n", 152 | "2 POLYGON ((623700.325 5718631.002, 623701.012 5...\n", 153 | "3 POLYGON ((623709.596 5718801.366, 623710.283 5...\n", 154 | "4 POLYGON ((623910.093 5718108.321, 623910.779 5...\n", 155 | "5 POLYGON ((623234.432 5718588.122, 623235.119 5...\n", 156 | "6 POLYGON ((623516.054 5718391.102, 623516.740 5...\n" 157 | ] 158 | } 159 | ], 160 | "source": [ 161 | "import geopandas as gpd\n", 162 | "plots = gpd.GeoDataFrame.from_file(r\"netzkater_polygons.gpkg\")\n", 163 | "print(plots)" 164 | ], 165 | "metadata": { 166 | "collapsed": false 167 | } 168 | }, 169 | { 170 | "cell_type": "markdown", 171 | "source": [ 172 | "Now we will calculate metrics for each of these plots. In this example, we use a combination of different metrics in the `types` namespace. As we don't have an index file (.lax) for this input file, scanning through all the points may take a minute or two. If you have [LAStools](https://rapidlasso.com/lastools/) installed, you can create an index file by running `lasindex -i las_623_5718_1_th_2014-2019.laz`."
173 | ], 174 | "metadata": { 175 | "collapsed": false 176 | } 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 4, 181 | "outputs": [ 182 | { 183 | "name": "stderr", 184 | "output_type": "stream", 185 | "text": [ 186 | "Scanning input files to find polygon plots: 100%|███████████████████████████████████████████████████████████████████████| 1/1 [00:05<00:00, 5.35s/it]\n", 187 | "Calculating metrics: 100%|██████████| 7/7 [00:00<00:00, 700.05it/s]" 188 | ] 189 | }, 190 | { 191 | "name": "stdout", 192 | "output_type": "stream", 193 | "text": [ 194 | " d10 d20 d30 d40 d50 d60 d70 d80 d90 d100 ... p70 p80 \\\n", 195 | "0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 ... 390.7311 392.3204 \n", 196 | "1 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 ... 468.9515 470.5180 \n", 197 | "2 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 ... 406.6710 407.7968 \n", 198 | "3 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 ... 434.1044 435.8306 \n", 199 | "4 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 ... 377.1825 379.0810 \n", 200 | "5 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 ... 325.9076 326.1004 \n", 201 | "6 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 0.0 ... 327.1171 327.3150 \n", 202 | "\n", 203 | " p90 p100 h_mean h_stddev h_absdev h_skew h_kurtosis \\\n", 204 | "0 393.7132 400.546 382.338601 9.024213 8.644587 0.153006 -1.736357 \n", 205 | "1 471.9530 474.233 458.965645 10.989637 10.410608 -0.085498 -1.753655 \n", 206 | "2 408.8468 413.032 397.055385 10.786964 10.365036 -0.177707 -1.769685 \n", 207 | "3 438.1616 441.869 421.794961 14.293303 13.655775 -0.162661 -1.759780 \n", 208 | "4 380.5385 384.530 364.064363 13.554681 12.987975 0.062077 -1.811338 \n", 209 | "5 326.3062 326.627 325.513633 0.595316 0.509010 -0.084625 -1.069922 \n", 210 | "6 327.5887 327.990 326.703678 0.645383 0.547947 -0.013708 -0.996035 \n", 211 | "\n", 212 | " h_entropy \n", 213 | "0 2.063130 \n", 214 | "1 2.628478 \n", 215 | "2 2.532251 \n", 216 | "3 2.578637 \n", 217 | "4 2.448916 \n", 218 | "5 2.963720 \n", 219 | "6 2.940392 \n", 220 | "\n", 221 | "[7 rows x 26 columns]\n" 222 | ] 223 | }, 224 | { 225 | "name": "stderr", 226 | "output_type": "stream", 227 | "text": [ 228 | "\n" 229 | ] 230 | } 231 | ], 232 | "source": [ 233 | "from pyForMetrix.metrix import PlotMetrics\n", 234 | "from pyForMetrix.metricCalculators.types import *\n", 235 | "\n", 236 | "pm = PlotMetrics([\"las_623_5718_1_th_2014-2019.laz\"], plots)\n", 237 | "mc = [MCalc_DensityMetrics(), MCalc_HeightMetrics(), MCalc_VarianceMetrics()]\n", 238 | "metr = pm.calc_custom_metrics(mc)\n", 239 | "print(metr)" 240 | ], 241 | "metadata": { 242 | "collapsed": false 243 | } 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": 9, 248 | "outputs": [ 249 | { 250 | "name": "stdout", 251 | "output_type": "stream", 252 | "text": [ 253 | "-102.91839999999996\n" 254 | ] 255 | } 256 | ], 257 | "source": [ 258 | "print(np.min(points['points'][:, 2\n", 259 | " ]))" 260 | ], 261 | "metadata": { 262 | "collapsed": false 263 | } 264 | } 265 | ], 266 | "metadata": { 267 | "kernelspec": { 268 | "display_name": "Python 3", 269 | "language": "python", 270 | "name": "python3" 271 | }, 272 | "language_info": { 273 | "codemirror_mode": { 274 | "name": "ipython", 275 | "version": 2 276 | }, 277 | "file_extension": ".py", 278 | "mimetype": "text/x-python", 279 | "name": "python", 280 | "nbconvert_exporter": "python", 281 | "pygments_lexer": "ipython2", 282 | "version": "2.7.6" 283 | } 284 | }, 285 | "nbformat": 4, 286 | "nbformat_minor": 0 287 | } 288 | 
-------------------------------------------------------------------------------- /docs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/docs/.gitkeep -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | m2r2 2 | sphinx 3 | sphinx_pyproject 4 | furo -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | 3 | import os 4 | import sys 5 | sys.path.insert(0, os.path.abspath('../../src')) 6 | # -- Project information 7 | 8 | from sphinx_pyproject import SphinxConfig 9 | 10 | config = SphinxConfig("../../pyproject.toml", globalns=globals()) 11 | # 12 | project = 'pyForMetrix' 13 | copyright = '2022, Lukas Winiwarter' 14 | author = 'Lukas Winiwarter' 15 | master_doc = 'index' 16 | # release = '0.0' 17 | # version = '0.0.1a' 18 | 19 | 20 | autodoc_mock_imports = ['xarray', 'pandas', 'numpy', 21 | 'scipy', 'laxpy', 'tqdm', 'laspy', 22 | 'matplotlib', 'shapely', 'deprecated'] 23 | # -- General configuration 24 | 25 | extensions = [ 26 | 'sphinx.ext.duration', 27 | 'sphinx.ext.doctest', 28 | 'sphinx.ext.autodoc', 29 | 30 | 'm2r2', 31 | 'sphinx.ext.napoleon', 32 | 'sphinx.ext.autosummary', 33 | 'sphinx.ext.intersphinx', 34 | ] 35 | 36 | intersphinx_mapping = { 37 | 'python': ('https://docs.python.org/3/', None), 38 | 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), 39 | 'numpy': ('https://numpy.org/doc/stable/', None), 40 | 'xarray': ('https://docs.xarray.dev/en/stable/', None), 41 | 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None), 42 | 'geopandas': ('https://geopandas.org/en/stable/', None), 43 | } 44 | 45 | 46 | intersphinx_disabled_domains = ['std'] 47 | 48 | 49 | templates_path = ['_templates'] 50 | 51 | # -- Options for HTML output 52 | 53 | html_theme = 'furo' 54 | 55 | # -- Options for EPUB output 56 | epub_show_urls = 'footnote' 57 | 58 | # 59 | # import matplotlib 60 | # matplotlib.use('agg') -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to the pyForMetrix documentation! 2 | ========================================= 3 | 4 | .. warning:: 5 | Note that `pyForMetrix` is in an early development stage. You should expect bugs and incorrect results. 6 | If you want to help us improve `pyForMetrix`, consider `reporting bugs on GitHub <https://github.com/lwiniwar/pyForMetrix/issues>`_ or `contributing to the documentation and code <https://github.com/lwiniwar/pyForMetrix>`_! 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | 11 | readme 12 | metrix 13 | metricCalculators 14 | -------------------------------------------------------------------------------- /docs/source/metricCalculators.rst: -------------------------------------------------------------------------------- 1 | LiDAR metric sets by publication 2 | ================================ 3 | .. automodule:: pyForMetrix.metricCalculators.publications 4 | :members: 5 | :inherited-members: 6 | :special-members: __call__ 7 | 8 | LiDAR metric sets by type 9 | ========================= 10 | .. automodule:: pyForMetrix.metricCalculators.types 11 | :members: 12 | :inherited-members: 13 | :special-members: __call__ 14 | -------------------------------------------------------------------------------- /docs/source/metrix.rst: -------------------------------------------------------------------------------- 1 | Calculation base classes 2 | ========================= 3 | 4 | .. automodule:: pyForMetrix.metrix 5 | :members: 6 | :inherited-members: 7 | :special-members: __call__, __init__ -------------------------------------------------------------------------------- /docs/source/readme.rst: -------------------------------------------------------------------------------- 1 | ..
mdinclude:: ../../README.md 2 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: pyForMetrix 2 | dependencies: 3 | - python=3.9 4 | - numpy 5 | - scipy 6 | - pandas 7 | - tqdm 8 | - xarray 9 | - matplotlib 10 | - shapely 11 | - rioxarray 12 | - pip: 13 | - laxpy 14 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "pyForMetrix" 7 | version = "0.0.7" 8 | authors = [ 9 | { name="Lukas Winiwarter", email="lukas.pypi@winiwarter.dev" }, 10 | ] 11 | description = "" 12 | readme = "README.md" 13 | requires-python = ">=3.8" 14 | dependencies = [ 15 | "pip>=19.3", 16 | "laxpy", 17 | "laspy", 18 | "numpy", 19 | "scipy", 20 | "pandas", 21 | "tqdm", 22 | "xarray", 23 | "matplotlib", 24 | "shapely", 25 | "lmoments3", 26 | "deprecated" 27 | ] 28 | #dependencies = [ 29 | # "pip>=19.3", 30 | # "laxpy>=0.2.2", 31 | # "laspy>=2.1.2", 32 | # "numpy>=1.23.1", 33 | # "scipy>=1.9.0", 34 | # "pandas>=1.4.3", 35 | # "tqdm>=4.64.0", 36 | # "xarray>=0.20.1", 37 | # "matplotlib>=3.6.2", 38 | # "shapely>=1.8.4" 39 | #] 40 | classifiers = [ 41 | "Programming Language :: Python :: 3", 42 | "License :: OSI Approved :: MIT License", 43 | "Operating System :: OS Independent", 44 | ] 45 | 46 | [project.urls] 47 | "Homepage" = "https://github.com/lwiniwar/pyForMetrix" 48 | "Bug Tracker" = "https://github.com/lwiniwar/pyForMetrix/issues" 49 | 50 | [tool.hatch.metadata] 51 | allow-direct-references = true 52 | -------------------------------------------------------------------------------- /src/pyForMetrix/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/__init__.py -------------------------------------------------------------------------------- /src/pyForMetrix/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /src/pyForMetrix/__pycache__/__init__.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/__pycache__/__init__.cpython-39.pyc -------------------------------------------------------------------------------- /src/pyForMetrix/__pycache__/metrix.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/__pycache__/metrix.cpython-310.pyc -------------------------------------------------------------------------------- /src/pyForMetrix/__pycache__/rasterizer.cpython-310.pyc: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/__pycache__/rasterizer.cpython-310.pyc -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/__init__.py: -------------------------------------------------------------------------------- 1 | import abc 2 | 3 | 4 | class MetricCalculator(abc.ABC): 5 | _names = [] 6 | def __call__(self, *args, **kwargs): 7 | raise NotImplementedError 8 | 9 | def __len__(self): 10 | return len(self.get_names()) 11 | 12 | def get_names(self): 13 | return self._names 14 | -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/__pycache__/__init__.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/metricCalculators/__pycache__/__init__.cpython-310.pyc -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/__pycache__/publications.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/metricCalculators/__pycache__/publications.cpython-310.pyc -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/__pycache__/types.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/metricCalculators/__pycache__/types.cpython-310.pyc -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/HOME.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def HOME_home(points, zmin=None): 5 | z = points['points'][:, 2] 6 | intensity = points['intensity'] 7 | if zmin is not None: 8 | valid_idx = z >= zmin 9 | z = z[valid_idx] 10 | intensity = intensity[valid_idx] 11 | order = np.argsort(z) 12 | z = z[order] 13 | intensity = intensity[order] 14 | csum1 = np.cumsum(intensity).astype(float) 15 | csum2 = np.cumsum(intensity[::-1])[::-1].astype(float) 16 | diffc = np.diff(np.sign(csum1 - csum2)) 17 | # import matplotlib.pyplot as plt 18 | # plt.plot(csum1, 'r-') 19 | # plt.plot(csum2, 'k-') 20 | # plt.plot(csum1-csum2, 'b--') 21 | # plt.show() 22 | loc = np.nonzero(diffc) 23 | if len(z[loc]) == 0: # no solution 24 | return np.nan 25 | return z[loc][0] 26 | 27 | 28 | 29 | 30 | 31 | if __name__ == '__main__': 32 | import laspy 33 | 34 | f = laspy.read(r"C:\Users\Lukas\Documents\Data\PetawawaHarmonized\Harmonized\2016_ALS\4_plots_clipped\1_inv\PRF009.las") 35 | points = {'points': f.xyz, 'intensity': f.intensity} 36 | z = HOME_home(points) 37 | print(np.sum(points['intensity'][points['points'][:, 2] <= z])) 38 | print(np.sum(points['intensity'][points['points'][:, 2] >= z])) -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/Lmoments.py: -------------------------------------------------------------------------------- 1 | import lmoments3 2 | import numpy as np 3 | 4 | 5 | def Lmoments_moments(points): 6 | try: 7 | res = 
lmoments3.lmom_ratios(points['points'][:, 2], 4) 8 | except: 9 | res = np.array([np.nan, np.nan, np.nan, np.nan]) 10 | return res 11 | 12 | def Lmoments_coefficients(points): 13 | mom = Lmoments_moments(points) 14 | L_CV = mom[1]/mom[0] 15 | L_skew = mom[2]/mom[1] 16 | L_kurt = mom[3]/mom[2] 17 | return np.array([L_skew, L_kurt, L_CV]) -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/metricCalculators/lidRmetrics/__init__.py -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/basic.py: -------------------------------------------------------------------------------- 1 | from functools import lru_cache 2 | 3 | import numpy as np 4 | import scipy.stats 5 | 6 | #@lru_cache(100) 7 | def basic_n(points): 8 | return points['points'].shape[0] 9 | #@lru_cache(100) 10 | def basic_zmax(points): 11 | return np.max(points['points'][:, 2]) 12 | #@lru_cache(100) 13 | def basic_zmin(points): 14 | return np.min(points['points'][:, 2]) 15 | #@lru_cache(100) 16 | def basic_zmean(points): 17 | if points['points'].shape[0] == 0: 18 | return np.nan 19 | return np.mean(points['points'][:, 2]) 20 | 21 | def basic_zsd(points): 22 | return np.std(points['points'][:, 2]) 23 | 24 | def basic_zcv(points): 25 | return scipy.stats.variation(points['points'][:, 2]) 26 | 27 | def basic_zskew(points): 28 | return scipy.stats.skew(points['points'][:, 2]) 29 | 30 | def basic_zkurt(points): 31 | return scipy.stats.kurtosis(points['points'][:, 2]) 32 | 33 | -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/canopydensity.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pyForMetrix.metricCalculators.lidRmetrics.basic import basic_n, basic_zmax 4 | def canopydensity_zpcum(points, num_groups): 5 | n_points = basic_n(points) 6 | max_height = basic_zmax(points) 7 | return np.array([np.count_nonzero(points['points'][:, 2] <= (group * max_height/num_groups)) / n_points 8 | for group in range(1, num_groups)]) -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/dispersion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.stats 3 | from pyForMetrix.metricCalculators.lidRmetrics.basic import basic_zmean 4 | 5 | def dispersion_ziqr(points): 6 | return np.diff(np.quantile(points['points'][:, 2], [0.25, 0.75]))[0] 7 | 8 | def dispersion_zMADmean(points): 9 | mean = basic_zmean(points) 10 | if np.isnan(mean): 11 | return np.nan 12 | return np.mean(np.abs(points['points'][:, 2] - mean)) 13 | 14 | def dispersion_zMADmedian(points): 15 | median = np.median(points['points'][:, 2]) 16 | return np.mean(np.abs(points['points'][:, 2] - median)) 17 | 18 | def dispersion_CRR(points): 19 | mean = basic_zmean(points) 20 | ptp = np.ptp(points['points'][:, 2]) 21 | min = np.min(points['points'][:, 2]) 22 | return (mean - min)/(ptp) 23 | 24 | def dispersion_zentropy(points, binsize=1): 25 | hist = np.histogram(points['points'][:, 2], 26 | bins=np.arange(np.min(points['points'][:, 2]), 
np.max(points['points'][:, 2]) + binsize, binsize), 27 | density=True)[0] 28 | hist = hist.flatten() 29 | hist = hist[hist.nonzero()] 30 | return scipy.stats.entropy(hist) 31 | 32 | def dispersion_VCI(points, binsize=1): 33 | hist, bins = np.histogram(points['points'][:, 2], 34 | bins=np.arange(np.min(points['points'][:, 2]), np.max(points['points'][:, 2]) + binsize, binsize), 35 | density=True) 36 | hist = hist.flatten() 37 | hist = hist[hist.nonzero()] 38 | return -1 * np.sum(hist * np.log(hist)) / np.log(len(bins)) 39 | 40 | -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/echo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pyForMetrix.metricCalculators.lidRmetrics.basic import basic_n 4 | 5 | def echo_pFirst(points): 6 | n = basic_n(points) 7 | first = np.count_nonzero(points['echo_number'] == 1) 8 | return first/n 9 | def echo_pIntermediate(points): 10 | n = basic_n(points) 11 | intermediate = np.count_nonzero((points['echo_number'] > 1) & (points['echo_number'] != points['number_of_echoes'])) 12 | return intermediate/n 13 | 14 | def echo_pLast(points): 15 | n = basic_n(points) 16 | last = np.count_nonzero((points['echo_number'] == points['number_of_echoes']) & (points['echo_number'] > 1)) # excluding single echoes 17 | return last/n 18 | 19 | def echo_pSingle(points): 20 | n = basic_n(points) 21 | single = np.count_nonzero(points['number_of_echoes'] == 1) 22 | return single/n 23 | def echo_pMultiple(points): 24 | n = basic_n(points) 25 | multiple = np.count_nonzero(points['number_of_echoes'] > 1) 26 | return multiple/n -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/interval.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pyForMetrix.metricCalculators.lidRmetrics.basic import basic_n 4 | 5 | def interval_p_below(points, threshold): 6 | n = basic_n(points) 7 | xyz = points['points'] 8 | return np.count_nonzero(xyz[:, 2] < threshold) / n -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/kde.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.stats 3 | 4 | def kde_kde(points, bw=2): 5 | z = points['points'][:, 2] 6 | try: 7 | kernel = scipy.stats.gaussian_kde(z, bw) 8 | except: 9 | # could fail because of singular matrices or too few entries 10 | return np.nan, np.nan, np.nan 11 | domain = np.arange(z.min(), z.max(), bw) 12 | estim = kernel(domain) 13 | peaks = np.argwhere(np.diff(np.sign(np.diff(estim))) < 0) +1 14 | # import matplotlib.pyplot as plt 15 | # plt.plot(domain, estim, 'b-', linewidth=0.4) 16 | # plt.plot(domain[peaks], estim[peaks], 'ro', markersize=2) 17 | # plt.show() 18 | n_peaks = len(peaks) 19 | elevs = estim[peaks][::-1] # sort from top to bottom 20 | values = domain[peaks][::-1] 21 | return n_peaks, elevs, values 22 | 23 | 24 | if __name__ == '__main__': 25 | import laspy 26 | 27 | f = laspy.read(r"C:\Users\Lukas\Documents\Data\PetawawaHarmonized\Harmonized\2016_ALS\4_plots_clipped\1_inv\PRF009.las") 28 | points = {'points': f.xyz} 29 | print(kde_kde(points, bw=0.05)) -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/lad.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.stats 3 | 4 | from pyForMetrix.metricCalculators.lidRmetrics.basic import basic_n, basic_zmax, basic_zmin 5 | def lad_lad(points, dz): 6 | z = points['points'][:, 2] 7 | n = basic_n(points) 8 | zmax = basic_zmax(points) 9 | p_i = np.array([np.count_nonzero(z <= lower_bound) / (n - np.count_nonzero(z <= (lower_bound + dz))) 10 | for lower_bound in np.arange(0, zmax-dz, dz)]) # omit last layer, as it will result in #DIV0 11 | p_i = p_i[p_i > 0] 12 | if len(p_i) == 0: 13 | return np.array([np.nan, np.nan, np.nan, np.nan]) 14 | LAI_i = -1 * np.log(p_i) / 0.5 15 | LAD_i = LAI_i / dz 16 | return np.array([np.max(LAD_i), np.mean(LAD_i), scipy.stats.variation(LAD_i), np.min(LAD_i)]) -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/percabove.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pyForMetrix.metricCalculators.lidRmetrics.basic import basic_n, basic_zmean 4 | 5 | def percabove_pzabovemean(points): 6 | n_points = basic_n(points) 7 | mean = basic_zmean(points) 8 | return np.count_nonzero(points['points'][:, 2] > mean) / n_points 9 | 10 | def percabove_pzaboveX(points, X): 11 | n_points = basic_n(points) 12 | return np.count_nonzero(points['points'][:, 2] > X) / n_points 13 | 14 | 15 | -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/percentiles.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | def percentiles_z(points, percentiles): 3 | return np.percentile(points['points'][:, 2], percentiles) -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/rumple.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy 3 | 4 | from pyForMetrix.utils.rasterizer import Rasterizer 5 | 6 | 7 | def rumple_index(points, rumple_pixel_size): 8 | xyz = points['points'] 9 | # rumple index 10 | ras = Rasterizer(xyz, raster_size=(rumple_pixel_size, rumple_pixel_size)) 11 | CHM_x, CHM_y, CHM_z = ras.to_matrix(reducer=np.max) 12 | area_3d = 0 13 | CHM_xx, CHM_yy = np.meshgrid(CHM_x, CHM_y) 14 | raster_points = np.vstack([CHM_xx.flatten(), CHM_yy.flatten(), CHM_z.flatten()]).T 15 | raster_points = raster_points[np.logical_not(np.isnan(raster_points[:, 2])), :] 16 | if raster_points.shape[0] < 4: # min. 4 points needed for convex hull 17 | return np.nan 18 | try: 19 | tri = scipy.spatial.Delaunay(raster_points[:, :2]) 20 | for p1, p2, p3 in tri.simplices: 21 | a = raster_points[p2] - raster_points[p1] 22 | b = raster_points[p3] - raster_points[p1] 23 | # c = raster_points[p2] - raster_points[p3] 24 | tri_3d = np.linalg.norm(np.cross(a, b)) / 2 25 | area_3d += tri_3d 26 | hull_2d = scipy.spatial.ConvexHull(raster_points[:, :2]) 27 | except Exception as e: 28 | # print(e) 29 | # print("Setting rumple index to nan and continuing...") 30 | return np.nan 31 | area_2d = hull_2d.volume # volume in 2D is the area! 
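# rumple index: ratio of the 3D (draped) surface area to its planimetric 2D area; 1 for a perfectly flat surface, larger for rougher canopies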
32 | rumple = area_3d / area_2d 33 | return rumple 34 | -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/lidRmetrics/voxels.py: -------------------------------------------------------------------------------- 1 | from functools import reduce, lru_cache 2 | from operator import mul 3 | 4 | import numpy as np 5 | import scipy.stats 6 | 7 | from pyForMetrix.utils.voxelizer import Voxelizer 8 | 9 | 10 | def create_voxelization(points, voxel_size): 11 | vox = Voxelizer(points['points'], voxel_size=voxel_size) 12 | XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = vox.voxelize() 13 | return XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex 14 | 15 | #@lru_cache(100) 16 | def create_histogram(z, voxel_size): 17 | return np.histogram(z, bins=np.arange(z.min(), z.max() + 2*voxel_size, voxel_size), density=True)[0] 18 | #@lru_cache(100) 19 | def voxels_vn(idxVoxelUnique): 20 | return idxVoxelUnique.shape[0] 21 | 22 | def voxels_vFRall(idxVoxelUnique): 23 | spanAll = np.ptp(idxVoxelUnique, axis=0) 24 | vall = reduce(mul, spanAll) 25 | if vall == 0: 26 | return np.nan 27 | return voxels_vn(idxVoxelUnique) / vall 28 | 29 | def voxels_vFRcanopy(idxVoxelUnique): 30 | idxVoxelUnique = idxVoxelUnique.astype(int) 31 | spanAll = np.ptp(idxVoxelUnique, axis=0) 32 | map2d = np.zeros((spanAll[0]+1, spanAll[1]+1)) 33 | for (x,y,z) in idxVoxelUnique: 34 | map2d[x,y] = np.max([z, map2d[x,y]]) 35 | vbelowcanopy = np.sum(map2d) 36 | return voxels_vn(idxVoxelUnique) / vbelowcanopy 37 | 38 | def voxels_vzrumple(idxVoxelUnique, voxel_size): 39 | z = idxVoxelUnique[:, 2] 40 | hist = create_histogram(z, voxel_size) 41 | flength = np.sum(np.sqrt(np.square(hist) + voxel_size ** 2)) 42 | fheight = len(hist) * voxel_size 43 | return flength/fheight 44 | 45 | def voxels_vzsd(idxVoxelUnique, voxel_size): 46 | z = idxVoxelUnique[:, 2] 47 | hist = create_histogram(z, voxel_size) 48 | return np.std(hist) 49 | def voxels_vzcv(idxVoxelUnique, voxel_size): 50 | z = idxVoxelUnique[:, 2] 51 | hist = create_histogram(z, voxel_size) 52 | return scipy.stats.variation(hist) 53 | 54 | def voxels_lefsky(idxVoxelUnique): 55 | """ 56 | 57 | Args: 58 | idxVoxelUnique: 59 | 60 | Returns: Percentages of: 61 | - Empty voxels above canopy 62 | - Empty voxels below canopy 63 | - Filled voxels in the top 65% of the canopy (Euphotic) 64 | - Filled voxels below the top 65% of the canopy (Oligophotic) 65 | """ 66 | idxVoxelUnique = idxVoxelUnique.astype(int) 67 | spanAll = np.ptp(idxVoxelUnique, axis=0) 68 | map2d = np.zeros((spanAll[0]+1, spanAll[1]+1)) 69 | for (x,y,z) in idxVoxelUnique: 70 | map2d[x,y] = np.max([z, map2d[x,y]]) 71 | maxZ = np.max(map2d) 72 | counts = [0,0,0,0] 73 | for locx, locy in np.unique(idxVoxelUnique[:, :2], axis=0): 74 | voxels_at_xy = (idxVoxelUnique[:, 0] == locx) & (idxVoxelUnique[:, 1] == locy) 75 | counts[0] += maxZ - map2d[locx, locy] # empty voxels above canopy 76 | counts[1] += map2d[locx, locy] - np.count_nonzero( # empty voxels below canopy: height minus number of filled 77 | (voxels_at_xy)) # voxels at that location 78 | counts[2] += np.count_nonzero(voxels_at_xy & 79 | (idxVoxelUnique[:, 2] >= 0.65 * map2d[locx, locy])) 80 | counts[3] += np.count_nonzero(voxels_at_xy & 81 | (idxVoxelUnique[:, 2] < 0.65 * map2d[locx, locy])) 82 | # do we need to add full empty columns to the empty voxels above? I don't think so, as this would 83 | # include all the voxels outside of the mbr/polygon/... 
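# so the following line is intentionally left disabled: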
84 | # counts[0] += np.count_nonzero(map2d == 0) * maxZ 85 | counts = np.array(counts) / np.sum(counts) * 100. 86 | return counts 87 | 88 | if __name__ == '__main__': 89 | import laspy 90 | f = laspy.read(r"C:\Users\Lukas\Documents\Data\PetawawaHarmonized\Harmonized\2016_ALS\4_plots_clipped\2_psp\PSP 005.las") 91 | points = {'points': f.xyz} 92 | XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = create_voxelization(points, 1) 93 | voxels_vFRcanopy(idxVoxelUnique) -------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/publications.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import numpy as np 4 | import scipy 5 | 6 | from pyForMetrix.utils.rasterizer import Rasterizer 7 | from pyForMetrix.metricCalculators import MetricCalculator 8 | from pyForMetrix.metricCalculators.lidRmetrics.rumple import rumple_index 9 | 10 | 11 | class MCalc_White_et_al_2015(MetricCalculator): 12 | """ 13 | Metric calculation class 14 | 15 | Calculate metrics following 16 | White et al. (2015): 17 | Comparing ALS and Image-Based Point Cloud Metrics and 18 | Modelled Forest Inventory Attributes in a Complex Coastal 19 | Forest Environment 20 | 21 | https://doi.org/10.3390/f6103704 22 | 23 | See Table 6 in the paper and :meth:`__call__` for more information. 24 | 25 | """ 26 | name = "White et al. (2015)" 27 | 28 | @staticmethod 29 | def get_names(): 30 | """ 31 | List names of the generated metrics 32 | 33 | Returns: 34 | :class:`list`: 35 | list of strings with the metrics that will be generated. 36 | """ 37 | return [ 38 | "Hmean", 39 | "CoV", 40 | "Skewness", 41 | "Kurtosis", 42 | "P10", 43 | "P90", 44 | "CCmean", 45 | "Rumple" 46 | ] 47 | 48 | def __call__(self, points_in_poly, rumple_pixel_size=1): 49 | """ 50 | Calculate the metrics 51 | 52 | Args: 53 | points_in_poly: :class:`dict` that contains a key `points`, pointing to a :class:`numpy.ndarray` of shape (n,3) 54 | rumple_pixel_size: pixel size used for rumple index calculation 55 | 56 | Returns: 57 | :class:`numpy.ndarray`: 58 | - Hmean 59 | - CoV 60 | - Skewness 61 | - Kurtosis 62 | - P10 63 | - P90 64 | - CCmean 65 | - Rumple 66 | 67 | """ 68 | points = points_in_poly['points'] 69 | outArray = np.full(((len(self.get_names())), ), np.nan) 70 | points = points[points[:, 2] > 2] 71 | if points.shape[0] == 0: 72 | return outArray 73 | with warnings.catch_warnings(): 74 | warnings.simplefilter("ignore") 75 | outArray[0] = np.mean(points[:, 2]) 76 | outArray[1] = np.std(points[:, 2], ddof=1) / outArray[0] 77 | outArray[2] = scipy.stats.skew(points[:, 2]) 78 | outArray[3] = scipy.stats.kurtosis(points[:, 2]) 79 | outArray[4:6] = np.percentile(points[:, 2], [10, 90]) 80 | outArray[6] = np.count_nonzero(points[:, 2] > outArray[0]) / points.shape[0] 81 | rumple = rumple_index(points_in_poly, rumple_pixel_size) 82 | outArray[7] = rumple 83 | 84 | return outArray 85 | 86 | 87 | class MCalc_Hollaus_et_al_2009(MetricCalculator): 88 | """ 89 | Metric calculation class 90 | 91 | Calculate metrics following 92 | Hollaus et al. (2009): Growing stock estimation for alpine forests in Austria: a robust lidar-based approach. 93 | 94 | https://doi.org/10.1139/X09-042 95 | 96 | See Table 3 in the paper and :meth:`__call__` for more information. 97 | 98 | Args: 99 | height_bins_upper: a :class:`numpy.ndarray` defining the upper limits for the height bin classes 100 | 101 | """ 102 | name = "Hollaus et al. 
(2009)" 103 | 104 | def __init__(self, height_bins_upper=np.array([2, 5, 10, 15, 20, 25, 30, 35])): 105 | self.height_bins = np.array(height_bins_upper) 106 | 107 | 108 | def get_names(self): 109 | """ 110 | List names of the generated metrics 111 | 112 | Returns: 113 | :class:`list`: 114 | list of strings with the metrics that will be generated. As the number of height bins 115 | is given by the user, the length of the list depends on the settings. 116 | 117 | """ 118 | return [ 119 | f"v_fe_i{h}" for h in range(len(self.height_bins)) 120 | ] 121 | 122 | def __call__(self, points_in_poly, CHM_pixel_size=1): 123 | """ 124 | Calculate the metrics 125 | 126 | Args: 127 | points_in_poly: :class:`dict` that contains a key `points`, pointing to a :class:`numpy.ndarray` of shape (n,3) 128 | CHM_pixel_size: the pixel size for the canopy height model calculation 129 | 130 | Returns: 131 | :class:`numpy.ndarray`: 132 | - relative count of first echoes in each canopy height class 133 | 134 | """ 135 | points = points_in_poly['points'] 136 | # take first echoes only 137 | points = points[points_in_poly['echo_number'] == 1] 138 | if len(points) == 0: 139 | return np.zeros((len(self), )) 140 | fe_counts = np.zeros((len(self), )) 141 | fe_sumCHM = np.zeros((len(self), )) 142 | with warnings.catch_warnings(): 143 | warnings.simplefilter("ignore") 144 | # rasterize points 145 | ras = Rasterizer(points, raster_size=(CHM_pixel_size, CHM_pixel_size)) 146 | XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = ras.rasterize() 147 | for contents in XVoxelContains: 148 | points_in_cell = points[contents] 149 | cell_CHM = np.max(points_in_cell[:, 2]) 150 | 151 | fe_counts[np.argmin([max(0, elem) for elem in cell_CHM - self.height_bins])] += len(contents) 152 | fe_sumCHM[np.argmin([max(0, elem) for elem in cell_CHM - self.height_bins])] += np.sum(points_in_cell[:, 2]) 153 | total_fe = np.sum(fe_sumCHM) 154 | meanCHM = fe_sumCHM / fe_counts 155 | p_CHM = fe_counts / total_fe 156 | outArray = meanCHM * p_CHM 157 | outArray[np.isnan(outArray)] = 0 158 | return outArray 159 | 160 | 161 | class MCalc_Xu_et_al_2019(MetricCalculator): 162 | """ 163 | Metric calculation class 164 | 165 | Calculate metrics following Xu et al. (2019): 166 | Estimation of secondary forest parameters by integrating 167 | image and point cloud-based metrics acquired 168 | from unmanned aerial vehicle 169 | 170 | https://doi.org/10.1117/1.JRS.14.022204 171 | 172 | See Table 3 in the paper and :meth:`__call__` for more information. 173 | 174 | Args: 175 | percentiles: a :class:`numpy.ndarray` with values between 0 and 100, representing the percentiles to be calculated 176 | density_percentiles: a :class:`numpy.ndarray` with values between 0 and 100, representing the height percentiles for 177 | which densities are calculated 178 | 179 | """ 180 | 181 | name = "Xu et al. (2019)" 182 | 183 | def __init__(self, percentiles=np.array([10, 25, 30, 40, 60, 75, 85, 90]), 184 | density_percentiles=np.array([10, 25, 30, 40, 60, 75, 85, 90])): 185 | self.p = np.array(percentiles) 186 | self.d = np.array(density_percentiles) 187 | 188 | 189 | def get_names(self): 190 | """ 191 | List names of the generated metrics 192 | 193 | Returns: 194 | :class:`list`: 195 | list of strings with the metrics that will be generated. As the percentiles and density metrics 196 | can be of different lengths, the length of the list depends on the settings. 
197 | 
198 | """
199 | return [
200 | f"p{p}" for p in self.p] + \
201 | [f"d{d}" for d in self.d] + \
202 | [
203 | "h_mean",
204 | "h_max",
205 | "h_min",
206 | "h_cv",
207 | ]
208 | 
209 | def __call__(self, points_in_poly):
210 | """
211 | Calculate the metrics
212 | 
213 | Args:
214 | points_in_poly: :class:`dict` that contains a key `points`, pointing to a :class:`numpy.ndarray` of shape (n,3)
215 | 
216 | Returns:
217 | :class:`numpy.ndarray`:
218 | - Height percentiles (p10, p25, p30, p40, p60, p75, p85, p90)
219 | - Density metrics (d10, d25, d30, d40, d60, d75, d85, d90)
220 | 
221 | (The proportion of points above the height percentiles, Shen et al., 2018: https://doi.org/10.3390/rs10111729)
222 | 
223 | - Height variation metrics (h_mean, h_max, h_min, h_cv)
224 | 
225 | """
226 | points = points_in_poly['points']
227 | outArray = np.full(((len(self.get_names())), ), np.nan)
228 | points = points[points[:, 2] > 2]
229 | if points.shape[0] < 1:
230 | return outArray
231 | with warnings.catch_warnings():
232 | warnings.simplefilter("ignore")
233 | outArray[0:len(self.p)] = np.percentile(points[:, 2], self.p)
234 | total_points = points.shape[0]
235 | var_length_end = len(self.p) + len(self.d)
236 | max_height = np.max(points[:, 2])
237 | outArray[len(self.p):var_length_end] = [np.count_nonzero(points[:, 2] > val) / total_points
238 | for val in (self.d / 100. * max_height)]
239 | outArray[var_length_end] = np.mean(points[:, 2])
240 | outArray[var_length_end + 1] = max_height
241 | outArray[var_length_end + 2] = np.min(points[:, 2])
242 | outArray[var_length_end + 3] = np.std(points[:, 2], ddof=1) / outArray[var_length_end]
243 | return outArray
244 | 
245 | 
246 | 
247 | class MCalc_Woods_et_al_2009(MetricCalculator):
248 | """
249 | Metric calculation class
250 | 
251 | Calculate metrics following Woods et al. (2009):
252 | Predicting forest stand variables from LiDAR data
253 | in the Great Lakes – St. Lawrence forest of Ontario
254 | 
255 | https://doi.org/10.5558/tfc84827-6
256 | 
257 | See Section "LIDAR based predictors" in the paper and :meth:`__call__` for more information.
258 | 
259 | Args:
260 | percentiles: a :class:`numpy.ndarray` with values between 0 and 100, representing the percentiles to be calculated
261 | density_percentiles: a :class:`numpy.ndarray` with values between 0 and 100, representing the height percentiles for
262 | which densities are calculated
263 | """
264 | name = "Woods et al. (2009)"
265 | 
266 | def __init__(self, percentiles=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]),
267 | density_percentiles=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90])):
268 | self.p = np.array(percentiles)
269 | self.d = np.array(density_percentiles)
270 | 
271 | 
272 | def get_names(self):
273 | """
274 | List names of the generated metrics
275 | 
276 | Returns:
277 | :class:`list`:
278 | list of strings with the metrics that will be generated. As the percentiles and density metrics
279 | can be of different lengths, the length of the list depends on the settings.
280 | """
281 | return [
282 | f"p{p}" for p in self.p] + \
283 | [f"d{d}" for d in self.d] + \
284 | [
285 | "h_mean",
286 | "h_stddev",
287 | "h_absdev",
288 | "h_skew",
289 | "h_kurtosis",
290 | "p_first_returns",
291 | "p_first_veg_returns"
292 | ]
293 | def __call__(self, points_in_poly:dict):
294 | """
295 | Calculate the metrics
296 | 
297 | Args:
298 | points_in_poly: :class:`dict` that contains keys `points`, `echo_number` and `classification`.
299 | Each of the keys points to a `numpy.ndarray` with n entries (shape (n, 3) for `points`).
300 | 
301 | Returns:
302 | :class:`numpy.ndarray`:
303 | - Statistical metrics (h_mean, h_stddev, h_absdev, h_skew, h_kurtosis)
304 | - Canopy height metrics (default: p10, p20, p30, p40, p50, p60, p70, p80, p90, p100)
305 | - Density metrics (default: d10, d20, d30, d40, d50, d60, d70, d80, d90)
306 | (The proportion of points above the height percentiles,
307 | Shen et al., 2018: https://doi.org/10.3390/rs10111729)
308 | - Fraction of first returns
309 | - Fraction of first returns in the vegetation class
310 | """
311 | points = points_in_poly['points']
312 | echo_number = points_in_poly['echo_number']
313 | classification = points_in_poly['classification']
314 | outArray = np.full(((len(self.get_names())), ), np.nan)
315 | # no height threshold used by Woods et al.
316 | with warnings.catch_warnings():
317 | warnings.simplefilter("ignore")
318 | outArray[0:len(self.p)] = np.percentile(points[:, 2], self.p)
319 | total_points = points.shape[0]
320 | var_length_end = len(self.p) + len(self.d)
321 | max_height = np.max(points[:, 2])
322 | outArray[len(self.p):var_length_end] = [np.count_nonzero(points[:, 2] > val) / total_points
323 | for val in (self.d / 100. * max_height)]
324 | outArray[var_length_end] = np.mean(points[:, 2])
325 | outArray[var_length_end + 1] = np.std(points[:, 2], ddof=1)
326 | outArray[var_length_end + 2] = np.mean(np.abs(outArray[var_length_end] - points[:, 2]))
327 | outArray[var_length_end + 3] = scipy.stats.skew(points[:, 2])
328 | outArray[var_length_end + 4] = scipy.stats.kurtosis(points[:, 2])
329 | outArray[var_length_end + 5] = np.count_nonzero(echo_number == 1) / total_points
330 | outArray[var_length_end + 6] = np.count_nonzero(np.logical_and(
331 | echo_number == 1, np.logical_and(classification >= 3, classification <= 5)) # classes 3, 4, 5 are vegetation classes
332 | ) / total_points
333 | return outArray
334 | 
-------------------------------------------------------------------------------- /src/pyForMetrix/metricCalculators/types.py: --------------------------------------------------------------------------------
1 | import warnings
2 | 
3 | import numpy as np
4 | import scipy
5 | 
6 | from pyForMetrix.metricCalculators import MetricCalculator
7 | from pyForMetrix.metricCalculators.lidRmetrics.rumple import rumple_index
8 | 
9 | 
10 | class MCalc_EchoMetrics(MetricCalculator):
11 | name = "Echo metrics"
12 | 
13 | def __init__(self):
14 | ...
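# first returns approximate the outer canopy surface; vegetation returns are identified via ASPRS classes 3, 4 and 5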
15 | 
16 | def get_names(self):
17 | return [
18 | "p_first_returns",
19 | "p_first_veg_returns"
20 | ]
21 | 
22 | def __call__(self, points_in_poly: dict):
23 | points = points_in_poly['points']
24 | echo_number = points_in_poly['echo_number']
25 | classification = points_in_poly['classification']
26 | 
27 | total_points = points.shape[0]
28 | 
29 | outArray = np.full(((len(self.get_names())),), np.nan)
30 | 
31 | with warnings.catch_warnings():
32 | warnings.simplefilter("ignore")
33 | outArray[0] = np.count_nonzero(echo_number == 1) / total_points
34 | outArray[1] = np.count_nonzero(np.logical_and(
35 | echo_number == 1, np.logical_and(classification >= 3, classification <= 5))
36 | # classes 3, 4, 5 are vegetation classes
37 | ) / total_points
38 | return outArray
39 | 
40 | 
41 | class MCalc_HeightMetrics(MetricCalculator):
42 | name = "Height metrics"
43 | 
44 | def __init__(self, percentiles=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])):
45 | self.p = np.array(percentiles)
46 | 
47 | def get_names(self):
48 | return [
49 | f"p{p}" for p in self.p] + \
50 | [
51 | "h_mean",
52 | ]
53 | 
54 | def __call__(self, points_in_poly: dict):
55 | points = points_in_poly['points']
56 | outArray = np.full(((len(self.get_names())),), np.nan)
57 | 
58 | with warnings.catch_warnings():
59 | warnings.simplefilter("ignore")
60 | outArray[0:len(self.p)] = np.percentile(points[:, 2], self.p)
61 | var_length_end = len(self.p)
62 | outArray[var_length_end] = np.mean(points[:, 2])
63 | return outArray
64 | 
65 | 
66 | class MCalc_DensityMetrics(MetricCalculator):
67 | name = "Density metrics"
68 | 
69 | def __init__(self, density_percentiles=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])):
70 | self.d = np.array(density_percentiles)
71 | 
72 | def get_names(self):
73 | return [
74 | f"d{d}" for d in self.d]
75 | 
76 | def __call__(self, points_in_poly: dict):
77 | points = points_in_poly['points']
78 | outArray = np.full(((len(self.get_names())),), np.nan)
79 | with warnings.catch_warnings():
80 | warnings.simplefilter("ignore")
81 | total_points = points.shape[0]
82 | max_height = np.max(points[:, 2])
83 | outArray[0:len(self.d)] = [np.count_nonzero(points[:, 2] > val) / total_points
84 | for val in (self.d / 100. * max_height)]
85 | return outArray
86 | 
87 | 
88 | class MCalc_VarianceMetrics(MetricCalculator):
89 | name = "Variance metrics"
90 | 
91 | def __init__(self):
92 | ...
93 | 
94 | def get_names(self):
95 | return [
96 | "h_stddev",
97 | "h_absdev",
98 | "h_skew",
99 | "h_kurtosis",
100 | "h_entropy"
101 | ]
102 | 
103 | def __call__(self, points_in_poly: dict):
104 | points = points_in_poly['points']
105 | outArray = np.full(((len(self.get_names())),), np.nan)
106 | with warnings.catch_warnings():
107 | warnings.simplefilter("ignore")
108 | outArray[0] = np.std(points[:, 2], ddof=1)
109 | outArray[1] = np.mean(np.abs(np.mean(points[:, 2]) - points[:, 2]))
110 | outArray[2] = scipy.stats.skew(points[:, 2])
111 | outArray[3] = scipy.stats.kurtosis(points[:, 2])
112 | 
113 | hist = np.histogramdd(points[:, 2], bins=20)[0]
114 | hist /= hist.sum()
115 | hist = hist.flatten()
116 | hist = hist[hist.nonzero()]
117 | outArray[4] = scipy.stats.entropy(hist)
118 | return outArray
119 | 
120 | 
121 | class MCalc_CoverMetrics(MetricCalculator):
122 | name = "Cover metrics"
123 | 
124 | def __init__(self):
125 | ...
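# cover_cc2: share of returns above 2 m; cover_ccmean: share of returns above the mean height; cover_rumple: rumple index of the canopy surface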
126 | 
127 | def get_names(self):
128 | return [
129 | "cover_cc2",
130 | "cover_ccmean",
131 | "cover_rumple",
132 | 
133 | ]
134 | 
135 | def __call__(self, points_in_poly: dict, rumple_pixel_size=1):
136 | points = points_in_poly['points']
137 | outArray = np.full(((len(self.get_names())),), np.nan)
138 | with warnings.catch_warnings():
139 | warnings.simplefilter("ignore")
140 | 
141 | outArray[0] = np.count_nonzero(points[:, 2] > 2.0) / points.shape[0]
142 | outArray[1] = np.count_nonzero(points[:, 2] > np.mean(points[:, 2])) / points.shape[0]
143 | rumple = rumple_index(points_in_poly, rumple_pixel_size)
144 | outArray[2] = rumple
145 | return outArray
146 | 
147 | 
148 | class MCalc_VisMetrics(MetricCalculator):
149 | """
150 | Calculate metrics for visualisation of the point cloud data
151 | 
152 | :param points_in_poly: :class:`dict` with keys 'points' (np.array of shape (n, 3), normalized height required), 'pt_src_id' and 'scan_angle_rank'
153 | :return: np.array of shape (4, ) with metrics:
154 | - Total number of points
155 | - Max. height (CHM)
156 | - Number of unique strips at location
157 | - Max. scan angle at location
158 | """ 159 | name = "Visualisation only" 160 | 161 | def __init__(self): 162 | super(MCalc_VisMetrics, self).__init__() 163 | 164 | def get_names(self): 165 | return [ 166 | 'nPoints', 167 | 'hmax', 168 | 'uniqueStrips', 169 | 'maxScanAngle' 170 | ] 171 | 172 | def __call__(self, points_in_poly:dict): 173 | points = points_in_poly['points'] 174 | sar = points_in_poly['scan_angle_rank'] 175 | outArray = np.full(((len(self.get_names())), ), np.nan) 176 | with warnings.catch_warnings(): 177 | warnings.simplefilter("ignore") 178 | outArray[0] = points.shape[0] 179 | outArray[1] = np.max(points[:, 2]) 180 | outArray[2] = len(np.unique(points_in_poly['pt_src_id'])) 181 | outArray[3] = np.max(sar) 182 | return outArray 183 | 184 | class MCalc_lidRmetrics_basic(MetricCalculator): 185 | name = "_basic" 186 | _names = ['n', 'zmax', 'zmin', 'zmean', 'zsd', 'zcv', 'zskew', 'zkurt'] 187 | 188 | def __call__(self, points:dict): 189 | from pyForMetrix.metricCalculators.lidRmetrics import basic 190 | return np.array([ 191 | basic.basic_n(points), 192 | basic.basic_zmax(points), 193 | basic.basic_zmin(points), 194 | basic.basic_zmean(points), 195 | basic.basic_zsd(points), 196 | basic.basic_zcv(points), 197 | basic.basic_zskew(points), 198 | basic.basic_zkurt(points) 199 | ]) 200 | 201 | class MCalc_lidRmetrics_percentiles(MetricCalculator): 202 | name = "_percentiles" 203 | def __init__(self, percentiles=np.array([1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 204 | 55, 60, 65, 70, 75, 80, 85, 90, 95, 99])): 205 | self.percentiles = percentiles 206 | super(MCalc_lidRmetrics_percentiles, self).__init__() 207 | def get_names(self): 208 | return [f'zq{q:d}' for q in self.percentiles] 209 | 210 | def __call__(self, points:dict): 211 | from pyForMetrix.metricCalculators.lidRmetrics import percentiles 212 | return percentiles.percentiles_z(points, self.percentiles) 213 | 214 | class MCalc_lidRmetrics_percabove(MetricCalculator): 215 | name = '_percabove' 216 | def __init__(self, p_above=np.array([2, 5])): 217 | self.p_above = p_above 218 | super(MCalc_lidRmetrics_percabove, self).__init__() 219 | def get_names(self): 220 | return ['pzabovemean'] + [f'pzabove{q}' for q in self.p_above] 221 | def __call__(self, points:dict): 222 | from pyForMetrix.metricCalculators.lidRmetrics import percabove 223 | outArray = np.full((len(self),), np.nan) 224 | outArray[0] = percabove.percabove_pzabovemean(points) 225 | outArray[1:] = [percabove.percabove_pzaboveX(points, X) for X in self.p_above] 226 | return outArray 227 | 228 | class MCalc_lidRmetrics_dispersion(MetricCalculator): 229 | name = '_dispersion' 230 | _names = ['ziqr', 'zMADmean', 'zMADmedian', 'CRR', 'zentropy', 'VCI'] 231 | def __call__(self, points:dict, binsize:float=1): 232 | from pyForMetrix.metricCalculators.lidRmetrics import dispersion 233 | return np.array([ 234 | dispersion.dispersion_ziqr(points), 235 | dispersion.dispersion_zMADmean(points), 236 | dispersion.dispersion_zMADmedian(points), 237 | dispersion.dispersion_CRR(points), 238 | dispersion.dispersion_zentropy(points, binsize=binsize), 239 | dispersion.dispersion_VCI(points, binsize=binsize), 240 | ]) 241 | 242 | class MCalc_lidRmetrics_canopydensity(MetricCalculator): 243 | name = '_canopydensity' 244 | def __init__(self, num_groups:int=10): 245 | self.num_groups = num_groups 246 | super(MCalc_lidRmetrics_canopydensity, self).__init__() 247 | def get_names(self): 248 | return [f'zpcum{i:d}' for i in range(1, self.num_groups)] 249 | def __call__(self, points): 250 | from 
pyForMetrix.metricCalculators.lidRmetrics import canopydensity 251 | return canopydensity.canopydensity_zpcum(points, self.num_groups) 252 | 253 | 254 | 255 | class MCalc_lidRmetrics_Lmoments(MetricCalculator): 256 | name = '_Lmoments' 257 | _names = ['L1', 'L2', 'L3', 'L4', 'Lskew', 'Lkurt', 'Lcoefvar'] 258 | 259 | def __call__(self, points): 260 | from pyForMetrix.metricCalculators.lidRmetrics import Lmoments 261 | outArray = np.full((len(self),), np.nan) 262 | outArray[:4] = Lmoments.Lmoments_moments(points) 263 | outArray[4:] = Lmoments.Lmoments_coefficients(points) # order: L_skew, L_kurt, L_CV 264 | return outArray 265 | class MCalc_lidRmetrics_lad(MetricCalculator): 266 | """ 267 | Following http://doi.org/10.1016/j.rse.2014.10.004 268 | """ 269 | name = '_lad' 270 | _names = ['lad_max', 'lad_mean', 'lad_cv', 'lad_min'] 271 | def __call__(self, points, dz=1): 272 | from pyForMetrix.metricCalculators.lidRmetrics import lad 273 | return lad.lad_lad(points, dz) 274 | class MCalc_lidRmetrics_interval(MetricCalculator): 275 | name = "_interval" 276 | def __init__(self, intervals=np.array([0, 0.15, 2, 5, 10, 20, 30])): 277 | self.intervals = intervals 278 | super(MCalc_lidRmetrics_interval, self).__init__() 279 | def get_names(self): 280 | return [f'pz_below_{q}' for q in self.intervals] 281 | def __call__(self, points): 282 | from pyForMetrix.metricCalculators.lidRmetrics import interval 283 | return np.array([interval.interval_p_below(points, threshold=X) for X in self.intervals]) 284 | class MCalc_lidRmetrics_rumple(MetricCalculator): 285 | name = '_rumple' 286 | _names = ['rumple'] 287 | def __call__(self, points, rumple_pixel_size=1): 288 | from pyForMetrix.metricCalculators.lidRmetrics import rumple 289 | return np.array([rumple.rumple_index(points, rumple_pixel_size)]) 290 | 291 | class MCalc_lidRmetrics_voxels(MetricCalculator): 292 | name = '_voxels' 293 | _names = ['vn', 'vFRall', 'vFRcanopy', 'vzrumple', 'vzsd', 'vzcv', 'OpenGapSpace', 'ClosedGapSpace', 'Euphotic', 'Oligophotic'] 294 | 295 | def __call__(self, points, voxel_size=1): 296 | from pyForMetrix.metricCalculators.lidRmetrics import voxels 297 | outArray = np.full((len(self),), np.nan) 298 | XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = voxels.create_voxelization(points, voxel_size) 299 | outArray[0] = voxels.voxels_vn(idxVoxelUnique) 300 | outArray[1] = voxels.voxels_vFRall(idxVoxelUnique) 301 | outArray[2] = voxels.voxels_vFRcanopy(idxVoxelUnique) 302 | outArray[3] = voxels.voxels_vzrumple(idxVoxelUnique, voxel_size) 303 | outArray[4] = voxels.voxels_vzsd(idxVoxelUnique, voxel_size) 304 | outArray[5] = voxels.voxels_vzcv(idxVoxelUnique, voxel_size) 305 | outArray[6:] = voxels.voxels_lefsky(idxVoxelUnique) 306 | return outArray 307 | 308 | class MCalc_lidRmetrics_kde(MetricCalculator): 309 | name = '_kde' 310 | _names = ['kde_peaks_count', 'kde_peaks_elev', 'kde_peaks_value'] 311 | 312 | def __call__(self, points, bw=2): 313 | from pyForMetrix.metricCalculators.lidRmetrics import kde 314 | count, elev, value = kde.kde_kde(points, bw) 315 | return np.array([count, np.mean(elev), np.mean(value)]) 316 | 317 | class MCalc_lidRmetrics_echo(MetricCalculator): 318 | name = '_echo' 319 | _names = ['pFirst', 'pIntermediate', 'pLast', 'pSingle', 'pMultiple'] 320 | 321 | def __call__(self, points): 322 | from pyForMetrix.metricCalculators.lidRmetrics import echo 323 | return np.array([ 324 | echo.echo_pFirst(points), 325 | echo.echo_pIntermediate(points), 326 | echo.echo_pLast(points), 327 | 
echo.echo_pSingle(points), 328 | echo.echo_pMultiple(points), 329 | ]) 330 | class MCalc_lidRmetrics_HOME(MetricCalculator): 331 | name = '_HOME' 332 | _names = ['HOME'] 333 | def __call__(self, points): 334 | from pyForMetrix.metricCalculators.lidRmetrics import HOME 335 | return np.array([ 336 | HOME.HOME_home(points), 337 | ]) -------------------------------------------------------------------------------- /src/pyForMetrix/metrix.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import re 3 | import os 4 | import warnings 5 | import time 6 | import functools 7 | import multiprocessing 8 | import multiprocessing.shared_memory 9 | 10 | from deprecated import deprecated 11 | 12 | import numpy as np 13 | import pandas as pd 14 | import scipy 15 | import scipy.stats 16 | import tqdm 17 | import xarray 18 | 19 | from shapely.geometry import Polygon 20 | from matplotlib.path import Path as mplPath 21 | from laxpy.tree import LAXTree 22 | from laxpy.file import LAXParser 23 | import laspy 24 | 25 | from pyForMetrix.metricCalculators import MetricCalculator 26 | from pyForMetrix.utils.rasterizer import Rasterizer 27 | 28 | def parallel_raster_metrics_for_chunk(XVoxelCenter, XVoxelContains, inPoints, 29 | outArrayName, outArrayShape, outArrayType, 30 | raster_size, raster_min, 31 | perc, p_zabovex, 32 | progressbar): 33 | if progressbar is not None: 34 | progressbar.put((0, 1)) 35 | shm = multiprocessing.shared_memory.SharedMemory(outArrayName) 36 | outArray = np.ndarray(outArrayShape, dtype=outArrayType, buffer=shm.buf) 37 | for xcenter, ycenter, contains in zip(XVoxelCenter[0], XVoxelCenter[1], XVoxelContains): 38 | cellY = int((xcenter - raster_size / 2 - raster_min[0]) / raster_size) # note that xarray expects (y,x) 39 | cellX = int((ycenter - raster_size / 2 - raster_min[1]) / raster_size) 40 | points = inPoints[contains, :] 41 | cell_metrics = calc_standard_metrics(points, p_zabovex, perc, progressbar) 42 | outArray[cellX, cellY, :] = cell_metrics 43 | shm.close() 44 | if progressbar is not None: 45 | progressbar.put((0, -1)) 46 | def parallel_custom_raster_metrics_for_chunk(XVoxelCenter, XVoxelContains, inPoints, 47 | outArrayName, outArrayShape, outArrayType, 48 | raster_size, raster_min, 49 | progressbar, metric, metric_options): 50 | if progressbar is not None: 51 | progressbar.put((0, 1)) 52 | shm = multiprocessing.shared_memory.SharedMemory(outArrayName) 53 | outArray = np.ndarray(outArrayShape, dtype=outArrayType, buffer=shm.buf) 54 | for xcenter, ycenter, contains in zip(XVoxelCenter[0], XVoxelCenter[1], XVoxelContains): 55 | cellY = int((xcenter - raster_size / 2 - raster_min[0]) / raster_size) # note that xarray expects (y,x) 56 | cellX = int((ycenter - raster_size / 2 - raster_min[1]) / raster_size) 57 | points = {key: item[contains, ...] 
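# 'contains' holds the indices of all points that fall into the current raster cell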
for key, item in inPoints.items()} 58 | 59 | out_metrics = [] 60 | for mx, mo in zip(metric, metric_options): 61 | cell_metrics = mx(points, **mo) 62 | out_metrics.append(cell_metrics) 63 | outArray[cellX, cellY, :] = np.concatenate(out_metrics) 64 | shm.close() 65 | if progressbar is not None: 66 | progressbar.put((0, -1)) 67 | 68 | 69 | def calc_standard_metrics(points, p_zabovex, perc, progressbar): 70 | if progressbar is not None: 71 | progressbar.put((1, 0)) 72 | outArray = np.full((8 + len(perc) + len(p_zabovex)), np.nan) 73 | with warnings.catch_warnings(): 74 | warnings.simplefilter("ignore") 75 | outArray[0] = points.shape[0] 76 | outArray[1] = np.prod(np.max(points[:, :2], axis=0) - np.min(points[:, :2], axis=0)) 77 | outArray[2] = np.mean(points[:, 2]) 78 | outArray[3] = np.std(points[:, 2]) 79 | outArray[4] = scipy.stats.skew(points[:, 2]) 80 | outArray[5] = scipy.stats.kurtosis(points[:, 2]) 81 | 82 | hist = np.histogramdd(points[:, 2], bins=20)[0] 83 | hist /= hist.sum() 84 | hist = hist.flatten() 85 | hist = hist[hist.nonzero()] 86 | outArray[6] = scipy.stats.entropy(hist) 87 | 88 | outArray[7] = np.count_nonzero(points[:, 2] > outArray[2]) / points.shape[0] 89 | data_pos = 8 + len(perc) 90 | outArray[8:data_pos] = np.percentile(points[:, 2], perc) 91 | for x in p_zabovex: 92 | outArray[data_pos] = np.count_nonzero(points[:, 2] > (x + np.min(points[:, 2]))) / points.shape[0] 93 | data_pos += 1 94 | return outArray 95 | 96 | 97 | def updatePbar(total, queue, maxProc, pbar_position): 98 | desc = "Computing raster metrics" 99 | pCount = 0 100 | pbar = tqdm.tqdm(total=total, ncols=150, desc=desc + " (%02d/%02d Process(es))" % (pCount, maxProc), position=pbar_position, 101 | colour='#94f19b') 102 | pbar.update(0) 103 | while True: 104 | inc, process = queue.get() 105 | pbar.update(inc) 106 | if process != 0: 107 | pCount += process 108 | pbar.set_description(desc + " (%02d/%02d Process(es))" % (pCount, maxProc)) 109 | 110 | 111 | class Metrics(abc.ABC): 112 | def calc_metrics(self): 113 | raise NotImplementedError 114 | 115 | def calc_metrics_parallel(self): 116 | raise NotImplementedError 117 | 118 | 119 | class RasterMetrics(Metrics): 120 | def __init__(self, points, raster_size, percentiles=np.arange(0, 101, 5), p_zabovex=None, silent=True, pbars=True, 121 | raster_min=None, raster_max=None, origin=None): 122 | """ 123 | Class to calculate metrics on a raster (cell) basis. 124 | 125 | Args: 126 | points: :class:`dict` containing keys 'points' and potentially other attributes, which are :numpy:ndarray s containing the points. 
127 | raster_size: :class:`float` raster cell size used for calculation 128 | percentiles: deprecated 129 | p_zabovex: deprecated 130 | silent: deprecated 131 | pbars: :class:`bool` whether to show progress bars or not 132 | raster_min: :class:`numpy.ndarray` of shape `(2,)` with the minimum x/y coordinates for the raster (default: derive from point cloud) 133 | raster_max: :class:`numpy.ndarray` of shape `(2,)` with the maximum x/y coordinates for the raster (default: derive from point cloud) 134 | origin: :class:`numpy.ndarray` of shape `(2,)` with the origin x/y coordinates (pixel center) for the raster (default: same as `raster_min`) 135 | """ 136 | self.pbars = pbars 137 | self.perc = percentiles 138 | self.p_zabovex = p_zabovex if p_zabovex is not None else [] 139 | if not isinstance(self.p_zabovex, list): 140 | self.p_zabovex = [self.p_zabovex] 141 | self.raster_size = raster_size 142 | self.points = points 143 | coords = points['points'] 144 | self.raster_min = np.min(coords[:, 0:2], axis=0) if raster_min is None else raster_min 145 | self.raster_max = np.max(coords[:, 0:2], axis=0) if raster_max is None else raster_max 146 | self.origin = origin if origin is not None else self.raster_min 147 | 148 | n = ((self.raster_min - self.origin) // self.raster_size).astype(int) # find next integer multiple of origin 149 | self.raster_min = self.origin + n * self.raster_size 150 | 151 | self.raster_dims = ( 152 | int(np.ceil((self.raster_max[1] - self.raster_min[1]) / raster_size)), 153 | int(np.ceil((self.raster_max[0] - self.raster_min[0]) / raster_size))) 154 | ts = time.time() 155 | # np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning) 156 | r = Rasterizer(coords, (raster_size, raster_size)) 157 | XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = r.rasterize(origin=self.origin) 158 | if not silent: 159 | print(f"Rasterization complete in {(time.time() - ts):.2f} seconds.") 160 | self.XVoxelCenter = XVoxelCenter 161 | self.XVoxelContains = XVoxelContains 162 | 163 | def calc_custom_metrics(self, metrics: MetricCalculator, metric_options=None): 164 | """ 165 | Calculates the given metrics on the point cloud this class was initialized on. 166 | 167 | Args: 168 | metrics: a single :class:`pyForMetrix.metricCalculators.MetricCalculator` instance or a :class:`list` of such classes 169 | metric_options: a :class:`list` of :class:`dict`s with options (kwargs) for each `MetricCalculator`, or None. 170 | 171 | Returns: 172 | An :class:`xarray.Dataset` containing the metric(s) in a raster grid 173 | """ 174 | if not isinstance(metrics, list): 175 | metrics = [metrics] 176 | if metric_options is None: 177 | metric_options = [dict()] * len(metrics) 178 | 179 | num_feats = sum([len(m.get_names()) for m in metrics]) 180 | data = np.full((self.raster_dims + (num_feats, )), np.nan, dtype=float) 181 | 182 | for xcenter, ycenter, contains in zip(self.XVoxelCenter[0], self.XVoxelCenter[1], self.XVoxelContains): 183 | cellY = int((xcenter - self.raster_size / 2 - self.raster_min[0]) / self.raster_size) # note that xarray expects (y,x) 184 | cellX = int((ycenter - self.raster_size / 2 - self.raster_min[1]) / self.raster_size) 185 | points = {key: item[contains, ...] 
for key, item in self.points.items()} 186 | out_metrics = [] 187 | 188 | for metric, metric_option in zip(metrics, metric_options): 189 | cell_metrics = metric(points, **metric_option) 190 | out_metrics.append(cell_metrics) 191 | data[cellX, cellY, :] = np.concatenate(out_metrics) 192 | return self.convert_to_custom_data_array(data, metrics) 193 | 194 | 195 | def calc_custom_metrics_parallel(self, metrics, n_chunks=16, n_processes=4, pbar_position=0, 196 | multiprocessing_point_threshold=10_000, metric_options=None): 197 | """ 198 | Calculates the given metrics on the point cloud this class was initialized on, in parallel. 199 | Parallelization is achieved by spawning multiple processes for subsets of the raster cells. Note that 200 | it might be faster to parallelize over input datasets, if they are chunked. 201 | 202 | Args: 203 | metrics: see :func:`calc_custom_metrics` 204 | n_chunks: number of chunks to split the valid raster cells into (more chunks decrease memory usage) 205 | n_processes: number of processes to work on the chunks (more processes increase memory usage) 206 | pbar_position: deprecated 207 | multiprocessing_point_threshold: number of raster cells at which multiprocessing should be started. For 208 | relatively small datasets, the overhead of spawning extra processes outweights the benefit. Ideal setting 209 | depends on the features that are calculated 210 | metric_options: see :func:`calc_custom_metrics` 211 | 212 | Returns: 213 | An :class:`xarray.Dataset` containing the metric(s) in a raster grid 214 | 215 | """ 216 | if not isinstance(metrics, list): 217 | metrics = [metrics] 218 | if metric_options is None: 219 | metric_options = [dict()] * len(metrics) 220 | 221 | # if there are actually rather few voxels (e.g., < 10,000), single thread is faster due to less overhead 222 | if len(self.XVoxelCenter[0]) < multiprocessing_point_threshold: 223 | return self.calc_custom_metrics(metrics=metrics, metric_options=metric_options) 224 | 225 | num_feats = sum([len(m.get_names()) for m in metrics]) 226 | 227 | data = np.empty(self.raster_dims + (num_feats,), dtype=float) 228 | shm = multiprocessing.shared_memory.SharedMemory(create=True, size=data.nbytes) 229 | data_arr = np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf) 230 | data_arr[:] = np.nan 231 | 232 | XVoxelCenterChunks = np.array_split(np.array(self.XVoxelCenter), n_chunks, axis=-1) 233 | XVoxelContainsChunks = np.array_split(np.array(self.XVoxelContains, dtype=object), n_chunks) 234 | 235 | m = multiprocessing.Manager() 236 | if self.pbars: 237 | pbarQueue = m.Queue() 238 | pbarProc = multiprocessing.Process(target=updatePbar, args=(self.XVoxelCenter[0].shape[0], pbarQueue, n_processes, pbar_position)) 239 | pbarProc.start() 240 | else: 241 | pbarQueue = None 242 | 243 | pool = multiprocessing.Pool(processes=n_processes) 244 | processing_function = functools.partial(parallel_custom_raster_metrics_for_chunk, 245 | inPoints=self.points, 246 | outArrayName=shm.name, 247 | outArrayShape=data_arr.shape, 248 | outArrayType=data_arr.dtype, 249 | raster_size=self.raster_size, 250 | raster_min=self.raster_min, 251 | progressbar=pbarQueue, 252 | metric = metrics, 253 | metric_options = metric_options) 254 | pool.starmap(processing_function, zip(XVoxelCenterChunks, XVoxelContainsChunks), chunksize=1) 255 | data[:] = data_arr[:] 256 | shm.close() 257 | shm.unlink() 258 | if self.pbars: 259 | pbarProc.kill() 260 | return self.convert_to_custom_data_array(data, metrics) 261 | 262 | @deprecated(version="0.0.5", reason="This 
function is being replaced by calc_custom_metrics") 263 | def calc_metrics(self, 264 | progressbaropts=None, 265 | pbar_position=0, 266 | *args, **kwargs): 267 | if progressbaropts is None: 268 | progressbaropts = {'desc': 'Computing raster metrics ( Single Process)', 269 | 'ncols': 150, 270 | 'leave': False, 271 | 'colour': '#94f19b'} 272 | num_feats = len(self.perc) + 8 + len(self.p_zabovex) 273 | data = np.full(self.raster_dims + (num_feats, ), np.nan, dtype=float) 274 | 275 | for xcenter, ycenter, contains in zip(tqdm.tqdm(self.XVoxelCenter[0], position=pbar_position, **progressbaropts), self.XVoxelCenter[1], self.XVoxelContains): 276 | cellY = int((xcenter - self.raster_size / 2 - self.raster_min[0]) / self.raster_size) # note that xarray expects (y,x) 277 | cellX = int((ycenter - self.raster_size / 2 - self.raster_min[1]) / self.raster_size) 278 | points = self.points["points"][contains, :] 279 | cell_metrics = calc_standard_metrics(points, self.p_zabovex, self.perc, None) 280 | data[cellX, cellY, :] = cell_metrics 281 | return self.convert_to_data_array(data) 282 | 283 | def convert_to_custom_data_array(self, data, metrics): 284 | return xarray.DataArray(data, dims=('y', 'x', 'val'), 285 | coords={'y': np.arange(self.raster_min[1], self.raster_max[1], self.raster_size) + self.raster_size/2, 286 | # coords={'y': np.arange(self.raster_min[1], self.raster_max[1], self.raster_dims[1]) + self.raster_size/2, 287 | 'x': np.arange(self.raster_min[0], self.raster_max[0], self.raster_size) + self.raster_size/2, 288 | # 'x': np.linspace(self.raster_min[0], self.raster_max[0], self.raster_dims[0]) + self.raster_size/2, 289 | 'val': np.concatenate([m.get_names() for m in metrics]) 290 | }) 291 | 292 | @deprecated(version="0.0.5", reason="This function is being replaced by convert_to_custom_data_array") 293 | def convert_to_data_array(self, data): 294 | return xarray.DataArray(data, dims=('y', 'x', 'val'), 295 | coords={'y': np.arange(self.raster_min[1], self.raster_max[1], self.raster_size) + self.raster_size/2, 296 | 'x': np.arange(self.raster_min[0], self.raster_max[0], self.raster_size) + self.raster_size/2, 297 | 'val': [ 298 | 'n', 299 | 'area', 300 | 'meanZ', 301 | 'stdZ', 302 | 'skewZ', 303 | 'kurtZ', 304 | 'entropyZ', 305 | 'nAboveMean' 306 | ] + 307 | [f"perc{p}Z" for p in self.perc] + 308 | [f"pAboveX{x}Z" for x in self.p_zabovex] 309 | }) 310 | 311 | @deprecated(version="0.0.5", reason="This function is being replaced by calc_custom_metrics_parallel") 312 | def calc_metrics_parallel(self, n_chunks=16, n_processes=4, pbar_position=0, *args, **kwargs): 313 | # if there are actually rather few voxels (e.g., < 10,000), single thread is faster due to less overhead 314 | if len(self.XVoxelCenter[0]) < 10_000: 315 | return self.calc_metrics(pbar_position=pbar_position) 316 | 317 | num_feats = len(self.perc) + 8 + len(self.p_zabovex) 318 | 319 | data = np.empty(self.raster_dims + (num_feats,), dtype=float) 320 | shm = multiprocessing.shared_memory.SharedMemory(create=True, size=data.nbytes) 321 | data_arr = np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf) 322 | data_arr[:] = np.nan 323 | 324 | XVoxelCenterChunks = np.array_split(np.array(self.XVoxelCenter), n_chunks, axis=-1) 325 | XVoxelContainsChunks = np.array_split(np.array(self.XVoxelContains, dtype=object), n_chunks) 326 | 327 | m = multiprocessing.Manager() 328 | if self.pbars: 329 | pbarQueue = m.Queue() 330 | pbarProc = multiprocessing.Process(target=updatePbar, args=(self.XVoxelCenter[0].shape[0], pbarQueue, n_processes, 
pbar_position))
331 | pbarProc.start()
332 | else:
333 | pbarQueue = None
334 | 
335 | pool = multiprocessing.Pool(processes=n_processes)
336 | processing_function = functools.partial(parallel_raster_metrics_for_chunk,
337 | inPoints=self.points['points'],
338 | outArrayName=shm.name,
339 | outArrayShape=data_arr.shape,
340 | outArrayType=data_arr.dtype,
341 | raster_size=self.raster_size,
342 | raster_min=self.raster_min,
343 | perc=self.perc,
344 | p_zabovex=self.p_zabovex,
345 | progressbar=pbarQueue)
346 | pool.starmap(processing_function, zip(XVoxelCenterChunks, XVoxelContainsChunks), chunksize=1)
347 | data[:] = data_arr[:]
348 | shm.close()
349 | shm.unlink()
350 | if self.pbars:
351 | pbarProc.kill()
352 | return self.convert_to_data_array(data)
353 | 
354 | 
355 | class PlotMetrics(Metrics):
356 | def __init__(self, lasfiles, plot_polygons, silent=True, pbars=True):
357 | """
358 | Class to calculate metrics on a plot (polygon) basis
359 | 
360 | Args:
361 | lasfiles: :class:`list` of input las-Files to consider. Note that the scanning (finding the points inside
362 | the plots) can be sped up significantly by providing `.lax`-Files, which can be generated e.g. using
363 | lasindex, part of the LAStools (https://rapidlasso.com/lastools/, proprietary software with free/open
364 | components).
365 | plot_polygons: :class:`geopandas.GeoDataFrame` containing the geometries (polygons) of interest
366 | silent: :class:`bool` whether to print output or not
367 | pbars: :class:`bool` whether to display progress bars or not
368 | """
369 | self.lasfiles = lasfiles
370 | self.plot_polygons = plot_polygons
371 | self.silent = silent
372 | self.pbars = pbars
373 | 
374 | # find points that are in the polygons
375 | self.points = [
376 | {
377 | 'points': np.empty((0, 3), dtype=float),
378 | 'echo_number': np.empty((0, ), dtype=int),
379 | 'number_of_echoes': np.empty((0, ), dtype=int),
380 | 'intensity': np.empty((0, ), dtype=float),
381 | 'classification': np.empty((0, ), dtype=int),
382 | 'pt_src_id': np.empty((0, ), dtype=int),
383 | 'scan_angle_rank': np.empty((0, ), dtype=int),
384 | } for i in range(len(plot_polygons))
385 | ]
386 | if not isinstance(lasfiles, list):
387 | self.lasfiles = [lasfiles]
388 | 
389 | for lasfile in tqdm.tqdm(self.lasfiles, ncols=150, desc='Scanning input files to find polygon plots'):
390 | laxfile = re.sub(r'^(.*)\.la[sz]$', r'\1.lax', str(lasfile))
391 | inFile = laspy.read(lasfile)
392 | if not os.path.exists(laxfile):
393 | print(f"File {lasfile} does not have a corresponding .lax index file.
Expect much slower performance.") 394 | print(f"Run `lasindex -i {lasfile}` to create an index file (requires LAStools installation)") 395 | else: 396 | parser = LAXParser(laxfile) 397 | tree = LAXTree(parser) 398 | for q_id, q in plot_polygons.iterrows(): 399 | q_polygon = q.geometry 400 | candidate_indices = [] 401 | 402 | if not os.path.exists(laxfile): 403 | candidate_indices = [np.arange(0, inFile.header.point_count)] # brute force through all points 404 | else: 405 | minx, maxx, miny, maxy = parser.bbox 406 | bbox = Polygon([(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny)]) 407 | if not q_polygon.intersects(bbox): 408 | continue 409 | for cell_index, polygon in tree.cell_polygons.items(): # use quadtree for preselection 410 | if q_polygon.intersects(polygon): 411 | candidate_indices.append(parser.create_point_indices(cell_index)) 412 | 413 | if len(candidate_indices) > 0: # brute force the rest 414 | candidate_indices = np.unique(np.concatenate(candidate_indices)) 415 | p = mplPath(list(q_polygon.exterior.coords)) 416 | candidate_points = np.vstack((inFile.x[candidate_indices], inFile.y[candidate_indices])).T 417 | is_inside = p.contains_points(candidate_points) 418 | points_sel = np.argwhere(is_inside).flatten() 419 | final_selection = candidate_indices[points_sel] #[::nth_point_subsample] 420 | self.points[q_id]['points'] = np.concatenate((self.points[q_id]['points'], inFile.xyz[final_selection, :]), axis=0) 421 | self.points[q_id]['echo_number'] = np.concatenate((self.points[q_id]['echo_number'], inFile.return_number[final_selection]), axis=0) 422 | self.points[q_id]['number_of_echoes'] = np.concatenate((self.points[q_id]['number_of_echoes'], inFile.number_of_returns[final_selection]), axis=0) 423 | self.points[q_id]['intensity'] = np.concatenate((self.points[q_id]['intensity'], inFile.intensity[final_selection]), axis=0) 424 | self.points[q_id]['classification'] = np.concatenate((self.points[q_id]['classification'], inFile.classification[final_selection]), axis=0) 425 | self.points[q_id]['pt_src_id'] = np.concatenate((self.points[q_id]['pt_src_id'], inFile.pt_src_id[final_selection]), axis=0) 426 | self.points[q_id]['scan_angle_rank'] = np.concatenate((self.points[q_id]['scan_angle_rank'], 427 | inFile.scan_angle_rank[final_selection] if hasattr(inFile, 'scan_angle_rank') else inFile.scan_angle[final_selection] 428 | ), axis=0) 429 | 430 | def calc_custom_metrics(self, metrics: MetricCalculator, metric_options=None): 431 | """ 432 | Calculates given metrics for points contained in the polygons given during construction of this class. 433 | 434 | Args: 435 | metrics: a single :class:`pyForMetrix.metricCalculators.MetricCalculator` instance or a :class:`list` of such classes 436 | metric_options: a :class:`list` of :class:`dict` s with options (kwargs) for each `MetricCalculator`, or None. 437 | 438 | Returns: 439 | a :class:`pandas.DataFrame` containing the metrics for each polygon in the input. 
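Example (a minimal sketch; the file names are placeholders)::

    import geopandas as gpd
    from pyForMetrix.metricCalculators.types import MCalc_HeightMetrics, MCalc_DensityMetrics
    pm = PlotMetrics(['tile.laz'], gpd.read_file('plots.gpkg'))
    metrics = pm.calc_custom_metrics([MCalc_HeightMetrics(), MCalc_DensityMetrics()])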
440 | 441 | """ 442 | if metric_options is None: 443 | metric_options = dict() 444 | out_metrics = np.full((len(self.plot_polygons), sum(map(lambda x: len(x.get_names()), metrics))), np.nan) 445 | # plot_names = [] 446 | if not self.silent: 447 | print('Calculating features for plot polygons...', end='') 448 | for q_id, q in tqdm.tqdm(self.plot_polygons.iterrows(), f"Calculating metrics", total=len(self.plot_polygons)): 449 | points_in_poly = self.points[q_id] 450 | if len(points_in_poly['points']) > 0: 451 | out_metrics[q_id] = np.concatenate(list(map(lambda x: x(points_in_poly, **metric_options), metrics))) 452 | out_data = pd.DataFrame(out_metrics, # index=plot_names, 453 | columns= 454 | np.concatenate(list(map(lambda x: x.get_names(), metrics))) 455 | ) 456 | 457 | if not self.silent: 458 | print(' [done]') 459 | return out_data 460 | 461 | def calc_custom_metrics_stripwise(self, metrics: MetricCalculator, metric_options=None): 462 | if metric_options is None: 463 | metric_options = dict() 464 | out_metrics = [] 465 | meta_metrics = [] 466 | # plot_names = [] 467 | if not self.silent: 468 | print('Calculating features for plot polygons...', end='') 469 | for q_id, q in tqdm.tqdm(self.plot_polygons.iterrows(), f"Calculating metrics", total=len(self.plot_polygons)): 470 | points_in_poly = self.points[q_id] 471 | unique_strips = np.unique(points_in_poly['pt_src_id']) 472 | for strip in unique_strips: 473 | points_in_poly_and_strip = {k: v[points_in_poly['pt_src_id'] == strip] for k, v in points_in_poly.items()} 474 | if points_in_poly_and_strip['points'].shape[0] > 3: 475 | areaPoly = q.geometry.area 476 | areaPc = scipy.spatial.ConvexHull(points_in_poly_and_strip['points'][:, :2]).volume 477 | out_metrics.append(np.concatenate(list(map(lambda x: x(points_in_poly_and_strip, **metric_options), metrics)))) 478 | meta_metrics.append(np.array([q_id, areaPc, areaPoly, len(points_in_poly_and_strip['points']), strip, 479 | np.min(points_in_poly_and_strip['scan_angle_rank']), 480 | np.max(points_in_poly_and_strip['scan_angle_rank']), 481 | np.mean(points_in_poly_and_strip['scan_angle_rank']), 482 | ])) 483 | 484 | out_data = pd.DataFrame(out_metrics, # index=plot_names, 485 | columns= 486 | np.concatenate(list(map(lambda x: x.get_names(), metrics))) 487 | ) 488 | out_meta = pd.DataFrame(meta_metrics, columns=['plot_id', 'areaPC', 'areaPoly', 'numPts', 'stripid', 'minSA', 'maxSA', 'meanSA']) 489 | 490 | if not self.silent: 491 | print(' [done]') 492 | return out_data, out_meta 493 | 494 | @deprecated(version="0.0.5", reason="This function is being replaced by calc_custom_metrics") 495 | 496 | def calc_metrics(self): 497 | out_metrics = np.full((len(self.plot_polygons), 8 + len(self.perc) + len(self.p_zabovex)), np.nan) 498 | # plot_names = [] 499 | if not self.silent: 500 | print('Calculating features for plot polygons...', end='') 501 | for q_id, q in self.plot_polygons.iterrows(): 502 | # plot_names.append(q.PLOT) 503 | if self.coords[q_id].shape[0] > 0: 504 | out_metrics[q_id] = calc_standard_metrics(self.coords[q_id], self.p_zabovex, self.perc, None) 505 | out_data = pd.DataFrame(out_metrics, # index=plot_names, 506 | columns= 507 | [ 508 | 'n', 509 | 'area', 510 | 'meanZ', 511 | 'stdZ', 512 | 'skewZ', 513 | 'kurtZ', 514 | 'entropyZ', 515 | 'nAboveMean' 516 | ] + 517 | [f"perc{p}Z" for p in self.perc] + 518 | [f"pAboveX{x}Z" for x in self.p_zabovex] 519 | ) 520 | 521 | if not self.silent: 522 | print(' [done]') 523 | return out_data 524 | 525 | class FileMetrics(Metrics): 526 | def 
__init__(self, lasfiles, silent=True, pbars=True):
527 | """
528 | Class to calculate metrics on a file basis
529 | 
530 | Args:
531 | lasfiles: :class:`list` of input las-Files to consider. Each file is loaded completely and treated as
532 | one compute unit.
533 | silent: :class:`bool` whether to print output or not
534 | pbars: :class:`bool` whether to display progress bars or not
535 | """
536 | self.lasfiles = lasfiles
537 | self.silent = silent
538 | self.pbars = pbars
539 | 
540 | # load all points, file by file
541 | self.points = []
542 | if not isinstance(lasfiles, list):
543 | self.lasfiles = [lasfiles]
544 | 
545 | for lasfile in tqdm.tqdm(self.lasfiles, ncols=150, desc='Loading input files'):
546 | inFile = laspy.read(lasfile)
547 | 
548 | self.points.append({
549 | 'points': inFile.xyz,
550 | 'echo_number': inFile.return_number,
551 | 'number_of_echoes': inFile.number_of_returns,
552 | 'intensity': inFile.intensity,
553 | 'classification': inFile.classification,
554 | 'pt_src_id': inFile.pt_src_id,
555 | 'scan_angle_rank': inFile.scan_angle_rank if hasattr(inFile, 'scan_angle_rank') else inFile.scan_angle
556 | })
557 | 
558 | def calc_custom_metrics(self, metrics: MetricCalculator, metric_options=None):
559 | """
560 | Calculates given metrics for the points of each file loaded during construction of this class.
561 | 
562 | Args:
563 | metrics: a :class:`list` of :class:`pyForMetrix.metricCalculators.MetricCalculator` instances
564 | metric_options: a :class:`dict` with options (kwargs) that are passed to every `MetricCalculator`, or None.
565 | 
566 | Returns:
567 | a :class:`pandas.DataFrame` containing the metrics for each file in the input.
568 | 
569 | """
570 | if metric_options is None:
571 | metric_options = dict()
572 | out_metrics = np.full((len(self.points), sum(map(lambda x: len(x.get_names()), metrics))), np.nan)
573 | if not self.silent:
574 | print('Calculating features for input files...', end='')
575 | for q_id in tqdm.tqdm(range(len(self.lasfiles)), "Calculating metrics", total=len(self.lasfiles)):
576 | points_in_file = self.points[q_id]
577 | if len(points_in_file['points']) > 0:
578 | out_metrics[q_id] = np.concatenate(list(map(lambda x: x(points_in_file, **metric_options), metrics)))
579 | out_data = pd.DataFrame(out_metrics,
580 | columns=
581 | np.concatenate(list(map(lambda x: x.get_names(), metrics)))
582 | )
583 | 
584 | if not self.silent:
585 | print(' [done]')
586 | return out_data
587 | 
588 | 
589 | if __name__ == '__main__':
590 | pass
-------------------------------------------------------------------------------- /src/pyForMetrix/normalizer.py: --------------------------------------------------------------------------------
1 | import enum
2 | 
3 | import numpy as np
4 | import tqdm
5 | from scipy.spatial import cKDTree
6 | 
7 | from pyForMetrix.utils.rasterizer import Rasterizer
8 | 
9 | class NormalizeMode(enum.Enum):
10 | CylinderMode = 0
11 | CellMode = 1
12 | 
13 | def normalize(points, distance=3, mode=NormalizeMode.CellMode, percentile=5, show_progress=True, add_as_entry=False):
14 | """
15 | Function to normalize a point cloud.
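The local ground level is estimated as a (low) height percentile of the points within each search cylinder or raster cell, and subtracted from the z coordinates.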
16 | Args:
17 | points: :class:`dict` with a key 'points', which contains an n x 3 :class:`numpy.ndarray` with point coordinates
18 | distance: :class:`int`, the search radius or the cell size (depending on the `mode`)
19 | mode: :class:`NormalizeMode`
20 | percentile: :class:`float`, which percentile to use as 'ground' (default: 5, 0 <= value <= 100)
21 | show_progress: :class:`bool`, whether to print progress or not.
22 | add_as_entry: :class:`bool`, whether to add the normalized height as an entry to the input dictionary (key `nZ`)
23 | or to overwrite the z coordinates (default)
24 | 
25 | Returns: a pointer to the input `points` :class:`dict`, with an added key `nZ` containing the normalized height
26 | (in case `add_as_entry` is set to `True`), or with the z values overwritten (default).
27 | 
28 | """
29 | nZ = np.copy(points['points'][:, 2])
30 | 
31 | if mode == NormalizeMode.CylinderMode:
32 | tree = cKDTree(points['points'][:, 0:2])  # build kD-Tree on x/y coordinates
33 | if show_progress:
34 | print("Building kD-Tree for point normalization...")
35 | neighbours = tree.query_ball_tree(tree, r=distance)
36 | # get height percentile of the neighbourhood as local ground estimate
37 | it = tqdm.tqdm(enumerate(neighbours), desc="Normalizing LiDAR points") if show_progress else enumerate(neighbours)
38 | for nIdx, neighbour in it:
39 | perc = np.percentile(points['points'][neighbour, 2], percentile)  # height percentile of the neighbouring points
40 | nZ[nIdx] -= perc
41 | elif mode == NormalizeMode.CellMode:
42 | ras = Rasterizer(points['points'][:, 0:2], raster_size=distance)
43 | if show_progress:
44 | print("Rasterizing for point normalization...")
45 | _, XVoxelContains, _, _ = ras.rasterize()
46 | it = tqdm.tqdm(XVoxelContains, desc="Normalizing LiDAR points") if show_progress else XVoxelContains
47 | for contains in it:
48 | # get height percentile of the cell as local ground estimate
49 | perc = np.percentile(points['points'][contains, 2], percentile)
50 | nZ[contains] -= perc
51 | else:
52 | raise NotImplementedError(f"Unknown NormalizeMode: '{mode}'.
" 53 | f"Supported modes: {', '.join([mode.name for mode in NormalizeMode])}") 54 | 55 | points['points'][:,2] = nZ 56 | return points 57 | 58 | 59 | if __name__ == '__main__': 60 | from pyForMetrix.metricCalculators import MetricCalculator 61 | import warnings 62 | 63 | class CHMmetric(MetricCalculator): 64 | name = "chm" 65 | def get_names(self): 66 | return [ 67 | 'chm' 68 | ] 69 | 70 | def __call__(self, points_in_poly: dict): 71 | points = points_in_poly['points'] 72 | outArray = np.full((len(self),), np.nan) 73 | with warnings.catch_warnings(): 74 | warnings.simplefilter("ignore") 75 | outArray[0] = np.max(points[:, 2]) 76 | return outArray 77 | 78 | 79 | import laspy 80 | file = laspy.read(r"C:\Users\Lukas\Documents\Projects\pyForMetrix\demo\las_623_5718_1_th_2014-2019.laz") 81 | points = { 82 | 'points': file.xyz 83 | } 84 | points = normalize(points, percentile=1) 85 | print(np.ptp(points['points'][:, 2])) 86 | print(np.ptp(points['nZ'])) 87 | 88 | origZ = points['points'][:, 2].copy() 89 | 90 | from pyForMetrix.metrix import RasterMetrics 91 | for z in (origZ, points['points'][:, 2]): 92 | pts = points.copy() 93 | pts['points'][:, 2] = z 94 | rm = RasterMetrics(pts, raster_size=5) 95 | mc = CHMmetric() 96 | rasters = rm.calc_custom_metrics(mc) 97 | 98 | from matplotlib import pyplot as plt 99 | plt.imshow(rasters.sel(val='chm')) 100 | plt.show() -------------------------------------------------------------------------------- /src/pyForMetrix/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/utils/__init__.py -------------------------------------------------------------------------------- /src/pyForMetrix/utils/rasterizer.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | 3 | import functools 4 | 5 | import numpy as np 6 | 7 | class Rasterizer: 8 | def __init__(self, data, raster_size=(1, 1), method="random"): 9 | """ 10 | 11 | Args: 12 | data: 13 | raster_size: 14 | method: 15 | """ 16 | self.data = data 17 | if type(raster_size) is not tuple: 18 | raster_size = (raster_size, raster_size) 19 | self.voxel_size = raster_size 20 | self.method = method 21 | 22 | def rasterize(self, origin=None): 23 | """ 24 | Function to rasterize point cloud data 25 | Adapted from Glira (https://github.com/pglira/Point_cloud_tools_for_Matlab/ 26 | blob/master/classes/4pointCloud/uniformSampling.m) 27 | 28 | Args: 29 | origin: 30 | 31 | Returns: 32 | """ 33 | # No.of points 34 | noPoi = self.data.shape[0] 35 | 36 | if origin is None: 37 | # Find voxel centers 38 | # Point with smallest coordinates 39 | minPoi = np.min(self.data, axis=0) 40 | 41 | # Rounded local origin for voxel structure 42 | # (voxels of different pcs have coincident voxel centers if mod(100, voxelSize) == 0) 43 | # localOrigin = np.floor(minPoi / 100) * 100 44 | localOrigin = np.floor(minPoi / 1) * 1 45 | else: 46 | localOrigin = origin 47 | 48 | # Find 3 - dimensional indices of voxels in which points are lying 49 | idxVoxel = np.array([np.floor((self.data[:, 0] - localOrigin[0]) / self.voxel_size[0]), 50 | np.floor((self.data[:, 1] - localOrigin[1]) / self.voxel_size[1])]).T 51 | 52 | # Remove multiple voxels 53 | idxVoxelUnique, ic = np.unique(idxVoxel, axis=0, 54 | return_inverse=True) # ic contains "voxel index" for each point 55 | 56 | # Coordinates of voxel centers 57 | 


if __name__ == '__main__':
    from pyForMetrix.metricCalculators import MetricCalculator
    import warnings

    class CHMmetric(MetricCalculator):
        name = "chm"
        def get_names(self):
            return [
                'chm'
            ]

        def __call__(self, points_in_poly: dict):
            points = points_in_poly['points']
            outArray = np.full((len(self),), np.nan)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                outArray[0] = np.max(points[:, 2])
            return outArray


    import laspy
    file = laspy.read(r"C:\Users\Lukas\Documents\Projects\pyForMetrix\demo\las_623_5718_1_th_2014-2019.laz")
    points = {
        'points': file.xyz
    }
    # keep the original z values and add the normalized heights as 'nZ'
    points = normalize(points, percentile=1, add_as_entry=True)
    print(np.ptp(points['points'][:, 2]))  # height range before normalization
    print(np.ptp(points['nZ']))            # height range after normalization

    origZ = points['points'][:, 2].copy()

    from pyForMetrix.metrix import RasterMetrics
    from matplotlib import pyplot as plt
    # compare the CHM computed from the original vs. the normalized heights
    for z in (origZ, points['nZ']):
        # note: dict.copy() is shallow, so the assignment below intentionally
        # writes into the shared coordinate array
        pts = points.copy()
        pts['points'][:, 2] = z
        rm = RasterMetrics(pts, raster_size=5)
        mc = CHMmetric()
        rasters = rm.calc_custom_metrics(mc)

        plt.imshow(rasters.sel(val='chm'))
        plt.show()
--------------------------------------------------------------------------------
/src/pyForMetrix/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/src/pyForMetrix/utils/__init__.py
--------------------------------------------------------------------------------
/src/pyForMetrix/utils/rasterizer.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division

import numpy as np

class Rasterizer:
    def __init__(self, data, raster_size=(1, 1), method="random"):
        """
        Rasterizer for 2D (x/y) binning of point clouds.

        Args:
            data: n x 2 (or n x 3) :class:`numpy.ndarray` of point coordinates; only the first two
                columns are used for binning, a third (z) column is required by :meth:`to_matrix`
            raster_size: cell size, either a scalar or an (x, y) tuple
            method: how the representative point per cell is selected, either "random" or "closest"
        """
        self.data = data
        if type(raster_size) is not tuple:
            raster_size = (raster_size, raster_size)
        self.voxel_size = raster_size
        self.method = method

    def rasterize(self, origin=None):
        """
        Function to rasterize point cloud data.
        Adapted from Glira (https://github.com/pglira/Point_cloud_tools_for_Matlab/
        blob/master/classes/4pointCloud/uniformSampling.m)

        Args:
            origin: optional (x, y) origin of the raster grid; if None (default), it is derived
                from the smallest point coordinates

        Returns:
            a tuple (XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex): the cell center
            coordinates, the point indices contained in each cell, the integer cell indices, and
            the index of the representative point per cell (selected according to `method`)
        """
        # No. of points
        noPoi = self.data.shape[0]

        if origin is None:
            # Find voxel centers
            # Point with smallest coordinates
            minPoi = np.min(self.data, axis=0)

            # Rounded local origin for voxel structure
            # (voxels of different pcs have coincident voxel centers if mod(100, voxelSize) == 0)
            # localOrigin = np.floor(minPoi / 100) * 100
            localOrigin = np.floor(minPoi / 1) * 1
        else:
            localOrigin = origin

        # Find 2-dimensional indices of the raster cells in which the points are lying
        idxVoxel = np.array([np.floor((self.data[:, 0] - localOrigin[0]) / self.voxel_size[0]),
                             np.floor((self.data[:, 1] - localOrigin[1]) / self.voxel_size[1])]).T

        # Keep each cell only once
        idxVoxelUnique, ic = np.unique(idxVoxel, axis=0,
                                       return_inverse=True)  # ic contains "voxel index" for each point

        # Coordinates of voxel centers
        XVoxelCenter = [localOrigin[0] + self.voxel_size[0] / 2 + idxVoxelUnique[:, 0] * self.voxel_size[0],
                        localOrigin[1] + self.voxel_size[1] / 2 + idxVoxelUnique[:, 1] * self.voxel_size[1]]

        # No. of voxels (equal to the no. of selected points)
        noVoxel = len(XVoxelCenter[0])

        # Prepare list for every output voxel
        XVoxelContains = [[] for i in range(noVoxel)]
        XClosestIndex = np.full((noVoxel,), np.nan, dtype=float)

        # Select points nearest to the voxel centers --------------------------------

        # Sort indices and points (in order to find points inside of voxels very fast in the next loop)
        idxSort = np.argsort(ic)
        ic = ic[idxSort]

        data_sorted = self.data[idxSort, :]
        idxJump, = np.nonzero(np.diff(ic))
        idxJump += 1

        # Example (3 voxels)
        # ic       = [1 1 1 2 2 2 3]';
        # diff(ic) = [0 0 1 0 0 1]';
        # idxJump  = [3 6]';
        #
        # idxInVoxel = [1 2 3]; for voxel 1
        # idxInVoxel = [4 5 6]; for voxel 2
        # idxInVoxel = [7];     for voxel 3

        for i in range(noVoxel):
            # Find indices of points inside of voxel (very, very fast this way)
            if i == 0:
                if i == noVoxel - 1:
                    idxInVoxel = slice(0, noPoi)
                else:
                    idxInVoxel = slice(0, idxJump[i])
            elif i == noVoxel - 1:
                idxInVoxel = slice(idxJump[i - 1], noPoi)
            else:
                idxInVoxel = slice(idxJump[i - 1], idxJump[i])

            # Fill voxel information
            XVoxelContains[i] = np.array(idxSort[idxInVoxel], dtype=int)

            # Get point closest to voxel center
            if self.method == "closest":
                distsSq = ((data_sorted[idxInVoxel, 0] - XVoxelCenter[0][i]) ** 2 +
                           (data_sorted[idxInVoxel, 1] - XVoxelCenter[1][i]) ** 2)
                closestIdxInVoxel = np.argmin(distsSq)
                XClosestIndex[i] = idxSort[idxInVoxel.start + closestIdxInVoxel]
            elif self.method == "random":
                XClosestIndex[i] = np.random.choice(XVoxelContains[i])

        return XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex

    def to_matrix(self, reducer=np.mean):
        """
        Reduce the rasterized point cloud to a regular 2D grid, applying `reducer`
        to the z values of the points in each cell.
        """
        assert self.voxel_size[0] == self.voxel_size[1], "to_matrix only works with square pixels"
        cells = self.voxel_size[0]
        XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = self.rasterize()
        min_cell_x = np.min(XVoxelCenter[0])
        min_cell_y = np.min(XVoxelCenter[1])
        data_range_x = int((np.max(XVoxelCenter[0]) - np.min(XVoxelCenter[0])) / cells) + 1
        data_range_y = int((np.max(XVoxelCenter[1]) - np.min(XVoxelCenter[1])) / cells) + 1
        data_z = np.full((data_range_x, data_range_y), np.nan)
        data_x = np.arange(np.min(XVoxelCenter[0]), np.max(XVoxelCenter[0]) + cells, cells)
        data_y = np.arange(np.min(XVoxelCenter[1]), np.max(XVoxelCenter[1]) + cells, cells)
        for xcenter, ycenter, contains in zip(XVoxelCenter[0], XVoxelCenter[1], XVoxelContains):
            cellX = int((xcenter - min_cell_x) / cells)
            cellY = int((ycenter - min_cell_y) / cells)
            data_z[cellX, cellY] = reducer(self.data[contains, 2])
        return data_x, data_y, data_z
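

# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch for `Rasterizer`. Illustrative
# only; the synthetic point cloud and the 1 m cell size are assumptions.
def _example_rasterizer_usage():
    rng = np.random.default_rng(42)
    xyz = rng.uniform(0, 10, size=(1000, 3))  # synthetic points, 10 m x 10 m area
    ras = Rasterizer(xyz, raster_size=1.0, method="closest")
    # per-cell point groups and one representative point per cell
    XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = ras.rasterize()
    # simple surface model: maximum z per 1 m x 1 m cell
    data_x, data_y, data_z = ras.to_matrix(reducer=np.max)
    return data_x, data_y, data_z
# ---------------------------------------------------------------------------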


def plot_result(data, voxelIdx, closestIdx, vox_size_x, vox_size_y, vox_size_z):
    import matplotlib.pyplot as plt
    # This import registers the 3D projection, but is otherwise unused.
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import
    closestIdx = np.asarray(closestIdx, dtype=int)  # XClosestIndex is stored as float (NaN placeholder)
    mins, maxes = np.min(voxelIdx, axis=0), np.max(voxelIdx, axis=0)
    x_range = int((maxes[0] - mins[0]) / 1) + 1
    y_range = int((maxes[1] - mins[1]) / 1) + 1
    z_range = int((maxes[2] - mins[2]) / 1) + 1
    # prepare some coordinates
    voxels = np.zeros((x_range, y_range, z_range), bool)
    x, y, z = np.indices([v + 1 for v in voxels.shape]).astype(float)
    x = x * vox_size_x  # - vox_size_x/2
    y = y * vox_size_y  # - vox_size_y/2
    z = z * vox_size_z  # - vox_size_z/2
    # create binary representation from point list
    for voxIdx in voxelIdx:
        idxlist = (voxIdx - mins).astype(int).tolist()
        voxels[idxlist[0], idxlist[1], idxlist[2]] = True
    # set the colors of each object
    colors = np.empty(voxels.shape, dtype=object)
    colors[voxels] = "red"
    # and plot everything
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') was removed in matplotlib 3.6
    ax.voxels(x, y, z, voxels, facecolors=colors, edgecolor='k', alpha=0.01)
    data -= np.floor(np.min(data, axis=0))
    Xr = data[:, 0]
    X = data[closestIdx, 0]
    Yr = data[:, 1]
    Y = data[closestIdx, 1]
    Zr = data[:, 2]
    Z = data[closestIdx, 2]
    ax.scatter(X, Y, Z, facecolor='b', s=2, linewidths=0)
    ax.scatter(Xr, Yr, Zr, facecolor='g', s=0.1, linewidths=0)
    max_range = np.array([X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max()
    Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (X.max() + X.min())
    Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())
    Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())
    # plot invisible points at the bounding box corners to force an equal aspect ratio
    for xb, yb, zb in zip(Xb, Yb, Zb):
        ax.plot([xb], [yb], [zb], 'w')
    plt.show()

def save_subsample_pcloud(data, closestIdx, fname, delim=","):
    data_subs = data[np.asarray(closestIdx, dtype=int), :]
    np.savetxt(fname, data_subs, delimiter=delim, fmt="%.3f")
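
# ---------------------------------------------------------------------------
# Editor's note: a hedged mini-demo of the idxJump grouping trick used in
# `Rasterizer.rasterize` (and `Voxelizer.voxelize`), reproducing the 3-voxel
# example from the comments above. Illustrative only.
def _example_idxjump_grouping():
    ic = np.array([0, 0, 0, 1, 1, 1, 2])  # cell index per (sorted) point
    idxJump, = np.nonzero(np.diff(ic))    # positions where the cell index changes
    idxJump += 1                          # -> [3, 6]
    bounds = [0, *idxJump.tolist(), len(ic)]
    groups = [list(range(a, b)) for a, b in zip(bounds[:-1], bounds[1:])]
    assert groups == [[0, 1, 2], [3, 4, 5], [6]]
    return groups
# ---------------------------------------------------------------------------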

if __name__ == '__main__':
    pass
--------------------------------------------------------------------------------
/src/pyForMetrix/utils/voxelizer.py:
--------------------------------------------------------------------------------
from __future__ import print_function, division
import numpy as np


class Voxelizer:
    def __init__(self, data, voxel_size=(1, 1, 1), method="random"):
        """
        Voxelizer for 3D (x/y/z) binning of point clouds.

        :param data: n x 3 numpy array of point coordinates
        :param voxel_size: voxel size, either a scalar or an (x, y, z) tuple
        :param method: how the representative point per voxel is selected, either "random" or "closest"
        """
        self.data = data
        if type(voxel_size) is not tuple:
            voxel_size = (voxel_size, voxel_size, voxel_size)
        self.voxel_size = voxel_size
        self.method = method

    def voxelize(self, origin=None):
        """
        Function to voxelize point cloud data.
        Adapted from Glira (https://github.com/pglira/Point_cloud_tools_for_Matlab/
        blob/master/classes/4pointCloud/uniformSampling.m)

        :param origin: optional (x, y, z) origin of the voxel grid; if None (default), it is
            derived from the smallest point coordinates
        :return: a tuple (XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex), analogous
            to :meth:`Rasterizer.rasterize`, but in three dimensions
        """
        # No. of points
        noPoi = self.data.shape[0]

        if origin is None:
            # Find voxel centers
            # Point with smallest coordinates
            minPoi = np.min(self.data, axis=0)

            # Rounded local origin for voxel structure
            # (voxels of different pcs have coincident voxel centers if mod(100, voxelSize) == 0)
            # localOrigin = np.floor(minPoi / 100) * 100
            localOrigin = np.floor(minPoi / 1) * 1
        else:
            localOrigin = origin

        # Find 3-dimensional indices of the voxels in which the points are lying
        idxVoxel = np.array([np.floor((self.data[:, 0] - localOrigin[0]) / self.voxel_size[0]),
                             np.floor((self.data[:, 1] - localOrigin[1]) / self.voxel_size[1]),
                             np.floor((self.data[:, 2] - localOrigin[2]) / self.voxel_size[2])]).T

        # Keep each voxel only once
        idxVoxelUnique, ic = np.unique(idxVoxel, axis=0,
                                       return_inverse=True)  # ic contains "voxel index" for each point

        # Coordinates of voxel centers
        XVoxelCenter = [localOrigin[0] + self.voxel_size[0] / 2 + idxVoxelUnique[:, 0] * self.voxel_size[0],
                        localOrigin[1] + self.voxel_size[1] / 2 + idxVoxelUnique[:, 1] * self.voxel_size[1],
                        localOrigin[2] + self.voxel_size[2] / 2 + idxVoxelUnique[:, 2] * self.voxel_size[2]]

        # No. of voxels (equal to the no. of selected points)
        noVoxel = len(XVoxelCenter[0])

        # Prepare list for every output voxel
        XVoxelContains = [[] for i in range(noVoxel)]
        XClosestIndex = np.full((noVoxel,), np.nan, dtype=float)

        # Select points nearest to the voxel centers --------------------------------

        # Sort indices and points (in order to find points inside of voxels very fast in the next loop)
        idxSort = np.argsort(ic)
        ic = ic[idxSort]

        data_sorted = self.data[idxSort, :]
        idxJump, = np.nonzero(np.diff(ic))
        idxJump += 1

        # Example (3 voxels)
        # ic       = [1 1 1 2 2 2 3]';
        # diff(ic) = [0 0 1 0 0 1]';
        # idxJump  = [3 6]';
        #
        # idxInVoxel = [1 2 3]; for voxel 1
        # idxInVoxel = [4 5 6]; for voxel 2
        # idxInVoxel = [7];     for voxel 3

        for i in range(noVoxel):
            # Find indices of points inside of voxel (very, very fast this way)
            if i == 0:
                if i == noVoxel - 1:
                    idxInVoxel = slice(0, noPoi)
                else:
                    idxInVoxel = slice(0, idxJump[i])
            elif i == noVoxel - 1:
                idxInVoxel = slice(idxJump[i - 1], noPoi)
            else:
                idxInVoxel = slice(idxJump[i - 1], idxJump[i])

            # Fill voxel information
            XVoxelContains[i] = np.array(idxSort[idxInVoxel], dtype=int)

            # Get point closest to voxel center
            if self.method == "closest":
                distsSq = ((data_sorted[idxInVoxel, 0] - XVoxelCenter[0][i]) ** 2 +
                           (data_sorted[idxInVoxel, 1] - XVoxelCenter[1][i]) ** 2 +
                           (data_sorted[idxInVoxel, 2] - XVoxelCenter[2][i]) ** 2)
                closestIdxInVoxel = np.argmin(distsSq)
                XClosestIndex[i] = idxSort[idxInVoxel.start + closestIdxInVoxel]
            elif self.method == "random":
                XClosestIndex[i] = np.random.choice(XVoxelContains[i])

        return XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex
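

# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch for `Voxelizer`. Illustrative
# only; the synthetic point cloud and the 0.5 m voxel size are assumptions.
def _example_voxelizer_usage():
    rng = np.random.default_rng(0)
    xyz = rng.uniform(0, 5, size=(500, 3))  # synthetic points in a 5 m cube
    vox = Voxelizer(xyz, voxel_size=0.5, method="random")
    XVoxelCenter, XVoxelContains, idxVoxelUnique, XClosestIndex = vox.voxelize()
    # thin the cloud to one (random) representative point per occupied voxel
    subsampled = xyz[XClosestIndex.astype(int), :]
    return subsampled
# ---------------------------------------------------------------------------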
--------------------------------------------------------------------------------
/tests/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lwiniwar/pyForMetrix/736d0de2faa156511cd97f8a1a0e7e6e06b6e5ea/tests/.gitkeep
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
geopandas
laspy[laszip]
wget
--------------------------------------------------------------------------------
/tests/test_plot_metrics.py:
--------------------------------------------------------------------------------
import geopandas, pathlib
datadir = pathlib.Path(__file__).parent / '../demo'

def ensure_netzkater_data():
    import os, wget, zipfile
    if not os.path.exists(datadir / 'las_623_5718_1_th_2014-2019.laz'):
        if not os.path.exists(datadir / 'data_netzkater.zip'):
            print('Downloading file')
            wget.download(
                'https://geoportal.geoportal-th.de/hoehendaten/LAS/las_2014-2019/las_623_5718_1_th_2014-2019.zip',
                str((datadir / 'data_netzkater.zip').absolute()))
        print('Unzipping file')
        zipfile.ZipFile(str((datadir / 'data_netzkater.zip').absolute())).extractall(str(datadir.absolute()))


def test_paper_metrics():
    from pyForMetrix.metricCalculators.publications import MCalc_Hollaus_et_al_2009, MCalc_White_et_al_2015, \
        MCalc_Xu_et_al_2019, MCalc_Woods_et_al_2009
    from pyForMetrix.metrix import PlotMetrics
    ensure_netzkater_data()
    polys = geopandas.read_file(datadir / 'netzkater_polygons.gpkg')
    pm = PlotMetrics([datadir / 'las_623_5718_1_th_2014-2019.laz'], polys)
    mcs = [MCalc_Hollaus_et_al_2009(), MCalc_White_et_al_2015(), MCalc_Xu_et_al_2019(), MCalc_Woods_et_al_2009()]
    results = pm.calc_custom_metrics(mcs)
    assert results.shape == (7, 62)
    print(results)

def test_lidRmetrics():
    from pyForMetrix.metricCalculators.types import \
        MCalc_lidRmetrics_lad, \
        MCalc_lidRmetrics_kde, \
        MCalc_lidRmetrics_dispersion, \
        MCalc_lidRmetrics_voxels, \
        MCalc_lidRmetrics_HOME, \
        MCalc_lidRmetrics_percabove, \
        MCalc_lidRmetrics_echo, \
        MCalc_lidRmetrics_basic, \
        MCalc_lidRmetrics_Lmoments, \
        MCalc_lidRmetrics_rumple, \
        MCalc_lidRmetrics_percentiles, \
        MCalc_lidRmetrics_interval, \
        MCalc_lidRmetrics_canopydensity

    from pyForMetrix.metrix import PlotMetrics
    ensure_netzkater_data()
    polys = geopandas.read_file(datadir / 'netzkater_polygons.gpkg')
    pm = PlotMetrics([datadir / 'las_623_5718_1_th_2014-2019.laz'], polys)
    mcs = [MCalc_lidRmetrics_lad(),
           MCalc_lidRmetrics_kde(),
           MCalc_lidRmetrics_dispersion(),
           MCalc_lidRmetrics_voxels(),
           MCalc_lidRmetrics_HOME(),
           MCalc_lidRmetrics_percabove(),
           MCalc_lidRmetrics_echo(),
           MCalc_lidRmetrics_basic(),
           MCalc_lidRmetrics_rumple(),
           MCalc_lidRmetrics_percentiles(),
           MCalc_lidRmetrics_interval(),
           MCalc_lidRmetrics_canopydensity()]
    results = pm.calc_custom_metrics(mcs)
    assert results.shape == (7, 78)
    print(results)
--------------------------------------------------------------------------------
/tests/test_raster_metrics.py:
--------------------------------------------------------------------------------
from test_plot_metrics import ensure_netzkater_data, datadir
import laspy


def test_group_metrics():
    from pyForMetrix.metricCalculators.types import MCalc_VisMetrics, MCalc_DensityMetrics, \
        MCalc_HeightMetrics, MCalc_EchoMetrics, MCalc_CoverMetrics, MCalc_VarianceMetrics
    from pyForMetrix.metrix import RasterMetrics
    from pyForMetrix.normalizer import normalize
    ensure_netzkater_data()
    data = laspy.read(datadir / 'las_623_5718_1_th_2014-2019.laz')
    points = {
        'points': data.xyz,
        'classification': data.classification,
        'echo_number': data.return_number,
        'scan_angle_rank': data.scan_angle_rank,
        'pt_src_id': data.point_source_id
    }
    normalize(points)
    rm = RasterMetrics(points, raster_size=25)
    mcs = [MCalc_EchoMetrics(), MCalc_DensityMetrics(), MCalc_CoverMetrics(),
           MCalc_VarianceMetrics(), MCalc_HeightMetrics(), MCalc_VisMetrics()]
    results = rm.calc_custom_metrics(mcs)
    assert results.shape == (40, 40, 35)
    assert abs(results.sel({'val': 'p100'}).data.max() - 105.63799999) < 0.0001

    print(results)

def test_lidRmetrics_echo_metrics():
    from pyForMetrix.metricCalculators.types import MCalc_lidRmetrics_echo
    from pyForMetrix.metrix import RasterMetrics
    from pyForMetrix.normalizer import normalize
    ensure_netzkater_data()
    data = laspy.read(datadir / 'las_623_5718_1_th_2014-2019.laz')
    points = {
        'points': data.xyz,
        'echo_number': data.return_number,
        'number_of_echoes': data.number_of_returns,
    }
    normalize(points)
    rm = RasterMetrics(points, raster_size=25)
    mcs = [MCalc_lidRmetrics_echo()]
    results = rm.calc_custom_metrics(mcs)
--------------------------------------------------------------------------------