├── examples
│   ├── .gitkeep
│   └── data_catalog.yml
├── tests
│   ├── __init__.py
│   ├── test_cli.py
│   ├── data
│   │   ├── conftest.py
│   │   └── test_fetch.py
│   ├── drivers
│   │   ├── conftest.py
│   │   ├── test_resolver.py
│   │   └── test_osm_driver.py
│   ├── system
│   │   ├── conftest.py
│   │   ├── test_system_grid.py
│   │   └── test_system_geom.py
│   ├── integration
│   │   ├── conftest.py
│   │   ├── test_io.py
│   │   └── test_build.py
│   ├── gis
│   │   ├── conftest.py
│   │   ├── test_raster_utils.py
│   │   └── test_gis_utils.py
│   ├── test_utils.py
│   ├── workflows
│   │   ├── test_utils.py
│   │   ├── test_hazard.py
│   │   ├── test_exposure_grid.py
│   │   ├── conftest.py
│   │   ├── test_vulnerability.py
│   │   └── test_damage.py
│   ├── components
│   │   ├── conftest.py
│   │   ├── test_component_utils.py
│   │   ├── test_hazard_component.py
│   │   ├── test_exposure_grid_component.py
│   │   ├── test_config_component.py
│   │   ├── test_grid_component.py
│   │   └── test_vulnerability_component.py
│   └── conftest.py
├── docs
│   ├── _templates
│   │   └── .gitkeep
│   ├── dev
│   │   ├── roadmap.rst
│   │   └── index.rst
│   ├── changelog.rst
│   ├── user_guide
│   │   └── index.rst
│   ├── setup_guide
│   │   └── index.rst
│   ├── index.rst
│   ├── api.rst
│   ├── make.bat
│   ├── _static
│   │   ├── theme-deltares.css
│   │   ├── deltares-blue.svg
│   │   ├── deltares-white.svg
│   │   └── fiat.svg
│   └── conf.py
├── src
│   └── hydromt_fiat
│       ├── gis
│       │   ├── __init__.py
│       │   ├── raster_utils.py
│       │   └── utils.py
│       ├── __init__.py
│       ├── data
│       │   ├── __init__.py
│       │   ├── registry.json
│       │   └── fetch.py
│       ├── drivers
│       │   ├── __init__.py
│       │   ├── resolvers.py
│       │   └── osm_driver.py
│       ├── version.py
│       ├── errors.py
│       ├── components
│       │   ├── __init__.py
│       │   ├── utils.py
│       │   ├── grid.py
│       │   ├── config.py
│       │   └── region.py
│       ├── workflows
│       │   ├── __init__.py
│       │   ├── utils.py
│       │   ├── hazard.py
│       │   ├── exposure_grid.py
│       │   ├── damage.py
│       │   └── vulnerability.py
│       └── utils.py
├── .gitattributes
├── MANIFEST.in
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── feature_request.yaml
│   │   ├── documentation.yaml
│   │   └── bugs.yaml
│   ├── workflows
│   │   ├── ci_lint.yml
│   │   ├── ci_test.yml
│   │   ├── ci_pages.yml
│   │   ├── ci_lock.yml
│   │   ├── ci_sonar.yml
│   │   ├── ci_docs.yml
│   │   ├── test.yml
│   │   └── pypi.yml
│   ├── pull_request_template.md
│   └── dependabot.yml
├── sonar-project.properties
├── .pre-commit-config.yaml
├── .gitignore
└── README.rst
/examples/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/test_cli.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/data/conftest.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/docs/_templates/.gitkeep:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/drivers/conftest.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/system/conftest.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/integration/conftest.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/tests/system/test_system_grid.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/docs/dev/roadmap.rst:
--------------------------------------------------------------------------------
Roadmap
=======
--------------------------------------------------------------------------------
/src/hydromt_fiat/gis/__init__.py:
--------------------------------------------------------------------------------
"""GIS submodule."""
--------------------------------------------------------------------------------
/docs/changelog.rst:
--------------------------------------------------------------------------------
.. _changelog:

=========
Changelog
=========
--------------------------------------------------------------------------------
/docs/user_guide/index.rst:
--------------------------------------------------------------------------------
.. _index_user_guide:

==========
User guide
==========
--------------------------------------------------------------------------------
/docs/setup_guide/index.rst:
--------------------------------------------------------------------------------
.. _index_setup_guide:

===========
Setup guide
===========
--------------------------------------------------------------------------------
/src/hydromt_fiat/__init__.py:
--------------------------------------------------------------------------------
"""HydroMT plugin for Delft-FIAT."""

from .fiat import FIATModel
from .version import __version__
--------------------------------------------------------------------------------
/src/hydromt_fiat/data/__init__.py:
--------------------------------------------------------------------------------
"""Data submodule for fetching and storing data."""

from .fetch import fetch_data

__all__ = ["fetch_data"]
--------------------------------------------------------------------------------
/src/hydromt_fiat/drivers/__init__.py:
--------------------------------------------------------------------------------
"""Custom HydroMT drivers."""

from .osm_driver import OSMDriver

__all__ = ["OSMDriver"]

__hydromt_eps__ = ["OSMDriver"]
--------------------------------------------------------------------------------
/src/hydromt_fiat/version.py:
--------------------------------------------------------------------------------
"""HydroMT-FIAT version."""

major = 1
minor = 0
patch = 0
suffix = ".dev"

__version__ = f"{major}.{minor}.{patch}{suffix}"
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
* text eol=lf
*.gpkg binary
*.yaml text eol=lf
*.yml text eol=lf
*.png binary
*.nc binary

# GitHub syntax highlighting
pixi.lock linguist-language=YAML
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
.. include:: ../README.rst

.. toctree::
   :titlesonly:
   :hidden:

   setup_guide/index.rst
   user_guide/index.rst
   api.rst
   dev/index.rst
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
# No egg-info pls, except for sources.txt
prune **/*.egg-info

# Include extra in the sdist
recursive-include tests *

# Remove unwanted stuff from sdist
recursive-exclude **/__pycache__ *
--------------------------------------------------------------------------------
/src/hydromt_fiat/errors.py:
--------------------------------------------------------------------------------
"""Custom HydroMT-FIAT errors."""


class MissingRegionError(Exception):
    """Exception class for a missing region."""

    def __init__(self, message):
        super().__init__(message)
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
blank_issues_enabled: false
contact_links:
  - name: Ask a question
    url: https://github.com/Deltares/hydromt_fiat/discussions
    about: Ask questions and discuss with other community members
--------------------------------------------------------------------------------
/tests/drivers/test_resolver.py:
--------------------------------------------------------------------------------
from hydromt_fiat.drivers.resolvers import OSMResolver


def test_osm_resolver():
    # Create the object
    obj = OSMResolver()

    # Return the correct uri
    res = obj.resolve("foo/bar/baz")

    # Assert the output
    assert isinstance(res, list)
    assert res[0] == "baz"
--------------------------------------------------------------------------------
/.github/workflows/ci_lint.yml:
--------------------------------------------------------------------------------
---
name: Linting

on:
  pull_request:
  push:
    branches:
      - main

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/setup-python@v5
      - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd
--------------------------------------------------------------------------------
/docs/dev/index.rst:
--------------------------------------------------------------------------------
Developments
============

These pages provide guidelines on how to contribute to HydroMT-FIAT.
Furthermore, they inform you about planned developments and new features.

.. toctree::
   :caption: Table of Contents
   :maxdepth: 2

   ../changelog.rst
   roadmap.rst
   Developer's guide
--------------------------------------------------------------------------------
/tests/gis/conftest.py:
--------------------------------------------------------------------------------
import numpy as np
import pytest
import xarray as xr


@pytest.fixture
def simple_raster() -> xr.DataArray:
    da = xr.DataArray(
        data=np.ones((8, 8)),
        coords={
            "y": list(range(7, -1, -1)),
            "x": list(range(0, 8, 1)),
        },
        dims=("y", "x"),
    )
    da.raster.set_crs(4326)
    return da
--------------------------------------------------------------------------------
/sonar-project.properties:
--------------------------------------------------------------------------------
# Unique project key
sonar.projectKey = Deltares_hydromt_fiat

# Project settings
sonar.organization = deltares
sonar.projectName = hydromt_fiat
sonar.projectVersion = 1.0.0
sonar.python.version = 3.11, 3.12, 3.13

# Pathing
sonar.python.coverage.reportPaths = coverage.xml
sonar.sources = src/hydromt_fiat
sonar.tests = tests

# Encoding
sonar.sourceEncoding = UTF-8
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
## Issue addressed
Fixes #

## Explanation
Explain how you addressed the bug/feature request, what choices you made and why.

## Checklist
- [ ] Updated tests or added new tests
- [ ] Branch is up to date with `main`
- [ ] Tests & pre-commit hooks pass
- [ ] Updated documentation if needed

## Additional Notes (optional)
Add any additional notes or information that may be helpful.
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
version: 2
updates:
  # There is no conda support for dependabot, so this is the closest analog;
  # since the conda deps are also built from pyproject.toml, it should work well enough.
  - package-ecosystem: "pip"
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      # Check for updates to GitHub Actions every week
      interval: "weekly"
--------------------------------------------------------------------------------
/src/hydromt_fiat/gis/raster_utils.py:
--------------------------------------------------------------------------------
"""Raster utilities."""

import xarray as xr


def force_ns(
    ds: xr.Dataset | xr.DataArray,
) -> xr.Dataset | xr.DataArray:
    """Force a raster into north-south orientation.

    Parameters
    ----------
    ds : xr.Dataset | xr.DataArray
        The input dataset to check.

    Returns
    -------
    xr.Dataset | xr.DataArray
        Data in north-south orientation.
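
    Notes
    -----
    A negative y-resolution (``ds.raster.res[1] < 0``) indicates data already
    in north-south orientation; data with a positive y-resolution are flipped
    along the y-axis.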
20 | """ 21 | if ds.raster.res[1] > 0: 22 | ds = ds.raster.flipud() 23 | return ds 24 | -------------------------------------------------------------------------------- /src/hydromt_fiat/components/__init__.py: -------------------------------------------------------------------------------- 1 | """The custom HydroMT-FIAT components.""" 2 | 3 | from .config import ConfigComponent 4 | from .exposure_geom import ExposureGeomsComponent 5 | from .exposure_grid import ExposureGridComponent 6 | from .hazard import HazardComponent 7 | from .region import RegionComponent 8 | from .vulnerability import VulnerabilityComponent 9 | 10 | __all__ = [ 11 | "ConfigComponent", 12 | "ExposureGeomsComponent", 13 | "ExposureGridComponent", 14 | "HazardComponent", 15 | "RegionComponent", 16 | "VulnerabilityComponent", 17 | ] 18 | -------------------------------------------------------------------------------- /src/hydromt_fiat/drivers/resolvers.py: -------------------------------------------------------------------------------- 1 | """Resolve uri argument for OSM data catalog sources.""" 2 | 3 | from pathlib import Path 4 | 5 | from hydromt.data_catalog.uri_resolvers import URIResolver 6 | 7 | __all__ = ["OSMResolver"] 8 | 9 | __hydromt_eps__ = ["OSMResolver"] 10 | 11 | 12 | class OSMResolver(URIResolver): 13 | """Resolve uri argument for OSM data catalog sources.""" 14 | 15 | name = "osm_resolver" 16 | 17 | def resolve(self, uri, **kwargs) -> list[str]: 18 | """Resolve OSM uri argument.""" 19 | uri = Path(uri).stem 20 | return [uri] 21 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v5.0.0 5 | hooks: 6 | - id: trailing-whitespace 7 | - id: end-of-file-fixer 8 | - id: check-yaml 9 | - id: check-added-large-files 10 | - id: check-ast 11 | - id: check-json 12 | - id: debug-statements 13 | - id: mixed-line-ending 14 | - repo: https://github.com/astral-sh/ruff-pre-commit 15 | rev: v0.9.10 16 | hooks: 17 | - id: ruff 18 | args: [--fix, --exit-non-zero-on-fix] 19 | - id: ruff-format 20 | -------------------------------------------------------------------------------- /src/hydromt_fiat/data/registry.json: -------------------------------------------------------------------------------- 1 | { 2 | "data": { 3 | "fiat-model-c.tar.gz": "fb9669e77e469f6984d60c4499b8147a005945c8188ac7ce60c525fe4bcc1794", 4 | "fiat-model.tar.gz": "33ff877f1cfc37affa0bd2e798cc0f92c6b605041ab90fa6fbbdf1133a7d6cb7", 5 | "global-data.tar.gz": "c9d99af72010b20dd331d7ee9214c3a4b5c21fd8ab17ae25b04015d37f746d21", 6 | "osmnx.tar.gz": "3baee58a4ccaea5f1a523806b9f629d5f54a146ff475eb3514efd06531c54712", 7 | "test-build-data.tar.gz": "0d2c8576ba623c89679f2ed04b6290fcb017b7cc104accd87219b2c5babcf88a" 8 | }, 9 | "url": "https://zenodo.org/records/17727394/files/", 10 | "version": "v0.1.11" 11 | } 12 | -------------------------------------------------------------------------------- /src/hydromt_fiat/gis/utils.py: -------------------------------------------------------------------------------- 1 | """General gis utility.""" 2 | 3 | from pyproj.crs import CRS 4 | 5 | 6 | def crs_representation( 7 | crs: CRS | None = None, 8 | ) -> str | None: 9 | """Create string representation of CRS object. 10 | 11 | Parameters 12 | ---------- 13 | srs : CRS | None, optional 14 | The spatial reference system object, by default None. 
    """
    if crs is None:
        return None
    auth = crs.to_authority()
    if auth is None:
        return crs.to_wkt()
    return ":".join(auth)
--------------------------------------------------------------------------------
/src/hydromt_fiat/workflows/__init__.py:
--------------------------------------------------------------------------------
"""HydroMT-FIAT workflow functions."""

from .damage import max_monetary_damage
from .exposure_geom import (
    exposure_geoms_add_columns,
    exposure_geoms_link_vulnerability,
    exposure_geoms_setup,
)
from .exposure_grid import exposure_grid_setup
from .hazard import hazard_setup
from .vulnerability import process_vulnerability_linking, vulnerability_setup

__all__ = [
    "exposure_geoms_add_columns",
    "exposure_geoms_link_vulnerability",
    "exposure_geoms_setup",
    "exposure_grid_setup",
    "hazard_setup",
    "max_monetary_damage",
    "process_vulnerability_linking",
    "vulnerability_setup",
]
--------------------------------------------------------------------------------
/tests/gis/test_raster_utils.py:
--------------------------------------------------------------------------------
import xarray as xr

from hydromt_fiat.gis.raster_utils import force_ns


def test_force_ns(simple_raster: xr.DataArray):
    # Call the function
    da = force_ns(simple_raster)

    # Assert the output
    assert da.raster.res[1] < 0
    assert da.raster.res[1] == simple_raster.raster.res[1]  # Nothing happened


def test_force_ns_flip(simple_raster: xr.DataArray):
    # Flip the raster to make sure the function works
    raster = simple_raster.raster.flipud()
    assert raster.raster.res[1] > 0
    # Call the function
    da = force_ns(raster)

    # Assert the output
    assert da.raster.res[1] < 0
    assert da.raster.res[1] != raster.raster.res[1]  # It got flipped
--------------------------------------------------------------------------------
/tests/gis/test_gis_utils.py:
--------------------------------------------------------------------------------
from pyproj.crs import CRS, CompoundCRS

from hydromt_fiat.gis.utils import crs_representation


def test_crs_representation():
    # Call the function
    s = crs_representation(CRS.from_epsg(4326))

    # Assert the output
    assert s == "EPSG:4326"


def test_crs_representation_none():
    # Call the function
    s = crs_representation(None)

    # Assert the output
    assert s is None


def test_crs_representation_unknown():
    # Call the function
    s = crs_representation(
        CompoundCRS(
            name="foo",
            components=[CRS.from_epsg(4326), CRS.from_epsg(7837)],
        ),
    )

    # Assert the output
    assert s.startswith("COMPOUNDCRS")
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
.. currentmodule:: hydromt_fiat

.. _api_reference:

#############
API reference
#############

.. _api_model:

FIAT model class
================

Initialize
----------

.. autosummary::
   :toctree: _generated

   FIATModel

.. _setup_methods:

Setup methods
-------------

.. autosummary::
   :toctree: _generated

   FIATModel.setup_config
   FIATModel.setup_region
   components.VulnerabilityComponent.setup

I/O methods
-----------

.. autosummary::
   :toctree: _generated

   FIATModel.read
   FIATModel.write

Attributes
----------

.. autosummary::
   :toctree: _generated

   FIATModel.config
   FIATModel.region
   FIATModel.vulnerability
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
set SPHINXPROJ=hydromt_fiat

if "%1" == "" goto help

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://www.sphinx-doc.org/
	exit /b 1
)

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd
--------------------------------------------------------------------------------
/docs/_static/theme-deltares.css:
--------------------------------------------------------------------------------
/* Override the default color set in the original theme */

html[data-theme="light"] {
    /* NOTE: added after pydata v0.9 */
    --pst-color-primary: #080c80 !important;

    /* hyperlinks */
    --pst-color-link: rgb(13, 56, 224);

    /* panels */
    --pst-color-preformatted-border: #080c80 !important;
    --pst-color-panel-background: #f0f0f075;

    /* navbar */
    /* NOTE: does not work since v0.9 */
    --pst-color-navbar-link: rgb(237, 237, 255);
    --pst-color-navbar-link-hover: #fff;
    --pst-color-navbar-link-active: #fff;

    /* sphinx design */
    /* NOTE: does not work since v0.9 */
    --sd-color-card-border-hover: #080c80;
    --sd-color-tabs-label-active: #080c80;
    --sd-color-tabs-label-hover: #080c80;
    --sd-color-tabs-underline-active: #080c80;
}

/* enlarge deltares & github icon size; only works with local/url svg files; not with fa icons */
img.icon-link-image {
    height: 2.5em !important;
}
--------------------------------------------------------------------------------
/.github/workflows/ci_test.yml:
--------------------------------------------------------------------------------
---
name: Test Package
on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - src/hydromt_fiat/**/*
      - tests/**/*
      - pixi.lock
      - pyproject.toml
      - .github/workflows/ci_test.yml
      - .github/workflows/test.yml
  pull_request:
    branches:
      - main
    paths:
      - src/hydromt_fiat/**/*
      - tests/**/*
      - pixi.lock
      - pyproject.toml
      - .github/workflows/ci_test.yml
      - .github/workflows/test.yml

jobs:
  test:
    name: "${{ matrix.label }} - py${{ matrix.python-version }}"
    strategy:
      fail-fast: false
      matrix:
        os:
          - "ubuntu-latest"
          - "windows-latest"
        python-version: ["311", "312", "313"]
        include:
          - os: ubuntu-latest
            label: linux-64
          - os: windows-latest
            label: win-64

    uses: ./.github/workflows/test.yml
    with:
      os: ${{ matrix.os }}
      python-version: ${{ matrix.python-version }}
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yaml:
--------------------------------------------------------------------------------
name: Feature Request
description: Propose an improvement for HydroMT-FIAT
# title: "Improvement: "
labels: [enhancement]

body:
  - type: dropdown
    id: checks
    attributes:
      description: What kind of feature request is this?
      label: Feature type
      options:
        - Adding new functionality
        - Changing existing functionality
        - Removing existing functionality
  - type: textarea
    id: description
    attributes:
      label: Improvement Description
      description: >
        Please provide a clear and concise description of what your proposed improvement is:
    validations:
      required: true
  - type: textarea
    id: feature
    attributes:
      label: Implementation Description
      description: >
        Please describe how this would be implemented:
    validations:
      required: false
  - type: textarea
    id: context
    attributes:
      label: Additional Context
      description: >
        Please add any other context about the improvement here:
--------------------------------------------------------------------------------
/.github/workflows/ci_pages.yml:
--------------------------------------------------------------------------------
---
name: Pages versioning

on:
  repository_dispatch:
    types:
      - docs-updated
  workflow_dispatch:

jobs:
  version:
    defaults:
      run:
        shell: bash -l {0}

    strategy:
      fail-fast: false
      matrix:
        os:
          - "ubuntu-latest"
        include:
          - os: ubuntu-latest
            label: linux-64

    name: ${{ matrix.os }}
    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v5
        with:
          ref: gh-pages

      - name: Install package(s)
        run: |
          sudo apt install -y jq

      - name: Run versioning script
        id: check
        run: |
          ./version.sh
        continue-on-error: true

      - name: Adjust stable symbolic link
        if: ${{ steps.check.outcome == 'success' }}
        run: |
          ln -sfn ${{ env.NEW_STABLE_VERSION }} stable

      - name: Commit and push changes
        if: ${{ steps.check.outcome == 'success' }}
        run: |
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git add .
          git commit -m "Added version ${{ env.NEW_STABLE_VERSION }}"
          git push
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation.yaml:
--------------------------------------------------------------------------------
name: Documentation
description: Missing or bad documentation.
# title: "Docs: "
labels: [documentation]

body:
  - type: checkboxes
    attributes:
      label: HydroMT-FIAT version checks
      options:
        - label: >
            I have checked that the issue still exists in the latest version of the docs
            on `main` [here](https://github.com/Deltares/HydroMT-FIAT)
          required: true
  - type: dropdown
    id: kind
    attributes:
      description: What kind of documentation issue is this?
      label: Kind of issue
      options:
        - Docs are wrong
        - Docs are unclear
        - Docs are missing
  - type: textarea
    id: location
    attributes:
      label: Location of the documentation
      description: >
        Please provide the URL of the documentation:
    validations:
      required: true
  - type: textarea
    id: problem
    attributes:
      label: Documentation problem
      description: >
        Please provide a description of the documentation problem:
    validations:
      required: true
  - type: textarea
    id: suggested-fix
    attributes:
      label: Suggested fix for documentation
      description: >
        Please explain your suggested fix and why it's better than the existing documentation:
    validations:
      required: false
--------------------------------------------------------------------------------
/.github/workflows/ci_lock.yml:
--------------------------------------------------------------------------------
---
name: Update lockfile

permissions:
  contents: write
  pull-requests: write

on:
  workflow_dispatch:
  schedule:
    - cron: 0 5 1 * * # First of every month

jobs:
  pixi-update:
    defaults:
      run:
        shell: bash -e -l {0}
    timeout-minutes: 30

    strategy:
      fail-fast: false
      matrix:
        os:
          - "ubuntu-latest"
        include:
          - os: ubuntu-latest
            label: linux-64

    name: ${{ matrix.label }}
    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v5

      - name: Set up pixi
        uses: prefix-dev/setup-pixi@28eb668aafebd9dede9d97c4ba1cd9989a4d0004
        with:
          run-install: false

      - name: Update lockfile
        run: |
          set -o pipefail
          pixi update --json | pixi exec pixi-diff-to-markdown >> diff.md

      - name: Create pull request
        uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: Update pixi lockfile
          title: Update pixi lockfile
          body-path: diff.md
          branch: update-pixi
          base: main
          labels: pixi
          delete-branch: true
          add-paths: pixi.lock
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bugs.yaml:
--------------------------------------------------------------------------------
name: Bugs
description: Broken elements within HydroMT-FIAT
# title: "BUG: "
labels: [bug]

body:
  - type: checkboxes
    id: checks
    attributes:
      label: HydroMT-FIAT version checks
      options:
        - label: >
            I have checked that this issue has not already been reported.
          required: true
        - label: >
            I have checked that this bug exists on the latest version of HydroMT-FIAT.
          required: true
  - type: textarea
    id: example
    attributes:
      label: Reproducible Example
      description: >
        Please provide a minimal, copy-pastable example to reproduce the behavior:
    validations:
      required: true
  - type: textarea
    id: current-behaviour
    attributes:
      description: >
        Please provide a description of the incorrect behaviour shown in the reproducible example
      label: Current behaviour
    validations:
      required: true
  - type: textarea
    id: expected-behaviour
    attributes:
      description: >
        Please provide a description of what you think the behaviour should be
      label: Desired behaviour
    validations:
      required: true
  - type: textarea
    id: Additional
    attributes:
      label: Additional Context
      description: >
        Please add any other context about the bug here:
    validations:
      required: false
--------------------------------------------------------------------------------
/.github/workflows/ci_sonar.yml:
--------------------------------------------------------------------------------
---
name: SonarQube scan and coverage
on:
  repository_dispatch:
    types:
      - sonarqube-trigger
  workflow_dispatch:

jobs:
  sonarqube:
    name: SonarQube
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
          ref: ${{ github.event.client_payload.branch }}

      - name: Download coverage
        run: |
          gh run download ${{ github.event.client_payload.run_id }} --name code-coverage
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: SonarQube Scan
        if: ${{ github.event.client_payload.pr_flag == 'false' }}
        uses: SonarSource/sonarqube-scan-action@fd88b7d7ccbaefd23d8f36f73b59db7a3d246602
        env:
          SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN_2025 }}
        with:
          args: >
            -Dsonar.branch.name=${{ github.event.client_payload.branch }}

      - name: SonarQube Scan (PR)
        if: ${{ github.event.client_payload.pr_flag == 'true' }}
        uses: SonarSource/sonarqube-scan-action@fd88b7d7ccbaefd23d8f36f73b59db7a3d246602
        env:
          SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN_2025 }}
        with:
          args: >
            -Dsonar.pullrequest.branch=${{ github.event.client_payload.branch }}
            -Dsonar.pullrequest.base=${{ github.event.client_payload.pr_base }}
            -Dsonar.pullrequest.key=${{ github.event.client_payload.pr_number }}
--------------------------------------------------------------------------------
/tests/workflows/test_utils.py:
--------------------------------------------------------------------------------
import logging

import pytest
import xarray as xr

from hydromt_fiat.workflows.utils import _merge_dataarrays, _process_dataarray


def test__process_dataarray(
    hazard_event_data: xr.DataArray,
):
    # Call the function
    da = _process_dataarray(da=hazard_event_data, da_name="flood_dataarray")

    # Assert the output
    assert da.encoding["_FillValue"] is None
    assert da.name == "flood_dataarray"
    assert "grid_mapping" not in da.encoding.keys()


def test__process_dataarray_rotated(
    caplog: pytest.LogCaptureFixture,
    rotated_grid: xr.DataArray,
):
    caplog.set_level(logging.WARNING)
    # Assert rotated grid
    assert "xc" in rotated_grid.coords
    assert rotated_grid.xc.shape == (2, 2)
    # Call the function
    da = _process_dataarray(da=rotated_grid, da_name="foo")

    # Assert the output
    assert "Hazard grid is rotated." in caplog.text
    assert "xc" not in da.coords


def test__merge_dataarrays(
    caplog: pytest.LogCaptureFixture,
    hazard_event_data: xr.DataArray,
):
    caplog.set_level(logging.WARNING)
    # Small list of dataarrays
    das = [hazard_event_data, hazard_event_data]
    # Call the function
    ds = _merge_dataarrays(grid_like=None, dataarrays=das)

    # Assert the warning message and output
    warning_msg = (
        "No known grid provided to reproject to, "
        "defaulting to first specified grid for transform and extent"
    )
    assert warning_msg in caplog.text
    assert isinstance(ds, xr.Dataset)
--------------------------------------------------------------------------------
/docs/_static/deltares-blue.svg:
--------------------------------------------------------------------------------
Artboard 1
--------------------------------------------------------------------------------
/src/hydromt_fiat/workflows/utils.py:
--------------------------------------------------------------------------------
"""Workflow utilities."""

import logging

import xarray as xr
from hydromt.model.processes.grid import grid_from_rasterdataset

logger = logging.getLogger(f"hydromt.{__name__}")


def _process_dataarray(da: xr.DataArray, da_name: str) -> xr.DataArray:
    # Convert to a GDAL-compliant data array
    da.encoding["_FillValue"] = None
    da = da.raster.gdal_compliant()
    da = da.rename(da_name)

    # Check whether the map is rotated and, if so, reproject to a non-rotated grid
    if "xc" in da.coords:
        logger.warning(
            "Hazard grid is rotated. It will be reprojected"
It will be reprojected" 21 | " to a non rotated grid using nearest neighbor" 22 | "interpolation" 23 | ) 24 | da = da.raster.reproject(dst_crs=da.rio.crs) 25 | if "grid_mapping" in da.encoding: 26 | _ = da.encoding.pop("grid_mapping") 27 | return da 28 | 29 | 30 | def _merge_dataarrays( 31 | grid_like: xr.Dataset | xr.DataArray | None, dataarrays: list[xr.DataArray] 32 | ) -> xr.Dataset: 33 | if grid_like is None: 34 | logger.warning( 35 | "No known grid provided to reproject to, \ 36 | defaulting to first specified grid for transform and extent" 37 | ) 38 | grid_like = dataarrays[0] 39 | 40 | # Reproject to gridlike 41 | if isinstance(grid_like, xr.DataArray): 42 | grid_like = grid_like.to_dataset() 43 | 44 | # Reproject if necessary 45 | for idx, da in enumerate(dataarrays): 46 | dataarrays[idx] = grid_from_rasterdataset(grid_like=grid_like, ds=da) 47 | 48 | ds = xr.merge(dataarrays) 49 | ds.attrs = {} # Ensure that the dataset doesnt copy a merged instance of 50 | # the data variables' attributes 51 | 52 | # Return the data 53 | return ds 54 | -------------------------------------------------------------------------------- /docs/_static/deltares-white.svg: -------------------------------------------------------------------------------- 1 | Artboard 1 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dev stuff 2 | .vscode/ 3 | 4 | # Temporary files 5 | .cov/ 6 | .docs/ 7 | tmp/ 8 | 9 | # Geospatial temporary files 10 | *.aux.xml 11 | 12 | # pixi environments 13 | .pixi 14 | *.egg-info 15 | 16 | # Byte-compiled / optimized / DLL files 17 | __pycache__/ 18 | *.py[cod] 19 | *$py.class 20 | 21 | # C extensions 22 | *.so 23 | 24 | # Distribution / packaging 25 | .Python 26 | build/ 27 | develop-eggs/ 28 | dist/ 29 | downloads/ 30 | eggs/ 31 | .eggs/ 32 | lib/ 33 | lib64/ 34 | parts/ 35 | sdist/ 36 | var/ 37 | wheels/ 38 | share/python-wheels/ 39 | *.egg-info/ 40 | .installed.cfg 41 | *.egg 42 | MANIFEST 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit test / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .nox/ 52 | .coverage 53 | .coverage.* 54 | .cache 55 | nosetests.xml 56 | coverage.xml 57 | *.cover 58 | *.py,cover 59 | .hypothesis/ 60 | .pytest_cache/ 61 | cover/ 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | docs/_examples/ 66 | docs/_generated/ 67 | 68 | # PyBuilder 69 | .pybuilder/ 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # IPython 76 | profile_default/ 77 | ipython_config.py 78 | 79 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 80 | __pypackages__/ 81 | 82 | # Celery stuff 83 | celerybeat-schedule 84 | celerybeat.pid 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | environment.yml 98 | 99 | # Rope project settings 100 | .ropeproject 101 | 102 | # mkdocs documentation 103 | /site 104 | 105 | # mypy 106 | .mypy_cache/ 107 | .dmypy.json 108 | dmypy.json 109 | 110 | # Pyre type checker 111 | .pyre/ 112 | 113 | # pytype static type analyzer 114 | .pytype/ 115 | 116 | # Cython debug symbols 117 | cython_debug/ 118 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. 
.. _readme:

========================================
HydroMT-FIAT: Building Delft-FIAT models
========================================

|pypi| |conda forge| |quality| |coverage| |license|

What is HydroMT-FIAT?
---------------------
**HydroMT-FIAT** is an open-source Python package for building **Delft-FIAT** models.
It is meant to make model building easier for the user by streamlining and generalizing
data processing. For this purpose, HydroMT-FIAT relies on the excellent **HydroMT**
Python library.
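
A minimal model-building sketch (the data catalog, region file, and output path
below are placeholders, not shipped defaults):

.. code-block:: python

    from hydromt_fiat import FIATModel

    # Initialize a model in write mode with a data catalog
    model = FIATModel(
        root="./fiat_model",
        mode="w+",
        data_libs=["./data_catalog.yml"],
    )

    # Configure the model type and region, then write to disk
    model.setup_config(**{"model.type": "geom"})
    model.setup_region("./region.geojson")
    model.write()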

Documentation
-------------
Important documentation regarding HydroMT-FIAT:

- `HydroMT-FIAT <HydroMT_FIAT_>`_
- `Delft-FIAT <Delft_FIAT_>`_
- `HydroMT <HydroMT_>`_

Attribution
-----------
HydroMT-FIAT relies on these excellent libraries:

- barril
- geopandas
- hydromt
- numpy
- osmnx
- pooch
- pyproj
- requests
- shapely
- tomlkit
- xarray

.. |pypi| image:: https://img.shields.io/pypi/v/hydromt-fiat
    :alt: PyPI
    :target: https://pypi.org/project/hydromt_fiat/

.. |conda forge| image:: https://img.shields.io/conda/v/conda-forge/hydromt_fiat
    :alt: Conda-Forge
    :target: https://anaconda.org/conda-forge/hydromt_fiat

.. |quality| image:: https://sonarcloud.io/api/project_badges/measure?project=Deltares_hydromt_fiat&metric=alert_status
    :alt: SonarQube status
    :target: https://sonarcloud.io/summary/new_code?id=Deltares_hydromt_fiat

.. |coverage| image:: https://sonarcloud.io/api/project_badges/measure?project=Deltares_hydromt_fiat&metric=coverage
    :alt: Coverage
    :target: https://sonarcloud.io/summary/new_code?id=Deltares_hydromt_fiat

.. |license| image:: https://img.shields.io/github/license/Deltares/hydromt_fiat
    :alt: License
    :target: https://github.com/Deltares/hydromt_fiat/blob/main/LICENSE

.. _Delft_FIAT: https://deltares.github.io/Delft-FIAT/stable
.. _HydroMT_FIAT: https://deltares.github.io/hydromt_fiat/latest
.. _HydroMT: https://deltares.github.io/hydromt/stable
--------------------------------------------------------------------------------
/tests/integration/test_io.py:
--------------------------------------------------------------------------------
from pathlib import Path

import geopandas as gpd
import numpy as np
import pytest

from hydromt_fiat import FIATModel
from hydromt_fiat.utils import (
    CURVES,
    EXPOSURE,
    EXPOSURE_GRID_SETTINGS,
    GEOM,
    HAZARD,
    MODEL_TYPE,
    REGION,
    SETTINGS,
    VAR_AS_BAND,
    VULNERABILITY,
)


@pytest.mark.integration
def test_model_io(tmp_path: Path, model_data_clipped_path: Path):
    # Create the model to read
    model = FIATModel(root=model_data_clipped_path)

    # Read the model
    model.read()

    # Assert its state
    assert len(model.config.data) == 4
    assert model.config.get(MODEL_TYPE) == GEOM
    assert isinstance(model.region, gpd.GeoDataFrame)
    np.testing.assert_almost_equal(model.region.total_bounds[0], 85675, decimal=0)
    assert len(model.exposure_geoms.data) == 1
    # Even though the model type is geom, it will be read from its default path
    assert "buildings" in model.exposure_geoms.data
    assert len(model.exposure_grid.data.data_vars) == 4
    assert "industrial_content" in model.exposure_grid.data
    assert len(model.hazard.data.data_vars) == 1
    assert "flood_event" in model.hazard.data
    assert not model.vulnerability.data.curves.empty
    assert not model.vulnerability.data.identifiers.empty

    # Set the root to a new location and in write mode
    model.root.set(path=tmp_path, mode="w")

    # Write the model to that location
    model.write()

    # Assert the output
    assert Path(tmp_path, f"{SETTINGS}.toml").is_file()
    assert Path(tmp_path, f"{REGION}.geojson").is_file()
    assert Path(tmp_path, EXPOSURE, "buildings.fgb").is_file()
    assert Path(tmp_path, EXPOSURE, "spatial.nc").is_file()
    assert Path(tmp_path, f"{HAZARD}.nc").is_file()
    assert Path(tmp_path, VULNERABILITY, f"{CURVES}.csv").is_file()
    assert Path(tmp_path, VULNERABILITY, f"{CURVES}_id.csv").is_file()
    # Assert the addition of some settings set during I/O
    assert model.config.get(f"{EXPOSURE_GRID_SETTINGS}.{VAR_AS_BAND}")
--------------------------------------------------------------------------------
/tests/system/test_system_geom.py:
--------------------------------------------------------------------------------
from pathlib import Path

import pytest
from fiat import Configurations, GeomModel, __version__
from packaging.version import Version

from hydromt_fiat import FIATModel
from hydromt_fiat.utils import GEOM, MODEL_TYPE


@pytest.mark.skipif(
    Version(__version__) < Version("1"),
    reason="At least Delft-FIAT version 1.0.0 is required.",
)
@pytest.mark.system
def test_system_geom_model(
    tmp_path: Path,
    build_data_catalog_path: Path,
    global_data_catalog_path: Path,
    build_region_small: Path,
):
    ## HydroMT-FIAT
    # Setup the model
    model = FIATModel(
        root=tmp_path,
        mode="w+",
        data_libs=[build_data_catalog_path, global_data_catalog_path],
    )

    # Add model type and region
    model.setup_config(**{MODEL_TYPE: GEOM})
    model.setup_region(build_region_small)

    # Setup the vulnerability
    model.vulnerability.setup(
"jrc_curves", 37 | "jrc_curves_link", 38 | unit="m", 39 | continent="europe", 40 | ) 41 | 42 | # Add an hazard layer 43 | model.hazard.setup( 44 | "flood_event", 45 | ) 46 | 47 | # Setup the exposure geometry data 48 | model.exposure_geoms.setup( 49 | exposure_fname="buildings", 50 | exposure_type_column="gebruiksdoel", 51 | exposure_link_fname="buildings_link", 52 | ) 53 | model.exposure_geoms.setup_max_damage( 54 | exposure_name="buildings", 55 | exposure_type="damage", 56 | exposure_cost_table_fname="jrc_damage", 57 | country="Netherlands", # Select the correct row from the data 58 | ) 59 | # Needed for flood calculations 60 | model.exposure_geoms.update_column( 61 | exposure_name="buildings", 62 | columns=["ref", "method"], 63 | values=[0, "centroid"], 64 | ) 65 | 66 | # Write the model 67 | model.write() 68 | 69 | ## FIAT 70 | # Read the config file 71 | cfg = Configurations.from_file(Path(model.root.path, model.config._filename)) 72 | # Read the data in the fiat model 73 | fmodel = GeomModel(cfg) 74 | 75 | # Execute 76 | fmodel.run() 77 | -------------------------------------------------------------------------------- /src/hydromt_fiat/workflows/hazard.py: -------------------------------------------------------------------------------- 1 | """Hazard workflows.""" 2 | 3 | import logging 4 | from typing import Any 5 | 6 | import xarray as xr 7 | from barril.units import Scalar 8 | 9 | from hydromt_fiat.utils import ANALYSIS, EVENT, RISK, RP, TYPE, standard_unit 10 | from hydromt_fiat.workflows.utils import _merge_dataarrays, _process_dataarray 11 | 12 | __all__ = ["hazard_setup"] 13 | 14 | logger = logging.getLogger(f"hydromt.{__name__}") 15 | 16 | 17 | def hazard_setup( 18 | grid_like: xr.Dataset | None, 19 | hazard_data: dict[str, xr.DataArray], 20 | hazard_type: str, 21 | *, 22 | return_periods: list[int] | None = None, 23 | risk: bool = False, 24 | unit: str = "m", 25 | ) -> xr.Dataset: 26 | """Read and transform hazard data. 27 | 28 | Parameters 29 | ---------- 30 | grid_like : xr.Dataset | None 31 | Grid dataset that serves as an example dataset for transforming the input data. 32 | hazard_data : dict[str, xr.DataArray] 33 | The hazard data in a dictionary with the names of the datasets as keys. 34 | hazard_type : str 35 | Type of hazard. 36 | return_periods : list[int], optional 37 | List of return periods, by default None. 38 | risk : bool, optional 39 | Designate hazard files for risk analysis, by default False. 40 | unit : str, optional 41 | The unit which the hazard data is in, by default 'm'. 42 | 43 | Returns 44 | ------- 45 | xr.Dataset 46 | Unified xarray dataset containing the hazard data. 
47 | """ 48 | hazard_dataarrays = [] 49 | for idx, (da_name, da) in enumerate(hazard_data.items()): 50 | da = _process_dataarray(da=da, da_name=da_name) 51 | 52 | # Check for unit 53 | conversion = standard_unit(Scalar(1.0, unit)) 54 | da *= conversion.value 55 | 56 | attrs: dict[str, Any] = { 57 | "name": da_name, 58 | } 59 | if risk: 60 | assert return_periods is not None 61 | attrs[RP] = return_periods[idx] 62 | 63 | # Set the event data arrays to the hazard grid component 64 | da = da.assign_attrs(attrs) 65 | 66 | hazard_dataarrays.append(da) 67 | logger.info(f"Added {hazard_type} hazard map: {da_name}") 68 | 69 | # Reproject to gridlike 70 | ds = _merge_dataarrays(grid_like=grid_like, dataarrays=hazard_dataarrays) 71 | 72 | attrs = { 73 | TYPE: hazard_type, 74 | ANALYSIS: EVENT, 75 | } 76 | if risk: 77 | attrs[ANALYSIS] = RISK 78 | ds = ds.assign_attrs(attrs) 79 | 80 | return ds 81 | -------------------------------------------------------------------------------- /.github/workflows/ci_docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Documentation 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | tags: 9 | - v* 10 | paths: 11 | - docs/**/* 12 | - examples/* 13 | - pyproject.toml 14 | - .github/workflows/ci_docs.yml 15 | pull_request: 16 | branches: 17 | - main 18 | paths: 19 | - docs/**/* 20 | - examples/* 21 | - pyproject.toml 22 | - .github/workflows/ci_docs.yml 23 | 24 | jobs: 25 | docs: 26 | defaults: 27 | run: 28 | shell: bash -e -l {0} 29 | env: 30 | branch_name: ${{ github.head_ref || github.ref_name }} 31 | timeout-minutes: 30 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | os: 37 | - "ubuntu-latest" 38 | include: 39 | - os: ubuntu-latest 40 | label: linux-64 41 | 42 | name: ${{ matrix.label }} 43 | runs-on: ${{ matrix.os }} 44 | 45 | steps: 46 | - name: Checkout code 47 | uses: actions/checkout@v5 48 | 49 | - name: Setup pixi env 50 | uses: prefix-dev/setup-pixi@28eb668aafebd9dede9d97c4ba1cd9989a4d0004 51 | with: 52 | pixi-version: "v0.58.0" 53 | environments: docs 54 | locked: false 55 | cache: true 56 | cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }} 57 | 58 | # if we're not publishing we don't have to write them, so we might as well 59 | # save ourself a bunch of IO time 60 | - name: Generate dummy docs 61 | if: ${{ github.event_name == 'pull_request' }} 62 | run: | 63 | pixi run -e docs docs-dummy 64 | 65 | - name: Generate docs 66 | if: ${{ github.event_name != 'pull_request' && !github.event.act }} 67 | run: | 68 | pixi run -e docs docs-html 69 | echo "DOC_VERSION=$(pixi run --locked -e docs python -c 'from hydromt_fiat import __version__ as v; print("dev" if "dev" in v else "v"+v.replace(".dev",""))')" >> $GITHUB_ENV 70 | 71 | # - name: Upload to GitHub Pages 72 | # if: ${{ github.event_name != 'pull_request' && !github.event.act }} 73 | # uses: peaceiris/actions-gh-pages@v3.9.3 74 | # with: 75 | # github_token: ${{ secrets.GITHUB_TOKEN }} 76 | # publish_dir: ./docs/_build/html 77 | # exclude_assets: .buildinfo,_sources/*,_examples/*.ipynb 78 | # destination_dir: ./${{ env.DOC_VERSION }} 79 | # keep_files: false 80 | # full_commit_message: Deploy ${{ env.DOC_VERSION }} to GitHub Pages 81 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Test 3 | on: 4 | workflow_call: 5 | inputs: 6 | os: 
      python-version: { type: string, required: true }

jobs:
  unit:
    defaults:
      run:
        shell: bash -e -l {0}
    runs-on: ${{ inputs.os }}
    timeout-minutes: 30
    steps:
      - name: Checkout code
        uses: actions/checkout@v5

      - name: Setup pixi env
        uses: prefix-dev/setup-pixi@28eb668aafebd9dede9d97c4ba1cd9989a4d0004
        with:
          pixi-version: "v0.58.0"
          environments: test-py${{ inputs.python-version }}
          locked: false
          cache: true
          cache-write: ${{ github.event_name == 'push' && github.ref_name == 'main' }}

      - name: Test
        run: |
          pixi run -e test-py${{ inputs.python-version }} test-cov

      - name: Upload coverage
        if: inputs.os == 'ubuntu-latest' && inputs.python-version == '313'
        uses: actions/upload-artifact@v5
        with:
          name: code-coverage
          path: coverage.xml

      - name: Signal SonarQube
        if: inputs.os == 'ubuntu-latest' && inputs.python-version == '313'
        uses: peter-evans/repository-dispatch@5fc4efd1a4797ddb68ffd0714a238564e4cc0e6f
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          repository: ${{ github.repository }}
          event-type: sonarqube-trigger
          client-payload: |
            {
              "run_id": "${{ github.run_id }}",
              "branch": "${{ github.head_ref || github.ref_name }}",
              "pr_flag": "${{ github.event_name == 'pull_request' }}",
              "pr_base": "${{ github.event.pull_request.base.ref || '' }}",
              "pr_number": "${{ github.event.pull_request.number || '' }}"
            }

  integration:
    defaults:
      run:
        shell: bash -e -l {0}
    runs-on: ${{ inputs.os }}
    needs: unit
    timeout-minutes: 30
    steps:
      - name: Checkout code
        uses: actions/checkout@v5

      - name: Setup pixi env
        uses: prefix-dev/setup-pixi@28eb668aafebd9dede9d97c4ba1cd9989a4d0004
        with:
          pixi-version: "v0.58.0"
          environments: test-py${{ inputs.python-version }}
          locked: false
          cache: true
          cache-write: false

      - name: Test
        run: |
          pixi run -e test-py${{ inputs.python-version }} test-integration
--------------------------------------------------------------------------------
/tests/workflows/test_hazard.py:
--------------------------------------------------------------------------------
import numpy as np
import pytest
import xarray as xr

from hydromt_fiat.utils import EVENT, RISK
from hydromt_fiat.workflows import hazard_setup


def test_hazard_setup_risk(hazard_event_data_highres: xr.DataArray):
    # Test hazard risk
    hazard_data = {"flood_event_highres": hazard_event_data_highres}
    ds = hazard_setup(
        grid_like=None,
        hazard_data=hazard_data,
        hazard_type="flooding",
        return_periods=[50000],
        risk=True,
    )
    assert isinstance(ds, xr.Dataset)
    assert "flood_event_highres" in ds.data_vars
    da = ds.flood_event_highres
    assert ds.analysis == RISK
    assert da.name == "flood_event_highres"
    assert da.rp == 50000


def test_hazard_setup_event(hazard_event_data: xr.DataArray):
    # Test hazard event
    hazard_data = {"flood_event": hazard_event_data}
    ds = hazard_setup(
        grid_like=None,
        hazard_data=hazard_data,
        hazard_type="flooding",
        risk=False,
    )
    assert isinstance(ds, xr.Dataset)
    assert "flood_event" in ds.data_vars
    da = ds.flood_event
    assert ds.analysis == EVENT
    assert da.name == "flood_event"
    assert "rp" not in da.attrs.keys()


def test_hazard_setup_reproj(
    hazard_event_data: xr.DataArray,
    hazard_event_data_highres: xr.DataArray,
):
    # Assert the shapes at the start
    assert hazard_event_data.shape == (5, 4)
    assert hazard_event_data_highres.shape == (94, 93)

    # Setup with a grid_like
    hazard_data = {"event": hazard_event_data_highres}
    ds = hazard_setup(
        grid_like=hazard_event_data,
        hazard_data=hazard_data,
        hazard_type="flooding",
        risk=False,
    )

    assert ds.event.name == "event"
    # More importantly, check the shape
    assert ds.event.shape == (5, 4)  # Should be the same as the hazard event data


def test_hazard_setup_unit_default(hazard_event_data: xr.DataArray):
    # Call the workflow function
    hazard_data = {"event": hazard_event_data}
    ds = hazard_setup(
        grid_like=None,
        hazard_data=hazard_data,
        hazard_type="flooding",
    )

    # Assert the avg level
    avg_level = ds.event.mean().values
    assert np.isclose(avg_level, 1.7947)


def test_hazard_setup_unit_differ(
    caplog: pytest.LogCaptureFixture,
    hazard_event_data: xr.DataArray,
):
    hazard_data = {"event": hazard_event_data}
    # Suppose it's in a different unit
    ds = hazard_setup(
        grid_like=None,
        hazard_data=hazard_data,
        hazard_type="flooding",
        unit="ft",
    )

    # Assert the avg level
    avg_level_ft = ds.event.mean().values
    assert (
        "Given unit (ft) does not match the standard unit (m) for length" in caplog.text
    )
    assert np.isclose(avg_level_ft, 0.547029)
--------------------------------------------------------------------------------
/.github/workflows/pypi.yml:
--------------------------------------------------------------------------------
---
name: PyPI release

on:
  release:
    types:
      - published
  push:
    tags:
      - v*
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v5
        with:
          fetch-depth: 0

      - uses: actions/setup-python@v6
        name: Install Python
        with:
          python-version: "3.13"

      - name: Setup build packages
        run: |
          pip install build twine wheel

      - name: Build sdist/wheel
        run: |
          python -m build --sdist
          python -m build --wheel
          python -m twine check dist/*

      - name: Upload sdist/wheel
        uses: actions/upload-artifact@v5
        with:
          name: source-distribution
          path: dist

  test-build:
    needs: build
    defaults:
      run:
        shell: bash -l {0}
    strategy:
      fail-fast: false
      matrix:
        os:
          - "ubuntu-latest"
        include:
          - os: ubuntu-latest
            label: linux-64
    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v5

      - name: Setup pixi env
        uses: prefix-dev/setup-pixi@28eb668aafebd9dede9d97c4ba1cd9989a4d0004
        with:
          pixi-version: "v0.58.0"
          environments: slim
          locked: false
          cache: true
          cache-write: false

      - name: Download artifact
        uses: actions/download-artifact@v6
        with:
          name: source-distribution
          path: dist

      - name: List contents of built dist
        run: |
          ls -ltrh
          ls -ltrh dist

      - name: Verify dist/wheel
        run: |
          pixi run python -m pip install dist/hydromt_fiat*.whl
          pixi run hydromt --models
--models 87 | 88 | upload-testpypi: 89 | needs: test-build 90 | if: github.event_name == 'push' 91 | environment: 92 | name: testpypi 93 | url: https://test.pypi.org/p/hydromt-fiat 94 | runs-on: ubuntu-latest 95 | permissions: 96 | id-token: write 97 | 98 | steps: 99 | - name: Download sdist/ wheel 100 | uses: actions/download-artifact@v6 101 | with: 102 | name: source-distribution 103 | path: dist 104 | 105 | - name: Publish TestPyPI 106 | uses: pypa/gh-action-pypi-publish@release/v1 107 | with: 108 | repository-url: https://test.pypi.org/legacy/ 109 | 110 | upload-pypi: 111 | needs: test-build 112 | if: ${{ github.event_name == 'release' && !github.event.act }} 113 | runs-on: ubuntu-latest 114 | environment: 115 | name: pypi 116 | url: https://pypi.org/p/hydromt-fiat 117 | 118 | permissions: 119 | id-token: write 120 | 121 | steps: 122 | - name: Download sdist/ wheel 123 | uses: actions/download-artifact@v6 124 | with: 125 | name: source-distribution 126 | path: dist 127 | 128 | - name: Publish PyPI 129 | uses: pypa/gh-action-pypi-publish@release/v1 130 | -------------------------------------------------------------------------------- /src/hydromt_fiat/utils.py: -------------------------------------------------------------------------------- 1 | """HydroMT-FIAT utility.""" 2 | 3 | import logging 4 | from typing import Any 5 | 6 | from barril.units import Scalar, UnitDatabase 7 | 8 | __all__ = ["create_query"] 9 | 10 | # GLOBAL STRINGS 11 | ## BASE 12 | ANALYSIS = "analysis" 13 | CONFIG = "config" 14 | CURVE = "curve" 15 | DAMAGE = "damage" 16 | EVENT = "event" 17 | EXPOSURE = "exposure" 18 | FILE = "file" 19 | FN = "fn" 20 | GEOM = "geom" 21 | GRID = "grid" 22 | HAZARD = "hazard" 23 | MAX = "max" 24 | MODEL = "model" 25 | OBJECT = "object" 26 | REGION = "region" 27 | RISK = "risk" 28 | RP = "rp" 29 | SETTINGS = "settings" 30 | SRS = "srs" 31 | TYPE = "type" 32 | VULNERABILITY = "vulnerability" 33 | 34 | ## Delft-FIAT 35 | EXPOSURE_GEOM = f"{EXPOSURE}.{GEOM}" 36 | EXPOSURE_GEOM_FILE = f"{EXPOSURE_GEOM}.{FILE}" 37 | EXPOSURE_GRID = f"{EXPOSURE}.{GRID}" 38 | EXPOSURE_GRID_FILE = f"{EXPOSURE_GRID}.{FILE}" 39 | EXPOSURE_GRID_SETTINGS = f"{EXPOSURE_GRID}.{SETTINGS}" 40 | FN_CURVE = f"{FN}_{CURVE}" 41 | HAZARD_FILE = f"{HAZARD}.{FILE}" 42 | HAZARD_RP = f"{HAZARD}.{RP}" 43 | HAZARD_SETTINGS = f"{HAZARD}.{SETTINGS}" 44 | MODEL_RISK = f"{MODEL}.{RISK}" 45 | MODEL_TYPE = f"{MODEL}.{TYPE}" 46 | VAR_AS_BAND = "var_as_band" 47 | VULNERABILITY_FILE = f"{VULNERABILITY}.{FILE}" 48 | 49 | ## HydroMT-FIAT 50 | COST_TYPE = f"cost_{TYPE}" 51 | CURVE_ID = f"{CURVE}_id" 52 | CURVES = f"{CURVE}s" 53 | EXPOSURE_LINK = f"{EXPOSURE}_link" 54 | EXPOSURE_TYPE = f"{EXPOSURE}_{TYPE}" 55 | IDENTIFIERS = "identifiers" 56 | OBJECT_TYPE = f"{OBJECT}_{TYPE}" 57 | OBJECT_ID = f"{OBJECT}_id" 58 | SUBTYPE = f"sub{TYPE}" 59 | 60 | # Unit database init 61 | UNIT_DATABASE = UnitDatabase.GetSingleton() 62 | 63 | logger = logging.getLogger(f"hydromt.{__name__}") 64 | 65 | 66 | def create_query( 67 | **kwargs: dict[str, Any], 68 | ) -> str: 69 | """Generate a query for a pandas DataFrame. 70 | 71 | Parameters 72 | ---------- 73 | kwargs : dict 74 | Keyword arguments that are processed to a query. N.b. these are additive. 75 | 76 | Returns 77 | ------- 78 | str 79 | A string containing the pandas dataframe query. 
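Examples
--------
A short doctest-style illustration, derived directly from the logic below (string values are quoted, lists become ``in`` clauses, and clauses are joined additively with ``and``):

>>> create_query(object_type="residential", object_id=[1, 2])
"object_type == 'residential' and object_id in [1, 2]"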
80 | """ 81 | sub_queries = [] 82 | for key, item in kwargs.items(): 83 | if isinstance(item, (list, tuple)): 84 | sub_queries.append(f"{key} in {str(item)}") 85 | continue 86 | if isinstance(item, str): 87 | item = f"'{item}'" 88 | sub_queries.append(f"{key} == {str(item)}") 89 | query = " and ".join(sub_queries) 90 | return query 91 | 92 | 93 | def standard_unit(unit: Scalar) -> Scalar: 94 | """Translate unit to standard unit for category. 95 | 96 | Accepted units are listed on the website of barril: 97 | https://barril.readthedocs.io/en/latest/units.html 98 | 99 | Parameters 100 | ---------- 101 | unit : Scalar 102 | A unit. 103 | 104 | Returns 105 | ------- 106 | Scalar 107 | Scaling factor in Scalar structure (unitless). 108 | """ 109 | # Check for the dafault unit 110 | default_unit = UNIT_DATABASE.GetDefaultUnit(unit.category) 111 | if default_unit == unit.unit: 112 | return unit 113 | 114 | # Setup for scaling 115 | default_scalar = Scalar(1.0, default_unit) 116 | logger.warning( 117 | f"Given unit ({unit.unit}) does not match \ 118 | the standard unit ({default_unit}) for {unit.category}" 119 | ) 120 | translate = unit / default_scalar 121 | 122 | return translate 123 | -------------------------------------------------------------------------------- /src/hydromt_fiat/workflows/exposure_grid.py: -------------------------------------------------------------------------------- 1 | """Exposure workflows.""" 2 | 3 | import logging 4 | 5 | import pandas as pd 6 | import xarray as xr 7 | 8 | from hydromt_fiat.utils import CURVE, EXPOSURE_LINK, FN_CURVE, OBJECT_TYPE, SUBTYPE 9 | from hydromt_fiat.workflows.utils import _merge_dataarrays, _process_dataarray 10 | 11 | __all__ = ["exposure_grid_setup"] 12 | 13 | logger = logging.getLogger(f"hydromt.{__name__}") 14 | 15 | 16 | def exposure_grid_setup( 17 | grid_like: xr.Dataset | None, 18 | exposure_data: dict[str, xr.DataArray], 19 | vulnerability: pd.DataFrame, 20 | exposure_linking: pd.DataFrame | None = None, 21 | ) -> xr.Dataset: 22 | """Read and transform exposure grid data. 23 | 24 | Parameters 25 | ---------- 26 | grid_like : xr.Dataset | None 27 | Xarray dataset that is used to transform exposure data with. If set to None, 28 | the first data array in exposure_data is used to transform the data. 29 | exposure_data : dict[str, xr.DataArray] 30 | Dictionary containing name of exposure file and associated data 31 | vulnerability : pd.DataFrame 32 | A Table containing valid vulnerability curve id's an their 33 | presumed link to the exposure. 34 | exposure_linking : pd.DataFrame, optional 35 | Table containing the names of the exposure files and corresponding 36 | vulnerability curves. 37 | 38 | Returns 39 | ------- 40 | xr.Dataset 41 | Transformed and unified exposure grid. 
42 | """ 43 | exposure_dataarrays = [] 44 | 45 | # Log the fact that there is not linking table 46 | if exposure_linking is None: 47 | logger.warning( 48 | "No exposure linking provided, \ 49 | defaulting to the name of the exposure layer" 50 | ) 51 | # Construct a dummy dataframe from the names 52 | entries = list(exposure_data.keys()) 53 | exposure_linking = pd.DataFrame( 54 | data={ 55 | EXPOSURE_LINK: entries, 56 | OBJECT_TYPE: entries, 57 | } 58 | ) 59 | 60 | # Check if linking table columns are named according to convention 61 | for col_name in [EXPOSURE_LINK, OBJECT_TYPE]: 62 | if col_name not in exposure_linking.columns: 63 | raise ValueError( 64 | f"Missing column, '{col_name}' in exposure grid linking table" 65 | ) 66 | 67 | # Get the unique exposure types 68 | headers = vulnerability[EXPOSURE_LINK] 69 | if SUBTYPE in vulnerability: 70 | headers = vulnerability[EXPOSURE_LINK] + "_" + vulnerability[SUBTYPE] 71 | 72 | # Loop through the the supplied data arrays 73 | for da_name, da in exposure_data.items(): 74 | if da_name not in exposure_linking[EXPOSURE_LINK].values: 75 | link_name = da_name 76 | else: 77 | link_name = exposure_linking.loc[ 78 | exposure_linking[EXPOSURE_LINK] == da_name, OBJECT_TYPE 79 | ].values[0] 80 | 81 | # Check if in vulnerability curves link table 82 | link = vulnerability[headers == link_name] 83 | if link.empty: 84 | logger.warning(f"Couldn't link '{da_name}' to vulnerability, skipping...") 85 | continue 86 | 87 | # Get the vulnerability curve ID 88 | fn_curve = link[CURVE].values[0] 89 | 90 | # Process the arrays, .e.g make gdal compliant 91 | da = _process_dataarray(da=da, da_name=da_name) 92 | da = da.assign_attrs({FN_CURVE: fn_curve}) 93 | exposure_dataarrays.append(da) 94 | 95 | if len(exposure_dataarrays) == 0: 96 | return xr.Dataset() 97 | 98 | return _merge_dataarrays(grid_like=grid_like, dataarrays=exposure_dataarrays) 99 | -------------------------------------------------------------------------------- /tests/data/test_fetch.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import pytest 5 | from pooch.processors import ExtractorProcessor 6 | 7 | from hydromt_fiat.data import fetch_data 8 | from hydromt_fiat.data.fetch import _fetch_registry, _unpack_processor 9 | 10 | 11 | def test__fetch_registry_local(): 12 | # Call the function 13 | db = _fetch_registry(local_registry=True) 14 | 15 | # Assert the output 16 | assert isinstance(db, dict) 17 | assert "data" in db 18 | 19 | 20 | def test__fetch_registry_remote(): 21 | # Call the function 22 | db = _fetch_registry(local_registry=False) 23 | 24 | # Assert the output 25 | assert isinstance(db, dict) 26 | assert "data" in db 27 | 28 | 29 | def test__unpack_processor_known(): 30 | # Call the function 31 | up = _unpack_processor(suffix="tar.gz") 32 | 33 | # Assert the output 34 | assert isinstance(up, ExtractorProcessor) 35 | 36 | 37 | def test__unpack_processor_unknown(): 38 | # Call the function 39 | up = _unpack_processor(suffix="foo") 40 | 41 | # Assert the output 42 | assert up is None 43 | 44 | 45 | def test_fetch_data(): 46 | # Call the function in it's default state 47 | path = fetch_data(data="fiat-model-c") 48 | 49 | # Get the cache dir location 50 | cache_dir = Path("~", ".cache", "hydromt_fiat").expanduser() 51 | data_dir = Path(cache_dir, "fiat-model-c") 52 | 53 | # Assert the output 54 | assert Path(cache_dir, "fiat-model-c.tar.gz").is_file() 55 | assert data_dir.is_dir() 56 | assert data_dir == path 
57 | assert Path(data_dir, "exposure").is_dir() 58 | assert Path(data_dir, "settings.toml").is_file() 59 | 60 | 61 | def test_fetch_data_directory(tmp_path: Path): 62 | # Call the function in its default state 63 | path = fetch_data(data="fiat-model-c", output_dir=tmp_path) 64 | 65 | # Get the cache dir location 66 | cache_dir = Path("~", ".cache", "hydromt_fiat").expanduser() 67 | data_dir = Path(tmp_path, "fiat-model-c") 68 | 69 | # Assert the output 70 | assert Path(cache_dir, "fiat-model-c.tar.gz").is_file() 71 | assert data_dir.is_dir() 72 | assert data_dir == path 73 | assert Path(data_dir, "exposure").is_dir() 74 | assert Path(data_dir, "settings.toml").is_file() 75 | 76 | 77 | def test_fetch_data_no_subdir(tmp_path: Path): 78 | # Call the function in its default state 79 | path = fetch_data(data="fiat-model-c", output_dir=tmp_path, sub_dir=False) 80 | 81 | # Get the cache dir location 82 | cache_dir = Path("~", ".cache", "hydromt_fiat").expanduser() 83 | 84 | # Assert the output 85 | assert Path(cache_dir, "fiat-model-c.tar.gz").is_file() 86 | assert tmp_path == path 87 | assert Path(tmp_path, "exposure").is_dir() 88 | assert Path(tmp_path, "settings.toml").is_file() 89 | 90 | 91 | def test_fetch_data_relative(tmp_path: Path): 92 | # Set the cwd 93 | cur_cwd = Path.cwd() 94 | os.chdir(tmp_path) 95 | 96 | # Call the function in its default state 97 | path = fetch_data(data="fiat-model-c", output_dir="data", sub_dir=False) 98 | 99 | # Get the cache dir location 100 | cache_dir = Path("~", ".cache", "hydromt_fiat").expanduser() 101 | data_dir = Path(tmp_path, "data") 102 | 103 | # Assert the output 104 | assert Path(cache_dir, "fiat-model-c.tar.gz").is_file() 105 | assert data_dir == path 106 | assert Path(data_dir, "exposure").is_dir() 107 | assert Path(data_dir, "settings.toml").is_file() 108 | 109 | # Change the cwd back 110 | os.chdir(cur_cwd) 111 | 112 | 113 | def test_fetch_data_errors(): 114 | # Call the function while requesting something that isn't there 115 | with pytest.raises( 116 | ValueError, 117 | match="Choose one of the following: ", 118 | ): 119 | fetch_data(data="foobar") 120 | -------------------------------------------------------------------------------- /tests/components/conftest.py: -------------------------------------------------------------------------------- 1 | import platform 2 | import shutil 3 | from pathlib import Path 4 | from typing import Callable 5 | from unittest.mock import MagicMock, PropertyMock 6 | 7 | import geopandas as gpd 8 | import pandas as pd 9 | import pytest 10 | from hydromt import DataCatalog 11 | from hydromt.model import ModelRoot 12 | from pyproj.crs import CRS 13 | from pytest_mock import MockerFixture 14 | 15 | from hydromt_fiat import FIATModel 16 | from hydromt_fiat.components import ConfigComponent 17 | 18 | 19 | ## OS related fixture 20 | @pytest.fixture(scope="session") 21 | def mount_string() -> str: 22 | if platform.system().lower() == "windows": 23 | return "d:/" 24 | return "/d/" # Posix paths 25 | 26 | 27 | ## Models and Mocked objects 28 | @pytest.fixture 29 | def model_exposure_setup( 30 | model_with_region: FIATModel, 31 | vulnerability_curves: pd.DataFrame, 32 | vulnerability_identifiers: pd.DataFrame, 33 | ) -> FIATModel: 34 | model = model_with_region 35 | model.vulnerability.set( 36 | vulnerability_curves, 37 | name="curves", 38 | ) 39 | model.vulnerability.set( 40 | vulnerability_identifiers, 41 | name="identifiers", 42 | ) 43 | return model 44 | 45 | 46 | @pytest.fixture 47 | def mock_model_factory( 48 |
mocker: MockerFixture, tmp_path: Path 49 | ) -> Callable[[Path, str], FIATModel]: 50 | def _factory(path: Path = tmp_path, mode: str = "w") -> MagicMock: 51 | model = mocker.create_autospec(FIATModel) 52 | model.root = ModelRoot(path, mode=mode) 53 | model.data_catalog = mocker.create_autospec(DataCatalog) 54 | model.crs = CRS.from_epsg(4326) 55 | return model 56 | 57 | return _factory 58 | 59 | 60 | @pytest.fixture 61 | def mock_model(mock_model_factory: Callable[[Path, str], FIATModel]) -> MagicMock: 62 | model = mock_model_factory() 63 | return model 64 | 65 | 66 | @pytest.fixture 67 | def mock_model_config( 68 | mock_model_factory: Callable[[Path, str], FIATModel], 69 | ) -> MagicMock: 70 | model = mock_model_factory() 71 | config = ConfigComponent(model) 72 | type(model).config = PropertyMock(side_effect=lambda: config) 73 | return model 74 | 75 | 76 | ## Extra data structures 77 | @pytest.fixture 78 | def config_dummy(tmp_path: Path) -> dict: 79 | data = { 80 | "foo": "bar", 81 | "baz": { 82 | "file1": Path(tmp_path, "tmp.txt"), 83 | "file2": "tmp/tmp.txt", 84 | }, 85 | "spooky": {"ghost": [1, 2, 3]}, 86 | "multi": [{"file": "tmp/tmp.txt"}, {"file": "boo.txt"}], 87 | } 88 | return data 89 | 90 | 91 | ## Extra data paths 92 | @pytest.fixture 93 | def exposure_cost_link_path( 94 | tmp_path: Path, 95 | exposure_cost_link: pd.DataFrame, 96 | ) -> Path: 97 | p = Path(tmp_path, "cost_link.csv") 98 | exposure_cost_link.to_csv(p, index=False) 99 | assert p.is_file() 100 | return p 101 | 102 | 103 | @pytest.fixture 104 | def exposure_vector_clipped_csv_path( 105 | tmp_path: Path, 106 | exposure_vector_clipped: gpd.GeoDataFrame, 107 | ) -> Path: 108 | p = Path(tmp_path, "foo.fgb") 109 | # Separate the geometry data 110 | geom = exposure_vector_clipped.loc[:, ["object_id", "geometry"]] 111 | geom.to_file(p) 112 | assert p.is_file() 113 | # Separate the tabular data 114 | cols = exposure_vector_clipped.columns.values.tolist() 115 | cols.remove("geometry") 116 | data = exposure_vector_clipped.loc[:, cols] 117 | data.to_csv(p.with_suffix(".csv"), index=False) 118 | assert p.with_suffix(".csv").is_file() 119 | return p 120 | 121 | 122 | @pytest.fixture 123 | def vulnerability_curves_only_path( 124 | tmp_path: Path, 125 | model_data_path: Path, 126 | ) -> Path: 127 | p = Path(tmp_path, "curves.csv") 128 | shutil.copy2( 129 | src=Path(model_data_path, "vulnerability", "curves.csv"), 130 | dst=p, 131 | ) 132 | assert p.is_file() 133 | return p 134 | -------------------------------------------------------------------------------- /src/hydromt_fiat/components/utils.py: -------------------------------------------------------------------------------- 1 | """Component utilities.""" 2 | 3 | import os 4 | import re 5 | from os.path import relpath 6 | from pathlib import Path 7 | from typing import Any 8 | 9 | from hydromt._utils.naming_convention import _expand_uri_placeholders 10 | 11 | MOUNT_PATTERN = re.compile(r"(^\/(\w+)\/|^(\w+):\/).*$") 12 | 13 | 14 | ## Config/ pathing related 15 | def _mount( 16 | value: str, 17 | ) -> str | None: 18 | """Get the mount of a path.""" 19 | m = MOUNT_PATTERN.match(value) 20 | if m is None: 21 | return None 22 | return m.group(1) 23 | 24 | 25 | def _relpath( 26 | value: Any, 27 | root: Path, 28 | ) -> str | Any: 29 | """Generate a relative path.""" 30 | if not isinstance(value, Path): 31 | return value 32 | value = Path(value) 33 | if _mount(value.as_posix()) == _mount(root.as_posix()): 34 | value = Path(relpath(value, root)) 35 | return value.as_posix() 36 | 37 | 38 |
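# Illustrative behaviour of the two helpers above (values assumed, matching
# the cases covered in tests/components/test_component_utils.py):
#   _mount("/d/tmp/foo") -> "/d/"   (posix mount)
#   _mount("d:/tmp/foo") -> "d:/"   (windows drive)
#   _relpath(Path("/d/a/b.txt"), Path("/d/a")) -> "b.txt"
# Paths on a different mount than the root are returned unchanged.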
def make_config_paths_relative( 39 | data: dict[str, Any], 40 | root: Path, 41 | ) -> dict[str, Any]: 42 | """Make the configurations path relative to the root. 43 | 44 | This only concerns itself with paths that are absolute and on 45 | the same mount. 46 | 47 | Parameters 48 | ---------- 49 | data : dict 50 | The configurations in a dictionary format. 51 | root : Path 52 | The root to which the paths are made relative. 53 | Most of the time, this will be the parent directory of the 54 | configurations file. 55 | """ 56 | for key, val in data.items(): 57 | if isinstance(val, list) and all([isinstance(item, dict) for item in val]): 58 | for item in val: 59 | make_config_paths_relative(item, root) 60 | if isinstance(val, dict): 61 | data.update({key: make_config_paths_relative(val, root)}) 62 | else: 63 | data.update({key: _relpath(val, root)}) 64 | return data 65 | 66 | 67 | def get_item( 68 | parts: list[str], 69 | current: dict[str, Any], 70 | root: Path | str, 71 | fallback: Any | None = None, 72 | abs_path: bool = False, 73 | ) -> Any | None: 74 | """Get item from a dictionary.""" 75 | num_parts = len(parts) 76 | for i, part in enumerate(parts): 77 | if isinstance(current, list): 78 | return [ 79 | get_item(parts[i:], item, root, fallback, abs_path) for item in current 80 | ] 81 | if i < num_parts - 1: 82 | current = current.get(part, {}) 83 | else: 84 | value = current.get(part, fallback) 85 | if abs_path and isinstance(value, (Path, str)): 86 | value = Path(root, value) 87 | return value 88 | return None 89 | 90 | 91 | def pathing_expand( 92 | root: Path, filename: str | None = None 93 | ) -> tuple[list[Path], list[str]] | None: 94 | """Sort the pathing on reading based on a wildcard.""" 95 | # If the filename is None, do nothing 96 | if filename is None: 97 | return None 98 | # Expand 99 | path_glob, _, regex = _expand_uri_placeholders(filename) 100 | p = list(Path(root).glob(path_glob)) 101 | n = [] 102 | # Get the unique names 103 | for item in p: 104 | rel = Path(os.path.relpath(item, root)) 105 | name = ".".join(regex.match(rel.as_posix()).groups()) 106 | n.append(name) 107 | return p, n 108 | 109 | 110 | def pathing_config( 111 | p: list[Path] | Path | str | None, 112 | ) -> tuple[list[Path], list[str]] | None: 113 | """Sort pathing based on config entries (i.e. 
a list).""" 114 | if p is None: 115 | return None 116 | # Handling legacy configs 117 | if not isinstance(p, list): 118 | p = [Path(p)] 119 | # If no files return None 120 | if all([item is None for item in p]): 121 | return None 122 | # Remove entries with no files and get the names of the remaining ones 123 | p = [Path(item) for item in p if item is not None] 124 | n = [item.stem for item in p] 125 | return p, n 126 | -------------------------------------------------------------------------------- /examples/data_catalog.yml: -------------------------------------------------------------------------------- 1 | meta: 2 | version: v2024.4 3 | name: hydromt-fiat_test-data 4 | 5 | bag: 6 | data_type: GeoDataFrame 7 | uri: buildings/bag.fgb 8 | driver: 9 | name: pyogrio 10 | filesystem: local 11 | metadata: 12 | crs: 28992 13 | url: https://service.pdok.nl/lv/bag/atom/bag.xml 14 | 15 | bag_link: 16 | data_type: DataFrame 17 | uri: buildings/bag-jrc_map.csv 18 | driver: 19 | name: pandas 20 | filesystem: local 21 | metadata: 22 | notes: Made by dalmijn 23 | 24 | flood_event: 25 | data_type: RasterDataset 26 | uri: floodmaps/event.tif 27 | driver: 28 | name: rasterio 29 | filesystem: local 30 | options: 31 | chunks: 32 | x: 1500 33 | y: 1500 34 | metadata: 35 | category: flood maps 36 | crs: 28992 37 | notes: Made by Rijkswaterstaat/ Deltares 38 | 39 | flood_50000: 40 | data_type: RasterDataset 41 | uri: floodmaps/rp_50000.tif 42 | driver: 43 | name: rasterio 44 | filesystem: local 45 | options: 46 | chunks: 47 | x: 1500 48 | y: 1500 49 | metadata: 50 | category: flood maps 51 | crs: 28992 52 | notes: Made by Rijkswaterstaat/ Deltares 53 | 54 | jrc_damage_values: 55 | data_type: DataFrame 56 | uri: vulnerability/jrc_base_damage_values.csv 57 | driver: 58 | name: pandas 59 | filesystem: local 60 | metadata: 61 | category: vulnerability 62 | notes: Base damage values from the JRC publicated Excel from the tab "MaxDamage-Data", processed into a handy format for HydroMT-FIAT. 63 | url: https://publications.jrc.ec.europa.eu/repository/handle/JRC105688 64 | paper_ref: Huizinga, J., De Moel, H. and Szewczyk, W., Global flood depth-damage functions - Methodology and the database with guidelines, EUR 28552 EN, Publications Office of the European Union, Luxembourg, 2017, ISBN 978-92-79-67781-6, doi:10.2760/16510, JRC105688. 65 | paper_doi: https://dx.doi.org/10.2760/16510 66 | 67 | jrc_vulnerability_curves: 68 | data_type: DataFrame 69 | uri: vulnerability/jrc_damage_functions.csv 70 | driver: 71 | name: pandas 72 | filesystem: local 73 | metadata: 74 | category: vulnerability 75 | notes: JRC depth-damage functions for flooding, processed into a handy format for HydroMT-FIAT. 76 | url: https://publications.jrc.ec.europa.eu/repository/handle/JRC105688 77 | paper_ref: Huizinga, J., De Moel, H. and Szewczyk, W., Global flood depth-damage functions - Methodology and the database with guidelines, EUR 28552 EN, Publications Office of the European Union, Luxembourg, 2017, ISBN 978-92-79-67781-6, doi:10.2760/16510, JRC105688. 78 | paper_doi: https://dx.doi.org/10.2760/16510 79 | 80 | jrc_vulnerability_curves_linking: 81 | data_type: DataFrame 82 | uri: vulnerability/jrc_damage_functions_linking.csv 83 | driver: 84 | name: pandas 85 | filesystem: local 86 | metadata: 87 | category: vulnerability 88 | notes: Default linking table for the JRC damage functions (e.g., the residential damage function links to residential buildings). 
89 | 90 | osm_buildings: 91 | uri: building 92 | data_type: GeoDataFrame 93 | uri_resolver: osm_resolver 94 | driver: 95 | name: osm 96 | options: 97 | geom_type: 98 | - MultiPolygon 99 | - Polygon 100 | 101 | osm_roads: 102 | uri: highway 103 | data_type: GeoDataFrame 104 | uri_resolver: osm_resolver 105 | driver: 106 | name: osm 107 | options: 108 | geom_type: 109 | - LineString 110 | - MultiLineString 111 | tags: 112 | - motorway 113 | - primary 114 | - secondary 115 | - tertiary 116 | 117 | osm_landuse: 118 | uri: landuse 119 | data_type: GeoDataFrame 120 | uri_resolver: osm_resolver 121 | driver: 122 | name: osm 123 | options: 124 | geom_type: 125 | - MultiPolygon 126 | - Polygon 127 | 128 | osm_amenity: 129 | uri: amenity 130 | data_type: GeoDataFrame 131 | uri_resolver: osm_resolver 132 | driver: 133 | name: osm 134 | options: 135 | geom_type: 136 | - MultiPolygon 137 | - Polygon 138 | -------------------------------------------------------------------------------- /src/hydromt_fiat/data/fetch.py: -------------------------------------------------------------------------------- 1 | """Data for examples and testing of HydroMT-FIAT.""" 2 | 3 | import json 4 | import logging 5 | from pathlib import Path 6 | from typing import Any 7 | 8 | import pooch 9 | import requests 10 | from pooch.processors import ExtractorProcessor 11 | 12 | __all__ = ["fetch_data"] 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | PROCESSORS = { 17 | "tar.gz": pooch.Untar, 18 | "zip": pooch.Unzip, 19 | } 20 | REMOTE_REGISTRY = "https://raw.githubusercontent.com/Deltares/hydromt_fiat/refs/heads/main/src/hydromt_fiat/data/registry.json" 21 | 22 | 23 | def _fetch_registry( 24 | local_registry: bool = True, 25 | ) -> dict[str, Any]: 26 | """Fetch the registry.""" 27 | # Get the data either from the local repo or remote repo 28 | if local_registry: 29 | with open(Path(__file__).parent / "registry.json", "r") as f: 30 | data = f.read() 31 | else: 32 | r = requests.get(REMOTE_REGISTRY, timeout=5) 33 | data = r.text 34 | 35 | # Load the json data 36 | database = json.loads(data) 37 | return database 38 | 39 | 40 | def _unpack_processor( 41 | suffix: str, 42 | extract_dir: Path | str = "./", 43 | ) -> ExtractorProcessor: 44 | """Select the right processor for unpacking.""" 45 | if suffix not in PROCESSORS: 46 | return None 47 | processor = PROCESSORS[suffix](members=None, extract_dir=extract_dir) 48 | return processor 49 | 50 | 51 | def fetch_data( 52 | data: str, 53 | local_registry: bool = True, 54 | sub_dir: bool = True, 55 | output_dir: Path | str | None = None, 56 | ) -> Path: 57 | """Fetch data by simply calling the function. 58 | 59 | Parameters 60 | ---------- 61 | data : str 62 | The data to fetch. 63 | local_registry : bool, optional 64 | If True, the registry is taken from the current library location. 65 | Otherwise, it is taken from the remote 'main' branch on github, by default True. 66 | sub_dir : bool 67 | Whether to place the fetched data in a sub directory of the same name. 68 | I.e. if the (tarred) dataset is named 'custom-data' a directory named 69 | 'custom-data' is created in which the data are placed. By default True. 70 | output_dir : Path | str | None 71 | The output directory to store the data. 72 | If None, the data will be stored in ~/.cache/hydromt_fiat/. 73 | 74 | Returns 75 | ------- 76 | Path 77 | The output directory where the data is stored. 
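Examples
--------
A minimal sketch; the archive is cached under ``~/.cache/hydromt_fiat`` and unpacked into the returned directory:

>>> path = fetch_data(data="fiat-model-c")
>>> (path / "settings.toml").is_file()
True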
78 | """ 79 | # Open the registry 80 | # update the base URL and registry with new versions of the data 81 | # use create_artifact.py script to create the build-data/ test-data archives 82 | database = _fetch_registry(local_registry=local_registry) 83 | base_url: str = database["url"] 84 | registry: dict[str, str] = database["data"] 85 | # Set the cache directory, for at the very least the tarball 86 | cache_dir = Path("~", ".cache", "hydromt_fiat").expanduser() 87 | cache_dir.mkdir(parents=True, exist_ok=True) 88 | 89 | if output_dir is None: 90 | output_dir = cache_dir 91 | output_dir = Path(output_dir) 92 | if not output_dir.is_absolute(): 93 | output_dir = Path(Path.cwd(), output_dir) 94 | output_dir.mkdir(parents=True, exist_ok=True) 95 | 96 | # Quick check whether the data can be found 97 | choices_raw = list(registry.keys()) 98 | choices = [item.split(".", 1)[0] for item in choices_raw] 99 | if data not in choices: 100 | raise ValueError(f"Choose one of the following: {choices}") 101 | idx = choices.index(data) 102 | 103 | # Setup Pooch 104 | retriever = pooch.create( 105 | path=cache_dir, # store archive to cache 106 | base_url=base_url, 107 | registry=registry, 108 | retry_if_failed=10, 109 | ) 110 | 111 | # Set the way of unpacking it 112 | suffix = choices_raw[idx].split(".", 1)[1] 113 | extract_dir = output_dir 114 | if sub_dir: 115 | extract_dir = Path(extract_dir, data) 116 | processor = _unpack_processor(suffix, extract_dir=extract_dir) 117 | # Retrieve the data 118 | retriever.fetch(choices_raw[idx], processor=processor) 119 | 120 | return extract_dir 121 | -------------------------------------------------------------------------------- /tests/workflows/test_exposure_grid.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pandas as pd 4 | import pytest 5 | import xarray as xr 6 | 7 | from hydromt_fiat.utils import EXPOSURE_LINK, FN_CURVE, OBJECT_TYPE 8 | from hydromt_fiat.workflows import exposure_grid_setup 9 | 10 | 11 | def test_exposure_grid_setup( 12 | exposure_grid_data_ind: xr.DataArray, 13 | vulnerability_linking: pd.DataFrame, 14 | ): 15 | # Call the function 16 | ds = exposure_grid_setup( 17 | grid_like=None, 18 | exposure_data={"industrial_content": exposure_grid_data_ind}, 19 | vulnerability=vulnerability_linking, 20 | ) 21 | 22 | # Assert the output 23 | assert isinstance(ds, xr.Dataset) 24 | assert len(ds.data_vars) == 1 25 | assert ds.industrial_content.attrs.get(FN_CURVE) == "in2" 26 | 27 | 28 | def test_exposure_grid_setup_linking( 29 | exposure_grid_data_ind: xr.DataArray, 30 | vulnerability_linking: pd.DataFrame, 31 | exposure_grid_link: pd.DataFrame, 32 | ): 33 | # Call the function, bit stupid is this table just returns the same 34 | ds = exposure_grid_setup( 35 | grid_like=None, 36 | exposure_data={"industrial_content": exposure_grid_data_ind}, 37 | vulnerability=vulnerability_linking, 38 | exposure_linking=exposure_grid_link, 39 | ) 40 | 41 | # Assert the output 42 | assert isinstance(ds, xr.Dataset) 43 | assert ds.industrial_content.attrs.get(FN_CURVE) == "in2" 44 | 45 | 46 | def test_exposure_grid_setup_link_no( 47 | exposure_grid_data_ind: xr.DataArray, 48 | vulnerability_linking: pd.DataFrame, 49 | ): 50 | # Call the function, bit stupid is this table just returns the same 51 | ds = exposure_grid_setup( 52 | grid_like=None, 53 | exposure_data={"industrial_content": exposure_grid_data_ind}, 54 | vulnerability=vulnerability_linking, 55 | 
exposure_linking=pd.DataFrame(data={EXPOSURE_LINK: [], OBJECT_TYPE: []}), 56 | ) 57 | 58 | # Assert the output 59 | assert isinstance(ds, xr.Dataset) 60 | assert ds.industrial_content.attrs.get(FN_CURVE) == "in2" 61 | 62 | 63 | def test_exposure_grid_setup_alt( 64 | caplog: pytest.LogCaptureFixture, 65 | exposure_grid_data_ind: xr.DataArray, 66 | vulnerability_linking_alt: pd.DataFrame, 67 | ): 68 | caplog.set_level(logging.WARNING) 69 | # Call the function, shouldn't be able to link to the vulnerability 70 | ds = exposure_grid_setup( 71 | grid_like=None, 72 | exposure_data={"industrial_content": exposure_grid_data_ind}, 73 | vulnerability=vulnerability_linking_alt, 74 | ) 75 | 76 | # Assert the output 77 | assert isinstance(ds, xr.Dataset) 78 | assert len(ds.data_vars) == 0 79 | # Assert the logging 80 | assert ( 81 | "Couldn't link 'industrial_content' to vulnerability, skipping..." 82 | in caplog.text 83 | ) 84 | 85 | 86 | def test_exposure_grid_setup_alt_link( 87 | exposure_grid_data_ind: xr.DataArray, 88 | vulnerability_linking_alt: pd.DataFrame, 89 | ): 90 | # Call the function, shouldn't be able to link to the vulnerability 91 | ds = exposure_grid_setup( 92 | grid_like=None, 93 | exposure_data={"industrial_content": exposure_grid_data_ind}, 94 | vulnerability=vulnerability_linking_alt, 95 | exposure_linking=pd.DataFrame( 96 | data={ 97 | EXPOSURE_LINK: ["industrial_content"], 98 | OBJECT_TYPE: ["industrial"], 99 | } 100 | ), 101 | ) 102 | 103 | # Assert the output 104 | assert isinstance(ds, xr.Dataset) 105 | assert len(ds.data_vars) == 1 106 | 107 | 108 | def test_exposure_grid_setup_errors( 109 | exposure_grid_data_ind: xr.DataArray, 110 | vulnerability_linking: pd.DataFrame, 111 | ): 112 | # Assert an error on the missing necessary columns 113 | with pytest.raises( 114 | ValueError, 115 | match="Missing column, 'exposure_link' in exposure grid linking table", 116 | ): 117 | # Call the function 118 | _ = exposure_grid_setup( 119 | grid_like=None, 120 | exposure_data={"industrial_content": exposure_grid_data_ind}, 121 | vulnerability=vulnerability_linking, 122 | exposure_linking=pd.DataFrame(), 123 | ) 124 | -------------------------------------------------------------------------------- /tests/workflows/conftest.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import geopandas as gpd 4 | import numpy as np 5 | import pandas as pd 6 | import pytest 7 | import xarray as xr 8 | from hydromt import DataCatalog 9 | from hydromt.gis import full 10 | 11 | 12 | ## Data from the data catalog 13 | @pytest.fixture 14 | def buildings_data( 15 | build_data_catalog: DataCatalog, build_region_small: gpd.GeoDataFrame 16 | ) -> gpd.GeoDataFrame: 17 | gdf = build_data_catalog.get_geodataframe("buildings", geom=build_region_small) 18 | return gdf 19 | 20 | 21 | @pytest.fixture(scope="session") 22 | def buildings_link_table(build_data_catalog: DataCatalog) -> pd.DataFrame: 23 | df = build_data_catalog.get_dataframe("buildings_link") 24 | return df 25 | 26 | 27 | @pytest.fixture(scope="session") 28 | def exposure_cost_table(global_data_catalog: DataCatalog) -> pd.DataFrame: 29 | df = global_data_catalog.get_dataframe("jrc_damage") 30 | return df 31 | 32 | 33 | @pytest.fixture 34 | def exposure_grid_data_ind( 35 | build_data_catalog: DataCatalog, build_region_small: gpd.GeoDataFrame 36 | ) -> xr.DataArray: 37 | ds = build_data_catalog.get_rasterdataset( 38 | "industrial_content", 39 | geom=build_region_small, 40 | ) 41 | return ds 
42 | 43 | 44 | @pytest.fixture 45 | def exposure_grid_link(build_data_catalog: DataCatalog) -> pd.DataFrame: 46 | df = build_data_catalog.get_dataframe("exposure_grid_link") 47 | return df 48 | 49 | 50 | @pytest.fixture 51 | def hazard_event_data( 52 | build_data_catalog: DataCatalog, build_region_small: gpd.GeoDataFrame 53 | ) -> xr.DataArray: 54 | ds = build_data_catalog.get_rasterdataset("flood_event", geom=build_region_small) 55 | return ds 56 | 57 | 58 | @pytest.fixture 59 | def hazard_event_data_highres( 60 | build_data_catalog: DataCatalog, 61 | build_region_small: gpd.GeoDataFrame, 62 | ) -> xr.DataArray: 63 | ds = build_data_catalog.get_rasterdataset( 64 | "flood_event_highres", 65 | geom=build_region_small, 66 | ) 67 | return ds 68 | 69 | 70 | @pytest.fixture 71 | def vulnerability_data(global_data_catalog: DataCatalog) -> pd.DataFrame: 72 | df = global_data_catalog.get_dataframe("jrc_curves") 73 | assert len(df) != 0 74 | return df 75 | 76 | 77 | @pytest.fixture 78 | def vulnerability_data_row_oriented(vulnerability_data) -> pd.DataFrame: 79 | vulnerability_data = vulnerability_data.transpose() 80 | vulnerability_data.columns = vulnerability_data.iloc[0] 81 | vulnerability_data.drop(0, inplace=True) 82 | return vulnerability_data 83 | 84 | 85 | @pytest.fixture 86 | def vulnerability_linking(global_data_catalog: DataCatalog) -> pd.DataFrame: 87 | df = global_data_catalog.get_dataframe("jrc_curves_link") 88 | assert len(df) != 0 89 | return df 90 | 91 | 92 | @pytest.fixture 93 | def vulnerability_linking_alt(global_data_catalog: DataCatalog) -> pd.DataFrame: 94 | df = global_data_catalog.get_dataframe("jrc_curves_link_alt") 95 | assert len(df) != 0 96 | return df 97 | 98 | 99 | ## Data from a prebuilt model 100 | @pytest.fixture 101 | def exposure_geom_data_alt(model_data_clipped_path: Path) -> gpd.GeoDataFrame: 102 | p = Path(model_data_clipped_path, "exposure", "buildings_alt.fgb") 103 | assert p.is_file() 104 | gdf = gpd.read_file(p) 105 | assert len(gdf) != 0 106 | return gdf 107 | 108 | 109 | @pytest.fixture 110 | def exposure_geom_data_link( 111 | exposure_vector_clipped_for_damamge: gpd.GeoDataFrame, 112 | ) -> gpd.GeoDataFrame: 113 | exposure_vector_clipped_for_damamge.drop( 114 | [ 115 | "object_id", 116 | "fn_damage_structure", 117 | "fn_damage_content", 118 | ], 119 | axis=1, 120 | inplace=True, 121 | ) 122 | return exposure_vector_clipped_for_damamge 123 | 124 | 125 | @pytest.fixture(scope="session") 126 | def vulnerability_curves_alt(model_data_path: Path) -> pd.DataFrame: 127 | p = Path(model_data_path, "vulnerability", "curves_alt.csv") 128 | assert p.is_file() 129 | df = pd.read_csv(p) 130 | assert len(df) != 0 131 | return df 132 | 133 | 134 | @pytest.fixture(scope="session") 135 | def vulnerability_identifiers_alt(model_data_path: Path) -> pd.DataFrame: 136 | p = Path(model_data_path, "vulnerability", "curves_alt_id.csv") 137 | assert p.is_file() 138 | df = pd.read_csv(p) 139 | assert len(df) != 0 140 | return df 141 | 142 | 143 | ## Extra data structure 144 | @pytest.fixture(scope="session") 145 | def rotated_grid() -> xr.DataArray: 146 | # Create coordinates 147 | yc = xr.DataArray(data=np.array([[2.0, 1.5], [1.0, 0.5]]), dims=["y", "x"]) 148 | xc = xr.DataArray(data=np.array([[0.5, 1.5], [0.0, 1.0]]), dims=["y", "x"]) 149 | # Build using 'full' from core 150 | da = full(coords={"yc": yc, "xc": xc}, nodata=-1, crs=4326) 151 | return da 152 | -------------------------------------------------------------------------------- /src/hydromt_fiat/components/grid.py:
-------------------------------------------------------------------------------- 1 | """Custom grid component for HydroMT-FIAT.""" 2 | 3 | import logging 4 | from abc import abstractmethod 5 | 6 | import geopandas as gpd 7 | import xarray as xr 8 | from hydromt.model.components import GridComponent 9 | from hydromt.model.steps import hydromt_step 10 | from pyproj.crs import CRS 11 | 12 | from hydromt_fiat.gis.raster_utils import force_ns 13 | 14 | __all__ = ["CustomGridComponent"] 15 | 16 | logger = logging.getLogger(f"hydromt.{__name__}") 17 | 18 | 19 | class CustomGridComponent(GridComponent): 20 | """Base class for FIAT grid based components.""" 21 | 22 | ## Mutating methods 23 | @hydromt_step 24 | def clear(self) -> None: 25 | """Clear the gridded data.""" 26 | self._data = None 27 | self._initialize_grid(skip_read=True) 28 | 29 | @hydromt_step 30 | def clip( 31 | self, 32 | geom: gpd.GeoDataFrame, 33 | buffer: int = 1, 34 | inplace: bool = False, 35 | ) -> xr.Dataset | None: 36 | """Clip the gridded data. 37 | 38 | Parameters 39 | ---------- 40 | geom : gpd.GeoDataFrame 41 | The area to clip the data to. 42 | buffer : int, optional 43 | A buffer of cells around the clipped area to keep, by default 1. 44 | inplace : bool, optional 45 | Whether to do the clipping in place or return a new xr.Dataset, 46 | by default False. 47 | 48 | Returns 49 | ------- 50 | xr.Dataset | None 51 | Return a dataset if the inplace is False. 52 | """ 53 | try: 54 | self.data.raster.set_spatial_dims() 55 | except ValueError: 56 | return None 57 | 58 | # If the spatial dims are set, clip the data 59 | data = self.data.raster.clip_geom(geom, buffer=buffer) 60 | # If inplace, just set the data and return nothing 61 | if inplace: 62 | self._data = data 63 | return None 64 | return data 65 | 66 | @hydromt_step 67 | def reproject( 68 | self, 69 | crs: CRS | int | str, 70 | inplace: bool = False, 71 | ) -> xr.Dataset | None: 72 | """Reproject the gridded data. 73 | 74 | Parameters 75 | ---------- 76 | crs : CRS | int | str 77 | The coordinate system to reproject to. 78 | inplace : bool, optional 79 | Whether to do the reprojection in place or return a new xr.Dataset, 80 | by default False. 81 | 82 | Returns 83 | ------- 84 | xr.Dataset | None 85 | Return a dataset if the inplace is False. 86 | """ 87 | # Check for the crs's 88 | if self.crs is None: 89 | return None 90 | if not isinstance(crs, CRS): 91 | crs = CRS.from_user_input(crs) 92 | 93 | # No need for reprojecting if this is the case 94 | if crs == self.crs: 95 | return None 96 | 97 | # Reproject the data 98 | data = self.data.raster.reproject(dst_crs=crs) 99 | # If inplace, just set the data and return nothing 100 | if inplace: 101 | self._data = data 102 | return None 103 | return data 104 | 105 | def set( 106 | self, 107 | data: xr.Dataset | xr.DataArray, 108 | name: str | None = None, 109 | ) -> None: 110 | """Set gridded data in the component. 111 | 112 | Parameters 113 | ---------- 114 | data : xr.Dataset | xr.DataArray 115 | The data to set. 116 | name : str | None, optional 117 | The name of the data when data is of type DataArray and the DataArray 118 | has no name yet, by default None.
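Examples
--------
A minimal sketch; ``component`` is assumed to be a ``CustomGridComponent`` instance and ``da`` an (unnamed) ``xr.DataArray``:

>>> component.set(da, name="flood_event")  # stored as a Dataset variable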
119 | """ 120 | # Make sure the grid exists 121 | self._initialize_grid() 122 | assert self._data is not None 123 | 124 | # First check the input and typing 125 | if isinstance(data, xr.DataArray): 126 | if data.name is None and name is None: 127 | raise ValueError("DataArray can't be set without a name") 128 | data.name = name 129 | data = data.to_dataset() 130 | if not isinstance(data, xr.Dataset): 131 | raise TypeError(f"Wrong input data type: '{data.__class__.__name__}'") 132 | 133 | # Force ns orientation 134 | data = force_ns(data) 135 | # Set thet data 136 | if len(self._data) == 0: # empty grid 137 | self._data = data 138 | else: 139 | for dvar in data.data_vars: 140 | if dvar in self._data: 141 | logger.warning(f"Replacing grid map: '{dvar}'") 142 | self._data[dvar] = data[dvar] 143 | 144 | ## Setup methods 145 | @abstractmethod 146 | def setup(self, *args, **kwargs) -> None: 147 | """Set up method.""" 148 | ... 149 | -------------------------------------------------------------------------------- /tests/integration/test_build.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from hydromt_fiat import FIATModel 6 | from hydromt_fiat.utils import ( 7 | CURVES, 8 | DAMAGE, 9 | EXPOSURE, 10 | FN_CURVE, 11 | GEOM, 12 | GRID, 13 | HAZARD, 14 | MAX, 15 | MODEL_TYPE, 16 | REGION, 17 | SETTINGS, 18 | VULNERABILITY, 19 | ) 20 | 21 | 22 | @pytest.mark.integration 23 | def test_build_model_geom( 24 | tmp_path: Path, 25 | build_data_catalog_path: Path, 26 | global_data_catalog_path: Path, 27 | build_region_small: Path, 28 | ): 29 | ## HydroMT-FIAT 30 | # Setup the model 31 | model = FIATModel( 32 | root=tmp_path, 33 | mode="w+", 34 | data_libs=[build_data_catalog_path, global_data_catalog_path], 35 | ) 36 | 37 | # Add model type and region 38 | model.setup_config(**{MODEL_TYPE: GEOM}) 39 | model.setup_region(build_region_small) 40 | 41 | # Setup the vulnerability 42 | model.vulnerability.setup( 43 | "jrc_curves", 44 | "jrc_curves_link", 45 | unit="m", 46 | continent="europe", 47 | ) 48 | 49 | # Add an hazard layer 50 | model.hazard.setup( 51 | "flood_event", 52 | ) 53 | 54 | # Setup the exposure geometry data 55 | model.exposure_geoms.setup( 56 | exposure_fname="buildings", 57 | exposure_type_column="gebruiksdoel", 58 | exposure_link_fname="buildings_link", 59 | exposure_type_fill="unknown", 60 | ) 61 | model.exposure_geoms.setup_max_damage( 62 | exposure_name="buildings", 63 | exposure_type=DAMAGE, 64 | exposure_cost_table_fname="jrc_damage", 65 | country="Netherlands", # Select the correct row from the data 66 | ) 67 | # Needed for flood calculations 68 | model.exposure_geoms.update_column( 69 | exposure_name="buildings", 70 | columns=["ref", "method"], 71 | values=[0, "centroid"], 72 | ) 73 | 74 | # Assert the state 75 | assert model.region is not None # Can't build otherwise but still 76 | assert model.config.get(MODEL_TYPE) == GEOM 77 | assert len(model.vulnerability.data.curves) == 1001 78 | assert "rs1" in model.vulnerability.data.curves.columns 79 | assert "flood_event" in model.hazard.data.data_vars 80 | assert model.hazard.data["flood_event"].shape == (5, 4) 81 | assert "buildings" in model.exposure_geoms.data # Kind of obvious 82 | assert len(model.exposure_geoms.data["buildings"]) == 12 83 | assert f"{MAX}_{DAMAGE}_structure" in model.exposure_geoms.data["buildings"].columns 84 | 85 | # Write the model 86 | model.write() 87 | 88 | # Assert the written output (paths) 89 | assert 
Path(tmp_path, f"{REGION}.geojson").is_file() 90 | assert Path(tmp_path, f"{SETTINGS}.toml").is_file() 91 | assert Path(tmp_path, VULNERABILITY, f"{CURVES}.csv").is_file() 92 | assert Path(tmp_path, f"{HAZARD}.nc").is_file() 93 | assert Path(tmp_path, EXPOSURE, "buildings.fgb").is_file() 94 | 95 | 96 | @pytest.mark.integration 97 | def test_build_model_grid( 98 | tmp_path: Path, 99 | build_data_catalog_path: Path, 100 | global_data_catalog_path: Path, 101 | build_region_small: Path, 102 | ): 103 | ## HydroMT-FIAT 104 | # Setup the model 105 | model = FIATModel( 106 | root=tmp_path, 107 | mode="w+", 108 | data_libs=[build_data_catalog_path, global_data_catalog_path], 109 | ) 110 | 111 | # Add model type and region 112 | model.setup_config(**{MODEL_TYPE: GRID}) 113 | model.setup_region(build_region_small) 114 | 115 | # Setup the vulnerability 116 | model.vulnerability.setup( 117 | "jrc_curves", 118 | "jrc_curves_link", 119 | unit="m", 120 | continent="europe", 121 | ) 122 | 123 | # Add a hazard layer 124 | model.hazard.setup( 125 | "flood_event", 126 | ) 127 | 128 | # Setup the exposure grid data 129 | model.exposure_grid.setup( 130 | exposure_fnames=["commercial_structure", "commercial_content"], 131 | exposure_link_fname="exposure_grid_link", 132 | ) 133 | 134 | # Assert the state 135 | assert model.region is not None # Can't build otherwise but still 136 | assert model.config.get(MODEL_TYPE) == GRID 137 | assert len(model.vulnerability.data.curves) == 1001 138 | assert "rs1" in model.vulnerability.data.curves.columns 139 | assert "flood_event" in model.hazard.data.data_vars 140 | assert model.hazard.data["flood_event"].shape == (5, 4) 141 | assert len(model.exposure_grid.data.data_vars) == 2 142 | assert "commercial_content" in model.exposure_grid.data.data_vars 143 | assert model.exposure_grid.data["commercial_content"].attrs[FN_CURVE] == "cm1" 144 | 145 | # Write the model 146 | model.write() 147 | 148 | # Assert the written output (paths) 149 | assert Path(tmp_path, f"{REGION}.geojson").is_file() 150 | assert Path(tmp_path, f"{SETTINGS}.toml").is_file() 151 | assert Path(tmp_path, VULNERABILITY, f"{CURVES}.csv").is_file() 152 | assert Path(tmp_path, f"{HAZARD}.nc").is_file() 153 | assert Path(tmp_path, EXPOSURE, "spatial.nc").is_file() 154 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder.
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Python imports ---------------------------------------------------------- 7 | import os 8 | import shutil 9 | import sys 10 | # Note: distutils (and its dir_util.copy_tree) was removed in Python 3.12; shutil.copytree is used below instead. 11 | 12 | import hydromt_fiat 13 | 14 | # -- Python functions -------------------------------------------------------- 15 | pwd = os.path.dirname(__file__) 16 | sys.path.insert(0, os.path.abspath(os.path.join(pwd, ".."))) 17 | 18 | 19 | def remove_dir_content(path: str) -> None: 20 | """Remove directory content.""" 21 | for root, dirs, files in os.walk(path): 22 | for f in files: 23 | os.unlink(os.path.join(root, f)) 24 | for d in dirs: 25 | shutil.rmtree(os.path.join(root, d)) 26 | if os.path.isdir(path): 27 | shutil.rmtree(path) 28 | 29 | 30 | # Copy necessary example notebooks 31 | if os.path.isdir("_examples"): 32 | remove_dir_content("_examples") 33 | os.makedirs("_examples") 34 | shutil.copytree("../examples", "_examples", dirs_exist_ok=True) 35 | 36 | # -- Project information ----------------------------------------------------- 37 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 38 | 39 | project = "hydromt_fiat" 40 | copyright = "2024, Deltares" 41 | author = "Deltares" 42 | version = hydromt_fiat.__version__ 43 | 44 | # -- General configuration --------------------------------------------------- 45 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 46 | 47 | extensions = [ 48 | "sphinx_design", 49 | "sphinx.ext.autodoc", 50 | "sphinx.ext.viewcode", 51 | "sphinx.ext.todo", 52 | "sphinx.ext.napoleon", 53 | "sphinx.ext.autosummary", 54 | "sphinx.ext.githubpages", 55 | "sphinx.ext.intersphinx", 56 | "sphinx_autosummary_accessors", 57 | "IPython.sphinxext.ipython_directive", 58 | "IPython.sphinxext.ipython_console_highlighting", 59 | "nbsphinx", 60 | ] 61 | 62 | autosummary_generate = True 63 | source_suffix = ".rst" 64 | templates_path = ["_templates"] 65 | 66 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 67 | language = "en" 68 | master_doc = "index" 69 | pygments_style = "sphinx" 70 | todo_include_todos = False 71 | 72 | # -- Options for HTML output ------------------------------------------------- 73 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 74 | 75 | autodoc_member_order = "bysource" 76 | autoclass_content = "both" 77 | 78 | bare_version = hydromt_fiat.__version__ 79 | doc_version = bare_version[: bare_version.find("dev") - 1] 80 | 81 | html_context = { 82 | "github_url": "https://github.com", 83 | "github_user": "Deltares", 84 | "github_repo": "hydromt_fiat", 85 | "github_version": "main", # FIXME 86 | "doc_path": "docs", 87 | "default_mode": "light", 88 | } 89 | 90 | html_css_files = ["theme-deltares.css"] 91 | html_favicon = "_static/hydromt-icon.svg" 92 | html_logo = "_static/hydromt-icon.svg" 93 | html_static_path = ["_static"] 94 | html_theme = "pydata_sphinx_theme" 95 | 96 | html_theme_options = { 97 | "show_nav_level": 2, 98 | "navbar_align": "content", 99 | "use_edit_page_button": True, 100 | "icon_links": [ 101 | { 102 | "name": "GitHub", 103 | "url": "https://github.com/Deltares/hydromt_fiat", # required 104 | "icon": "https://upload.wikimedia.org/wikipedia/commons/9/91/Octicons-mark-github.svg", 105 | "type": "url", 106 | }, 107 | { 108 | "name": "Delft-FIAT", 109 | "url": "https://deltares.github.io/Delft-FIAT/latest/", 110 |
"icon": "_static/fiat.svg", 111 | "type": "local", 112 | }, 113 | ], 114 | "external_links": [ 115 | { 116 | "name": "HydroMT core", 117 | "url": "https://deltares.github.io/hydromt/latest/index.html", 118 | }, 119 | ], 120 | "logo": { 121 | "text": "HydroMT FIAT", 122 | }, 123 | "navbar_end": ["navbar-icon-links", "version-switcher"], # remove dark mode switch 124 | "switcher": { 125 | "json_url": "https://raw.githubusercontent.com/Deltares/hydromt_fiat/gh-pages/switcher.json", 126 | "version_match": doc_version, 127 | }, 128 | } 129 | 130 | remove_from_toctrees = ["_generated/*"] 131 | 132 | # -- Options for manual page output --------------------------------------- 133 | 134 | man_pages = [ 135 | (master_doc, "hydromt_fiat", "HydroMT FIAT plugin Documentation", [author], 1) 136 | ] 137 | 138 | 139 | # -- INTERSPHINX ----------------------------------------------------------- 140 | 141 | intersphinx_mapping = { 142 | "python": ("https://docs.python.org/3/", None), 143 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), 144 | # "numpy": ("https://numpy.org/doc/stable", None), 145 | "scipy": ("https://docs.scipy.org/doc/scipy", None), 146 | # "numba": ("https://numba.pydata.org/numba-doc/latest", None), 147 | # "matplotlib": ("https://matplotlib.org/stable/", None), 148 | # "dask": ("https://docs.dask.org/en/latest", None), 149 | "rasterio": ("https://rasterio.readthedocs.io/en/latest", None), 150 | "geopandas": ("https://geopandas.org/en/stable", None), 151 | "xarray": ("https://xarray.pydata.org/en/stable", None), 152 | "hydromt": ("https://deltares.github.io/hydromt/latest/", None), 153 | } 154 | -------------------------------------------------------------------------------- /tests/components/test_component_utils.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from hydromt_fiat.components.utils import ( 4 | _mount, 5 | _relpath, 6 | get_item, 7 | make_config_paths_relative, 8 | pathing_config, 9 | pathing_expand, 10 | ) 11 | 12 | 13 | def test__mount(): 14 | # Call the function on unix path 15 | m = _mount("/d/tmp/foo") 16 | # Assert the mount 17 | assert m == "/d/" 18 | 19 | # Call the function on windows path 20 | m = _mount("d:/tmp/foo") 21 | # Assert the mount 22 | assert m == "d:/" 23 | 24 | # Call the function on a relative path 25 | m = _mount("tmp/foo") 26 | # Assert that it's None 27 | assert m is None 28 | 29 | 30 | def test__relpath_abs(tmp_path: Path): 31 | # Call the function 32 | p = _relpath(Path(tmp_path, "tmp/tmp.txt"), tmp_path) 33 | 34 | # Assert the output 35 | assert isinstance(p, str) 36 | assert p == "tmp/tmp.txt" 37 | 38 | # Path one above the current, also pass as a string 39 | in_p = Path(tmp_path.parent, "tmp.txt") 40 | p = _relpath(in_p, tmp_path) 41 | 42 | # Assert the output 43 | assert p == "../tmp.txt" 44 | 45 | 46 | def test__relpath_rel(tmp_path: Path): 47 | # Call the function on a path that is already relative 48 | p = _relpath("tmp/tmp.txt", tmp_path) 49 | 50 | # Assert the output is just the same 51 | assert p == "tmp/tmp.txt" 52 | 53 | 54 | def test__relpath_mount(tmp_path: Path, mount_string: str): 55 | # Call the function on a path that is located on another mount 56 | p = _relpath(Path(mount_string, "tmp", "tmp.txt"), tmp_path) 57 | 58 | # Assert the output is just the same 59 | assert p == f"{mount_string}tmp/tmp.txt" 60 | 61 | 62 | def test__relpath_other(tmp_path: Path): 63 | # Call the function on value that could not be paths 64 | p = _relpath([2, 2], 
tmp_path) # E.g. a list 65 | 66 | # Assert that the list is returned 67 | assert p == [2, 2] 68 | 69 | 70 | def test_make_config_paths_relative( 71 | tmp_path: Path, 72 | config_dummy: dict, 73 | ): 74 | # Assert that a full path is present 75 | p = config_dummy["baz"]["file1"] 76 | assert Path(p).is_absolute() 77 | assert config_dummy["spooky"]["ghost"] == [1, 2, 3] 78 | assert config_dummy["baz"]["file2"] == "tmp/tmp.txt" 79 | 80 | # Call the function 81 | cfg = make_config_paths_relative(config_dummy, tmp_path) 82 | 83 | # Assert the outcome 84 | # Assert that a full path is present 85 | p = cfg["baz"]["file1"] 86 | assert not Path(p).is_absolute() # Not anymore 87 | assert cfg["spooky"]["ghost"] == [1, 2, 3] 88 | assert cfg["baz"]["file2"] == "tmp/tmp.txt" 89 | 90 | 91 | def test_get_item( 92 | config_dummy: dict, 93 | ): 94 | # Call the function 95 | res = get_item(["foo"], config_dummy, "") 96 | # Assert the output 97 | assert res == "bar" 98 | 99 | # With multiple parts 100 | res = get_item(["spooky", "ghost"], config_dummy, "") 101 | # Assert the output 102 | assert res == [1, 2, 3] 103 | 104 | # Get an entry that doesn't exist, return the fallback 105 | res = get_item(["No"], config_dummy, "", fallback=2) 106 | # Assert the entry 107 | assert res == 2 108 | 109 | 110 | def test_get_item_path( 111 | tmp_path: Path, 112 | config_dummy: dict, 113 | ): 114 | # Call the function 115 | res = get_item(["baz", "file2"], config_dummy, root=tmp_path, abs_path=True) 116 | # Assert the output 117 | assert res == Path(tmp_path, "tmp/tmp.txt") 118 | 119 | 120 | def test_get_item_multi( 121 | config_dummy: dict, 122 | ): 123 | # Call the function 124 | res = get_item(["multi", "file"], config_dummy, "") 125 | # Assert the output 126 | assert isinstance(res, list) 127 | assert len(res) == 2 128 | assert res[0] == "tmp/tmp.txt" 129 | 130 | 131 | def test_get_item_none( 132 | config_dummy: dict, 133 | ): 134 | # Call the function 135 | res = get_item([], config_dummy, "") 136 | # Assert the output 137 | assert res is None 138 | 139 | 140 | def test_pathing_expand( 141 | model_data_clipped_path: Path, 142 | ): 143 | # Call the function 144 | paths, names = pathing_expand( 145 | root=model_data_clipped_path, filename="exposure/{name}.fgb" 146 | ) 147 | # Assert the output 148 | assert len(paths) == 2 149 | assert len(names) == 2 150 | assert paths[0].suffix == ".fgb" 151 | 152 | # To a directory with no data 153 | # Call the function 154 | paths, names = pathing_expand( 155 | root=model_data_clipped_path, filename="foo/{name}.fgb" 156 | ) 157 | # Assert the output 158 | assert len(paths) == 0 159 | assert len(names) == 0 160 | 161 | 162 | def test_pathing_expand_none( 163 | model_data_clipped_path: Path, 164 | ): 165 | # Call the function 166 | out = pathing_expand(root=model_data_clipped_path, filename=None) 167 | # Assert the output 168 | assert out is None 169 | 170 | 171 | def test_pathing_config(): 172 | # Call the function 173 | paths, names = pathing_config(["tmp/tmp.txt", None, "foo.txt"]) 174 | # Assert the output 175 | assert all([isinstance(item, Path) for item in paths]) 176 | assert names == ["tmp", "foo"] 177 | 178 | 179 | def test_pathing_config_none(): 180 | # Call the function 181 | out = pathing_config(None) 182 | # Assert the output 183 | assert out is None 184 | 185 | # Call the function 186 | out = pathing_config([None, None]) 187 | # Assert the output 188 | assert out is None 189 | --------------------------------------------------------------------------------
/docs/_static/fiat.svg: -------------------------------------------------------------------------------- 1 | [SVG markup omitted; this is the Delft-FIAT icon referenced by docs/conf.py] -------------------------------------------------------------------------------- /src/hydromt_fiat/workflows/damage.py: -------------------------------------------------------------------------------- 1 | """Calculate max potential damage based on exposure type.""" 2 | 3 | import logging 4 | from itertools import product 5 | from typing import Any 6 | 7 | import geopandas as gpd 8 | import pandas as pd 9 | from hydromt.gis import utm_crs 10 | 11 | from hydromt_fiat.utils import ( 12 | COST_TYPE, 13 | EXPOSURE_LINK, 14 | EXPOSURE_TYPE, 15 | MAX, 16 | OBJECT_TYPE, 17 | SUBTYPE, 18 | create_query, 19 | ) 20 | 21 | __all__ = ["max_monetary_damage"] 22 | 23 | logger = logging.getLogger(f"hydromt.{__name__}") 24 | 25 | 26 | def max_monetary_damage( 27 | exposure_data: gpd.GeoDataFrame, 28 | exposure_cost_table: pd.DataFrame, 29 | exposure_type: str, 30 | vulnerability: pd.DataFrame, 31 | exposure_cost_link: pd.DataFrame | None = None, 32 | **select: dict[str, Any], 33 | ) -> gpd.GeoDataFrame: 34 | """Determine maximum monetary damage per object. 35 | 36 | The maximum potential monetary damage is calculated based on the area (footprint) 37 | of the objects. The exposure cost table should therefore contain values per square 38 | meter. 39 | 40 | Parameters 41 | ---------- 42 | exposure_data : gpd.GeoDataFrame 43 | The existing exposure data. 44 | exposure_cost_table : pd.DataFrame 45 | The cost table. 46 | exposure_type : str 47 | Type of exposure data, e.g. 'damage'. 48 | vulnerability : pd.DataFrame 49 | The vulnerability identifier table. 50 | exposure_cost_link : pd.DataFrame, optional 51 | A linking table to connect the exposure data to the exposure cost data. 52 | By default None. 53 | **select : dict, optional 54 | Keyword arguments to select data from the cost table. 55 | Each key corresponds to a column and each value to the value in that column. 56 | 57 | Returns 58 | ------- 59 | gpd.GeoDataFrame 60 | The resulting exposure data with the maximum damage included.
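Examples
--------
A minimal sketch; the inputs are assumed to exist, and ``country`` is forwarded as a ``**select`` keyword to filter the cost table via ``create_query``:

>>> gdf = max_monetary_damage(
...     exposure_data=buildings,
...     exposure_cost_table=jrc_damage,
...     exposure_type="damage",
...     vulnerability=identifiers,
...     country="Netherlands",
... )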
61 | """ 62 | if exposure_cost_table is None: 63 | raise ValueError("Exposure costs table cannot be None") 64 | 65 | # Create a query from the kwargs 66 | if len(select) != 0: 67 | query = create_query(**select) 68 | exposure_cost_table = exposure_cost_table.query(query) 69 | 70 | if len(exposure_cost_table) == 0: 71 | raise ValueError(f"Select kwargs ({select}) resulted in no remaining data") 72 | 73 | # If not cost link table is defined, define it self 74 | if exposure_cost_link is None: 75 | exposure_cost_link = pd.DataFrame( 76 | data={ 77 | OBJECT_TYPE: vulnerability[EXPOSURE_LINK].values, 78 | COST_TYPE: vulnerability[EXPOSURE_LINK].values, 79 | } 80 | ) 81 | 82 | # Check for the necessary columns 83 | if not all(item in exposure_cost_link.columns for item in [OBJECT_TYPE, COST_TYPE]): 84 | raise ValueError(f"Cost link table either missing {OBJECT_TYPE} or {COST_TYPE}") 85 | # Leave only the necessary columns 86 | exposure_cost_link = exposure_cost_link[[OBJECT_TYPE, COST_TYPE]] 87 | exposure_cost_link = exposure_cost_link.drop_duplicates(subset=OBJECT_TYPE) 88 | 89 | # Get the unique headers corresponding to the 'exposure_type' 90 | if SUBTYPE not in vulnerability.columns: 91 | headers = [""] 92 | else: 93 | headers = vulnerability[vulnerability[EXPOSURE_TYPE] == exposure_type] 94 | headers = ["_" + str(item) for item in headers[SUBTYPE].unique()] 95 | 96 | # If not headers were found, log and return 97 | if len(headers) == 0: 98 | raise ValueError( 99 | f"Exposure type ({exposure_type}) not found in vulnerability data" 100 | ) 101 | 102 | # Get unique linking names 103 | unique_link = exposure_cost_link[COST_TYPE].unique().tolist() 104 | unique_link = [f"{x}{y}" for x, y in product(unique_link, headers)] 105 | # Transpose the cost table, rename index to object_type to easily merge 106 | # This is not the object type, but the specific max costs of that element 107 | exposure_cost_table = exposure_cost_table.T.reset_index(names=COST_TYPE) 108 | # Index the cost table 109 | exposure_cost_table = exposure_cost_table[ 110 | exposure_cost_table[COST_TYPE].isin(unique_link) 111 | ] 112 | 113 | # Link the cost type to the exposure data 114 | data_or_size = len(exposure_data) # For size check later 115 | exposure_data[COST_TYPE] = exposure_data[[OBJECT_TYPE]].merge( 116 | exposure_cost_link, 117 | on=OBJECT_TYPE, 118 | how="inner", 119 | )[COST_TYPE] 120 | 121 | # Drop the data that cannnot be linked 122 | exposure_data.dropna(subset=COST_TYPE, inplace=True) 123 | 124 | # Get the area, make sure its a projected crs 125 | old_crs = exposure_data.crs 126 | if old_crs.is_geographic: 127 | crs = utm_crs(exposure_data.total_bounds) 128 | exposure_data.to_crs(crs, inplace=True) 129 | area = exposure_data.area 130 | 131 | # Loop through the headers to set the max damage per subtype (or not) 132 | for header in headers: 133 | data = exposure_data[COST_TYPE] + header 134 | # Get the costs per object 135 | costs_per = data.to_frame().merge(exposure_cost_table, on=COST_TYPE) 136 | costs_per.drop(COST_TYPE, axis=1, inplace=True) 137 | costs_per = costs_per.squeeze() 138 | # Multiply by the area 139 | costs_per *= area 140 | 141 | exposure_data[f"{MAX}_{exposure_type}{header}"] = costs_per.astype(float) 142 | 143 | # Check data length afterwards 144 | data_m_size = len(exposure_data) 145 | if data_or_size != data_m_size: 146 | logger.warning( 147 | f"{data_or_size - data_m_size} features could not be linked to the \ 148 | damage values, these were removed" 149 | ) 150 | 151 | return exposure_data 152 | 
-------------------------------------------------------------------------------- /src/hydromt_fiat/drivers/osm_driver.py: -------------------------------------------------------------------------------- 1 | """Driver to read OSM data with the OSMnx API.""" 2 | 3 | import logging 4 | from pathlib import Path 5 | from typing import Any, ClassVar, Set 6 | 7 | import geopandas as gpd 8 | import osmnx as ox 9 | from hydromt.data_catalog.drivers import GeoDataFrameDriver 10 | from hydromt.typing import StrPath 11 | from osmnx._errors import InsufficientResponseError 12 | from pyproj.crs import CRS 13 | from shapely.geometry import MultiPolygon, Polygon 14 | 15 | CACHE_DIR = Path.home() / ".cache" / "hydromt_fiat" / "osmnx" 16 | CACHE_DIR.mkdir(parents=True, exist_ok=True) 17 | ox.settings.cache_folder = CACHE_DIR 18 | 19 | logger = logging.getLogger(f"hydromt.{__name__}") 20 | 21 | 22 | class OSMDriver(GeoDataFrameDriver): 23 | """Driver to read OSM data with the OSMnx API.""" 24 | 25 | name = "osm" 26 | supports_writing = True 27 | _supported_extensions: ClassVar[Set[str]] = {".gpkg", ".shp", ".geojson", ".fgb"} 28 | 29 | def read( 30 | self, 31 | uris: list[str], 32 | mask: gpd.GeoDataFrame | gpd.GeoSeries, 33 | *, 34 | tags: list[str] | None = None, 35 | geom_type: list[str] | None = None, 36 | **kwargs, 37 | ) -> gpd.GeoDataFrame: 38 | """Read OSM data with the OSMnx API. 39 | 40 | Parameters 41 | ---------- 42 | uris : list[str] 43 | List containing a single OSM asset type. 44 | mask : gpd.GeoDataFrame | gpd.GeoSeries 45 | GeoDataFrame containing the region of interest. 46 | tags : list[str], optional 47 | Additional tags to filter the OSM data by, by default None. 48 | geom_type : list[str], optional 49 | List of geometry types to filter data with, 50 | e.g. ['MultiPolygon', 'Polygon'], by default None. 51 | 52 | Returns 53 | ------- 54 | gpd.GeoDataFrame 55 | The resulting data. 56 | """ 57 | if len(uris) > 1: 58 | raise ValueError("Cannot use multiple uris for reading OSM data.") 59 | 60 | if mask is None: 61 | raise ValueError("Mask is required to retrieve OSM data") 62 | 63 | if not isinstance(mask, (gpd.GeoDataFrame, gpd.GeoSeries)): 64 | raise TypeError( 65 | f"Wrong type: {type(mask)} -> should be GeoDataFrame or GeoSeries" 66 | ) 67 | uri = uris[0] 68 | if len(mask) > 1: 69 | logger.warning( 70 | "Received multiple geometries for mask, geometries will " 71 | "be dissolved into single geometry." 72 | ) 73 | mask = mask.dissolve() 74 | 75 | # Quick check on the crs. If not in WGS84, reproject 76 | crs = CRS.from_epsg(4326) 77 | if not mask.crs.equals(crs): 78 | mask = mask.to_crs(crs) # WGS84 79 | polygon = mask.geometry[0] 80 | 81 | # If tags and geom_type are None, check if they are supplied as driver options 82 | options = self.options.to_dict() 83 | if options.get("geom_type") and not geom_type: 84 | geom_type = options.get("geom_type") 85 | if options.get("tags") and not tags: 86 | tags = options.get("tags") 87 | 88 | if tags: 89 | tag: dict[str, Any] = {uri: tags} 90 | else: 91 | tag = {uri: True} 92 | logger.info("Retrieving %s data from OSM API", uri) 93 | return self.get_osm_data(polygon=polygon, tag=tag, geom_type=geom_type) 94 | 95 | def write(self, path: StrPath, gdf: gpd.GeoDataFrame, **kwargs) -> StrPath: 96 | """Write OSMnx data to file. 97 | 98 | Parameters 99 | ---------- 100 | path : StrPath 101 | Path to write the OSM data to. 102 | gdf : gpd.GeoDataFrame 103 | GeoDataFrame containing OSM data. 104 | 105 | Returns 106 | ------- 107 | StrPath 108 | Path to the file.
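
        Examples
        --------
        An illustrative call, assuming ``driver`` is an ``OSMDriver``
        instance; an unsupported extension falls back to ``.fgb``::

            out = driver.write("buildings.txt", gdf)
            # out == Path("buildings.fgb")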
109 | """ 110 | path = Path(path) 111 | ext = path.suffix 112 | if ext not in self._supported_extensions: 113 | logger.warning( 114 | f"driver {self.name} has no support for extension {ext}" 115 | "switching to .fgb." 116 | ) 117 | path = path.parent / (path.stem + ".fgb") 118 | gdf.to_file(path, **kwargs) 119 | return path 120 | 121 | @staticmethod 122 | def get_osm_data( 123 | polygon: MultiPolygon | Polygon, 124 | tag: dict[str, Any], 125 | geom_type: list[str] | None, 126 | ) -> gpd.GeoDataFrame: 127 | """Retrieve OSM data with the OSMnx api. 128 | 129 | Parameters 130 | ---------- 131 | polygon : MultiPolygon | Polygon 132 | Area of interest. 133 | tag : dict 134 | OSM tag to filter data with, i.e. {'building': True}. 135 | geom_type : list[str] | None 136 | List of geometry types to filter data with, 137 | i.e. ['MultiPolygon', 'Polygon']. 138 | 139 | Returns 140 | ------- 141 | gpd.GeoDataFrame 142 | GeoDataFrame with OSM data. 143 | 144 | """ 145 | if not isinstance(polygon, (Polygon, MultiPolygon)): 146 | raise TypeError("Given geometry is not a (multi)polygon") 147 | 148 | try: 149 | footprints = ox.features.features_from_polygon(polygon, tag) 150 | except InsufficientResponseError as err: 151 | logger.error(f"No OSM data retrieved with the following tags: {tag}") 152 | raise err 153 | 154 | tag_key = list(tag.keys())[0] 155 | 156 | if footprints.empty: 157 | logger.warning(f"No {tag_key} features found for polygon") 158 | return None 159 | 160 | logger.info(f"Total number of {tag_key} found from OSM: {len(footprints)}") 161 | 162 | if geom_type: 163 | footprints = footprints.loc[footprints.geometry.type.isin(geom_type)] 164 | 165 | # Remove multi index 166 | footprints = footprints.reset_index(drop=True) 167 | return footprints[["geometry", tag_key]] 168 | -------------------------------------------------------------------------------- /src/hydromt_fiat/workflows/vulnerability.py: -------------------------------------------------------------------------------- 1 | """Vulnerability workflows.""" 2 | 3 | import logging 4 | from typing import Any 5 | 6 | import numpy as np 7 | import numpy.typing as npt 8 | import pandas as pd 9 | from barril.units import Scalar 10 | 11 | from hydromt_fiat.utils import ( 12 | CURVE, 13 | CURVE_ID, 14 | DAMAGE, 15 | EXPOSURE_LINK, 16 | EXPOSURE_TYPE, 17 | create_query, 18 | standard_unit, 19 | ) 20 | 21 | __all__ = ["process_vulnerability_linking", "vulnerability_setup"] 22 | 23 | logger = logging.getLogger(f"hydromt.{__name__}") 24 | 25 | 26 | def process_vulnerability_linking( 27 | types: list[str] | tuple[str] | npt.NDArray[np.str_], 28 | vulnerability_linking: pd.DataFrame | None = None, 29 | ) -> pd.DataFrame: 30 | """Process the vulnerability linking table. 31 | 32 | Is created based on the vulnerability data if no initial table is provided. 33 | 34 | Parameters 35 | ---------- 36 | types : list | tuple | np.ndarray, 37 | Types of vulnerability curves. 38 | vulnerability_linking : pd.DataFrame, optional 39 | The vulnerability linking table, by default None. 40 | 41 | Returns 42 | ------- 43 | pd.DataFrame 44 | Vulnerability linking table. 
45 | """ 46 | # Construct if not provided 47 | if vulnerability_linking is None: 48 | logger.warning("No linking table provided, inferred from vulnerability data") 49 | vulnerability_linking = pd.DataFrame( 50 | data={ 51 | EXPOSURE_LINK: types, 52 | CURVE: types, 53 | } 54 | ) 55 | # Drop completely duplicate rows 56 | vulnerability_linking.drop_duplicates(inplace=True) 57 | if CURVE not in vulnerability_linking: 58 | raise KeyError(f"The '{CURVE}' column in not present in the linking table") 59 | if EXPOSURE_TYPE not in vulnerability_linking: # default to damage 60 | vulnerability_linking[EXPOSURE_TYPE] = DAMAGE 61 | 62 | # Query the linking data 63 | vulnerability_linking.loc[:, CURVE_ID] = vulnerability_linking[CURVE] 64 | types = list(types) # Ensure list type for the query 65 | vulnerability_linking = vulnerability_linking.query(f"{CURVE} in {str(types)}") 66 | 67 | return vulnerability_linking 68 | 69 | 70 | def vulnerability_setup( 71 | vulnerability_data: pd.DataFrame, 72 | vulnerability_linking: pd.DataFrame | None = None, 73 | *, 74 | unit: str = "m", 75 | index_name: str = "water depth", 76 | column_oriented: bool = True, 77 | **select: dict[str, Any], 78 | ) -> tuple[pd.DataFrame, pd.DataFrame]: 79 | """Create vulnerability curves from raw data. 80 | 81 | Warning 82 | ------- 83 | If not default exposure type is present in the vulnerability linking, the 84 | default exposure type is assumed to be 'damage'. 85 | 86 | Parameters 87 | ---------- 88 | vulnerability_data : pd.DataFrame 89 | The raw vulnerability dataset. 90 | vulnerability_linking : pd.DataFrame, optional 91 | The vulnerability linking table, by default None. 92 | unit : str, optional 93 | The unit of the vulnerability dataset index, by default "m". 94 | index_name : str, optional 95 | The name of the outgoing vulnerability curves dataset index, 96 | by default "water depth". 97 | column_oriented : bool, optional 98 | Whether the vulnerability data is column oriented, i.e. the values of a curve 99 | are in the same column spanning multiple rows. If False, the values are ought 100 | to be in the same row spanning multiple columns. By default True. 101 | **select : dict, optional 102 | Keyword arguments to select data from 'vulnerability_data'. 103 | 104 | Returns 105 | ------- 106 | tuple[pd.DataFrame] 107 | A tuple containing the the vulnerability curves and updated link table. 
108 | """ 109 | # Transpose the data if columns oriented 110 | if column_oriented: 111 | vulnerability_data = vulnerability_data.transpose() 112 | vulnerability_data.columns = vulnerability_data.iloc[0] 113 | vulnerability_data.drop(0, inplace=True) 114 | # Quick check on the data 115 | if CURVE not in vulnerability_data: 116 | raise KeyError("The 'curve' column in not present in the vulnerability data") 117 | # Build a query from the index kwargs 118 | if len(select) != 0: 119 | query = create_query(**select) 120 | vulnerability_data = vulnerability_data.query(query) 121 | 122 | # Sort the linking table 123 | vulnerability_linking = process_vulnerability_linking( 124 | types=vulnerability_data[CURVE].values, 125 | vulnerability_linking=vulnerability_linking, 126 | ) 127 | 128 | # Set a separate column with the curve id's for merging 129 | vulnerability_data = pd.merge( 130 | vulnerability_data, 131 | vulnerability_linking.drop_duplicates(subset=CURVE_ID), 132 | on=CURVE, 133 | how="inner", 134 | validate="many_to_many", 135 | ) 136 | 137 | # Reshape the vulnerability data 138 | columns = list(set(list(select.keys()) + vulnerability_linking.columns.to_list())) 139 | columns.remove(CURVE_ID) 140 | vulnerability_data = vulnerability_data.drop(columns, axis=1) 141 | vulnerability_data = vulnerability_data.transpose() 142 | vulnerability_data = vulnerability_data.rename( 143 | columns=vulnerability_data.loc[CURVE_ID] 144 | ) 145 | vulnerability_data = vulnerability_data.drop(CURVE_ID) 146 | vulnerability_data.index.name = index_name 147 | 148 | # Again query the linking table based on the vulnerability curves 149 | # But this time on the curve ID 150 | types = vulnerability_data.columns.tolist() 151 | vulnerability_linking = vulnerability_linking.query(f"{CURVE_ID} in {str(types)}") 152 | 153 | # At last reset the index 154 | vulnerability_data.reset_index(inplace=True) 155 | vulnerability_data = vulnerability_data.astype(float) 156 | 157 | # Scale the data according to the unit 158 | conversion = standard_unit(Scalar(1.0, unit)) 159 | vulnerability_data[index_name] *= conversion.value 160 | 161 | return vulnerability_data, vulnerability_linking 162 | -------------------------------------------------------------------------------- /tests/components/test_hazard_component.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | from unittest.mock import MagicMock, PropertyMock 4 | 5 | import pytest 6 | import xarray as xr 7 | from hydromt.model import ModelRoot 8 | 9 | from hydromt_fiat import FIATModel 10 | from hydromt_fiat.components import HazardComponent 11 | from hydromt_fiat.errors import MissingRegionError 12 | from hydromt_fiat.utils import ( 13 | HAZARD, 14 | HAZARD_FILE, 15 | HAZARD_RP, 16 | HAZARD_SETTINGS, 17 | MODEL_RISK, 18 | VAR_AS_BAND, 19 | ) 20 | 21 | 22 | def test_hazard_component_empty( 23 | mock_model: MagicMock, 24 | ): 25 | # Set up the component 26 | component = HazardComponent(model=mock_model) 27 | 28 | # Assert some very basic stuff 29 | assert component._filename == f"{HAZARD}.nc" 30 | assert len(component.data) == 0 31 | assert isinstance(component.data, xr.Dataset) 32 | 33 | 34 | def test_hazard_component_read( 35 | mock_model_config: MagicMock, 36 | model_data_clipped_path: Path, 37 | ): 38 | type(mock_model_config).root = PropertyMock( 39 | side_effect=lambda: ModelRoot(model_data_clipped_path, mode="r"), 40 | ) 41 | # Setup the component 42 | component = 
HazardComponent(model=mock_model_config) 43 | # Assert current state 44 | assert component._data is None 45 | 46 | # Read the data by calling 'data' property 47 | # This will fall back on the config 48 | component.data 49 | 50 | # Assert the state 51 | assert component._data is not None 52 | assert len(component.data.data_vars) == 1 53 | 54 | 55 | def test_hazard_component_read_sig( 56 | mock_model_config: MagicMock, 57 | model_data_clipped_path: Path, 58 | ): 59 | type(mock_model_config).root = PropertyMock( 60 | side_effect=lambda: ModelRoot(model_data_clipped_path, mode="r"), 61 | ) 62 | # Setup the component 63 | component = HazardComponent(model=mock_model_config) 64 | 65 | # Read data with the 'read' method 66 | component.read(f"{HAZARD}.nc") 67 | 68 | # Assert the state 69 | assert len(component.data.data_vars) == 1 70 | 71 | 72 | def test_hazard_component_read_nothing( 73 | tmp_path: Path, 74 | mock_model_config: MagicMock, 75 | ): 76 | type(mock_model_config).root = PropertyMock( 77 | side_effect=lambda: ModelRoot(tmp_path, mode="r"), 78 | ) 79 | # Setup the component 80 | component = HazardComponent(model=mock_model_config) 81 | # Assert current state 82 | assert len(component.data) == 0 83 | 84 | # Read the data (nothing) 85 | component.read() 86 | # Assert still no data 87 | assert len(component.data) == 0 88 | 89 | 90 | def test_hazard_component_write( 91 | tmp_path: Path, 92 | mock_model_config: MagicMock, 93 | hazard_clipped: xr.Dataset, 94 | ): 95 | # Setup the component 96 | component = HazardComponent(model=mock_model_config) 97 | # Set data like a dummy 98 | component._data = hazard_clipped 99 | 100 | # Write the data 101 | component.write() 102 | 103 | # Assert the output 104 | assert Path(tmp_path, f"{HAZARD}.nc").is_file() 105 | 106 | # Assert the config file 107 | assert component.model.config.get(HAZARD_FILE) == Path(tmp_path, f"{HAZARD}.nc") 108 | assert not component.model.config.get(f"{HAZARD_SETTINGS}.{VAR_AS_BAND}") 109 | 110 | 111 | def test_hazard_component_write_sig( 112 | tmp_path: Path, 113 | mock_model_config: MagicMock, 114 | hazard_clipped: xr.Dataset, 115 | ): 116 | # Setup the component 117 | component = HazardComponent(model=mock_model_config) 118 | # Set data like a dummy 119 | component._data = hazard_clipped 120 | component._data["flood_event2"] = hazard_clipped["flood_event"] 121 | 122 | # Write the data using the argument of the write method 123 | component.write("other/baz.nc") 124 | 125 | # Assert the output 126 | assert Path(tmp_path, "other", "baz.nc").is_file() 127 | 128 | # Assert the config file 129 | assert component.model.config.get(HAZARD_FILE) == Path(tmp_path, "other", "baz.nc") 130 | assert component.model.config.get(f"{HAZARD_SETTINGS}.{VAR_AS_BAND}") 131 | 132 | 133 | def test_hazard_component_setup( 134 | caplog: pytest.LogCaptureFixture, 135 | model_with_region: FIATModel, 136 | ): 137 | # Setup the component 138 | component = HazardComponent(model=model_with_region) 139 | # Test hazard event 140 | caplog.set_level(logging.INFO) 141 | component.setup(hazard_fnames="flood_event") 142 | 143 | assert "Added water_depth hazard map: flood_event" in caplog.text 144 | assert "flood_event" in component.data.data_vars 145 | 146 | 147 | def test_hazard_component_setup_multi( 148 | model_with_region: FIATModel, 149 | ): 150 | # Setup the component 151 | component = HazardComponent(model=model_with_region) 152 | 153 | # Test setting data to hazard grid with data 154 | component.setup(hazard_fnames=["flood_event", "flood_event_highres"]) 155 
| 156 | # Check if both ds are still there 157 | assert "flood_event" in component.data.data_vars 158 | assert "flood_event_highres" in component.data.data_vars 159 | 160 | 161 | def test_hazard_component_setup_risk( 162 | model_with_region: FIATModel, 163 | ): 164 | # Setup the component 165 | component = HazardComponent(model=model_with_region) 166 | 167 | # Test hazard with return period 168 | component.setup( 169 | hazard_fnames=["flood_event_highres"], 170 | risk=True, 171 | return_periods=[50000], 172 | ) 173 | 174 | assert isinstance(component.data, xr.Dataset) 175 | assert model_with_region.config.get(MODEL_RISK) 176 | assert model_with_region.config.get(HAZARD_RP) == [50000] 177 | 178 | 179 | def test_hazard_component_setup_errors(model: FIATModel): 180 | # Setup the component 181 | component = HazardComponent(model=model) 182 | 183 | # Assert the errors 184 | with pytest.raises( 185 | ValueError, match="Cannot perform risk analysis without return periods" 186 | ): 187 | component.setup(hazard_fnames="test.nc", risk=True) 188 | 189 | with pytest.raises( 190 | ValueError, match="Return periods do not match the number of hazard files" 191 | ): 192 | component.setup( 193 | hazard_fnames=["test1.nc", "test2.nc"], 194 | risk=True, 195 | return_periods=[1, 2, 3], 196 | ) 197 | 198 | with pytest.raises( 199 | MissingRegionError, 200 | match=("Region component is missing for setting up hazard data."), 201 | ): 202 | component.setup(hazard_fnames=["flood_event"]) 203 | -------------------------------------------------------------------------------- /tests/workflows/test_vulnerability.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | 4 | from hydromt_fiat.utils import ( 5 | CURVE, 6 | CURVE_ID, 7 | DAMAGE, 8 | EXPOSURE_LINK, 9 | EXPOSURE_TYPE, 10 | SUBTYPE, 11 | ) 12 | from hydromt_fiat.workflows import process_vulnerability_linking, vulnerability_setup 13 | 14 | 15 | def test_process_vulnerability_linking( 16 | vulnerability_data_row_oriented: pd.DataFrame, 17 | vulnerability_linking: pd.DataFrame, 18 | ): 19 | # Execute the function 20 | link = process_vulnerability_linking( 21 | types=vulnerability_data_row_oriented[CURVE].values, 22 | vulnerability_linking=vulnerability_linking, 23 | ) 24 | 25 | # Assert the output 26 | assert len(link) == 8 27 | assert SUBTYPE in link.columns 28 | assert CURVE_ID in link.columns 29 | assert not all(link[EXPOSURE_LINK] == link[CURVE]) 30 | assert "roads" not in link[CURVE].values 31 | 32 | 33 | def test_process_vulnerability_linking_no_subtype( 34 | vulnerability_data_row_oriented: pd.DataFrame, 35 | vulnerability_linking_alt: pd.DataFrame, 36 | ): 37 | # Execute the function 38 | link = process_vulnerability_linking( 39 | types=vulnerability_data_row_oriented[CURVE].values, 40 | vulnerability_linking=vulnerability_linking_alt, 41 | ) 42 | 43 | # Assert the output 44 | assert len(link) == 5 45 | assert SUBTYPE not in link.columns 46 | 47 | 48 | def test_process_vulnerability_linking_no_link( 49 | vulnerability_data_row_oriented: pd.DataFrame, 50 | ): 51 | # Execute the function 52 | link = process_vulnerability_linking( 53 | types=vulnerability_data_row_oriented[CURVE].values, 54 | vulnerability_linking=None, # Explicit so it's easy to follow 55 | ) 56 | 57 | # Assert the output 58 | assert len(link) == 6 59 | assert all(link[EXPOSURE_LINK] == link[CURVE]) 60 | assert "roads" in link[CURVE].values 61 | assert all(link[EXPOSURE_TYPE] == DAMAGE) 62 | 63 | 64 | def 
test_process_vulnerability_linking_error( 65 | vulnerability_data_row_oriented: pd.DataFrame, 66 | vulnerability_linking: pd.DataFrame, 67 | ): 68 | # No 'curve' column in the provided linking table 69 | with pytest.raises( 70 | KeyError, 71 | match="The 'curve' column is not present in the linking table", 72 | ): 73 | _ = process_vulnerability_linking( 74 | types=vulnerability_data_row_oriented[CURVE].values, 75 | vulnerability_linking=vulnerability_linking.drop(CURVE, axis=1), 76 | ) 77 | 78 | 79 | def test_vulnerability_setup( 80 | vulnerability_data: pd.DataFrame, 81 | vulnerability_linking: pd.DataFrame, 82 | ): 83 | # Execute the workflow function 84 | vuln_curves, vuln_id = vulnerability_setup( 85 | vulnerability_data=vulnerability_data, 86 | vulnerability_linking=vulnerability_linking, 87 | continent="europe", 88 | ) 89 | 90 | assert len(vuln_curves.columns) == 6 91 | assert len(vuln_id.columns) == 5 92 | assert "rs1" in vuln_curves.columns 93 | assert "in1" in vuln_curves.columns 94 | assert "rs1" in vuln_id[CURVE_ID].values 95 | assert "in1" in vuln_id[CURVE_ID].values 96 | 97 | 98 | def test_vulnerability_setup_typing( 99 | vulnerability_data: pd.DataFrame, 100 | vulnerability_linking: pd.DataFrame, 101 | ): 102 | # Check for selecting specific types 103 | vuln_curves, vuln_id = vulnerability_setup( 104 | vulnerability_data=vulnerability_data, 105 | vulnerability_linking=vulnerability_linking, 106 | continent="europe", 107 | curve=["rs1", "cm1"], 108 | ) 109 | 110 | assert len(vuln_curves.columns) == 3 111 | assert "rs1" in vuln_curves.columns 112 | assert "rs1" in vuln_id[CURVE_ID].values 113 | assert "in1" not in vuln_curves.columns 114 | assert "in1" not in vuln_id[CURVE_ID].values 115 | 116 | 117 | def test_vulnerability_setup_no_linking(vulnerability_data: pd.DataFrame): 118 | # No linking at all 119 | vuln_curves, vuln_id = vulnerability_setup( 120 | vulnerability_data=vulnerability_data, 121 | continent="europe", 122 | curve=["rs1", "cm1"], 123 | ) 124 | assert len(vuln_curves.columns) == 3 125 | assert (vuln_id[EXPOSURE_LINK].values == vuln_id[CURVE_ID].values).all() 126 | 127 | 128 | def test_vulnerability_setup_row_oriented( 129 | vulnerability_data_row_oriented: pd.DataFrame, 130 | vulnerability_linking: pd.DataFrame, 131 | ): 132 | # Execute the workflow function 133 | vuln_curves, vuln_id = vulnerability_setup( 134 | vulnerability_data=vulnerability_data_row_oriented, 135 | vulnerability_linking=vulnerability_linking, 136 | continent="europe", 137 | column_oriented=False, 138 | ) 139 | 140 | assert len(vuln_curves.columns) == 6 141 | assert len(vuln_id.columns) == 5 142 | assert "rs1" in vuln_curves.columns 143 | assert "in1" in vuln_curves.columns 144 | assert "rs1" in vuln_id[CURVE_ID].values 145 | assert "in1" in vuln_id[CURVE_ID].values 146 | 147 | 148 | def test_vulnerability_setup_units( 149 | caplog: pytest.LogCaptureFixture, 150 | vulnerability_data: pd.DataFrame, 151 | vulnerability_linking: pd.DataFrame, 152 | ): 153 | # First with meters 154 | vuln_curves, _ = vulnerability_setup( 155 | vulnerability_data=vulnerability_data, 156 | vulnerability_linking=vulnerability_linking, 157 | continent="europe", 158 | curve=["residential", "commercial"], 159 | ) 160 | mean_val = int(vuln_curves["water depth"].mean() * 100) 161 | assert mean_val == 500 162 | 163 | # Assert that different units work (in this case feet) 164 | vuln_curves, _ = vulnerability_setup( 165 | vulnerability_data=vulnerability_data, 166 | vulnerability_linking=vulnerability_linking, 167 | 
unit="ft", 168 | continent="europe", 169 | curve=["residential", "commercial"], 170 | ) 171 | mean_val_ft = int(vuln_curves["water depth"].mean() * 100) 172 | assert mean_val > 3 * mean_val_ft 173 | assert ( 174 | "Given unit (ft) does not match the standard unit (m) for length" in caplog.text 175 | ) 176 | 177 | 178 | def test_vulnerability_setup_error( 179 | vulnerability_data: pd.DataFrame, 180 | vulnerability_linking: pd.DataFrame, 181 | ): 182 | # No 'type' column in datasets, drop the first row in which it is located 183 | vuln_no_type = vulnerability_data.drop(1) 184 | with pytest.raises( 185 | KeyError, 186 | match="The 'curve' column in not present in the vulnerability data", 187 | ): 188 | _ = vulnerability_setup( 189 | vulnerability_data=vuln_no_type, 190 | continent="europe", 191 | curve=["residential", "commercial"], 192 | ) 193 | -------------------------------------------------------------------------------- /tests/workflows/test_damage.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import geopandas as gpd 4 | import pandas as pd 5 | import pytest 6 | 7 | from hydromt_fiat.utils import DAMAGE, MAX 8 | from hydromt_fiat.workflows import max_monetary_damage 9 | 10 | 11 | def test_max_monetary_damage( 12 | exposure_vector_clipped_for_damamge: gpd.GeoDataFrame, 13 | exposure_cost_table: pd.DataFrame, 14 | vulnerability_identifiers: pd.DataFrame, 15 | ): 16 | # Assert that maximum damage is not already in the dataset 17 | assert f"{MAX}_{DAMAGE}_structure" not in exposure_vector_clipped_for_damamge 18 | 19 | # Alterations should be inplace, i.e. id before == id after 20 | id_before = id(exposure_vector_clipped_for_damamge) 21 | 22 | # Call the function 23 | exposure_vector = max_monetary_damage( 24 | exposure_data=exposure_vector_clipped_for_damamge, 25 | exposure_cost_table=exposure_cost_table, 26 | exposure_type=DAMAGE, 27 | vulnerability=vulnerability_identifiers, 28 | country="World", # Select kwargs 29 | ) 30 | id_after = id(exposure_vector) 31 | 32 | # Assert that is was inplace 33 | assert id_before == id_after 34 | 35 | # Assert the content 36 | assert f"{MAX}_{DAMAGE}_structure" in exposure_vector_clipped_for_damamge 37 | assert int(exposure_vector[f"{MAX}_{DAMAGE}_structure"].mean()) == 663194 38 | 39 | 40 | def test_max_monetary_damage_link( 41 | exposure_vector_clipped_for_damamge: gpd.GeoDataFrame, 42 | exposure_cost_table: pd.DataFrame, 43 | vulnerability_identifiers: pd.DataFrame, 44 | exposure_cost_link: pd.DataFrame, 45 | ): 46 | # Call the function 47 | exposure_vector = max_monetary_damage( 48 | exposure_data=exposure_vector_clipped_for_damamge, 49 | exposure_cost_table=exposure_cost_table, 50 | exposure_type=DAMAGE, 51 | vulnerability=vulnerability_identifiers, 52 | exposure_cost_link=exposure_cost_link, 53 | country="World", # Select kwargs 54 | ) 55 | 56 | # Assert the content 57 | assert len(exposure_vector) == 12 58 | assert f"{MAX}_{DAMAGE}_structure" in exposure_vector_clipped_for_damamge 59 | assert int(exposure_vector[f"{MAX}_{DAMAGE}_structure"].mean()) == 663194 60 | 61 | 62 | def test_max_monetary_damage_link_partial( 63 | caplog: pytest.LogCaptureFixture, 64 | exposure_vector_clipped_for_damamge: gpd.GeoDataFrame, 65 | exposure_cost_table: pd.DataFrame, 66 | vulnerability_identifiers: pd.DataFrame, 67 | exposure_cost_link: pd.DataFrame, 68 | ): 69 | caplog.set_level(logging.WARNING) 70 | # Remove a row from the linking table 71 | exposure_cost_link.drop(2, inplace=True) # 2 is 
industrial 72 | # Call the function 73 | exposure_vector = max_monetary_damage( 74 | exposure_data=exposure_vector_clipped_for_damamge, 75 | exposure_cost_table=exposure_cost_table, 76 | exposure_type=DAMAGE, 77 | vulnerability=vulnerability_identifiers, 78 | exposure_cost_link=exposure_cost_link, 79 | country="World", # Select kwargs 80 | ) 81 | 82 | # Assert the logging 83 | assert "4 features could not be linked to" in caplog.text 84 | 85 | # Assert the content 86 | assert len(exposure_vector) == 8 87 | assert int(exposure_vector[f"{MAX}_{DAMAGE}_structure"].mean()) == 822446 88 | 89 | 90 | def test_max_monetary_damage_geo_crs( 91 | exposure_vector_clipped_for_damamge: gpd.GeoDataFrame, 92 | exposure_cost_table: pd.DataFrame, 93 | vulnerability_identifiers: pd.DataFrame, 94 | ): 95 | # Call the function 96 | exposure_vector = max_monetary_damage( 97 | exposure_data=exposure_vector_clipped_for_damamge.to_crs(4326), 98 | exposure_cost_table=exposure_cost_table, 99 | exposure_type=DAMAGE, 100 | vulnerability=vulnerability_identifiers, 101 | country="World", # Select kwargs 102 | ) 103 | 104 | # Assert the content 105 | assert int(exposure_vector[f"{MAX}_{DAMAGE}_structure"].mean()) == 662887 106 | 107 | 108 | def test_max_monetary_damage_no_subtype( 109 | exposure_geom_data_alt: gpd.GeoDataFrame, 110 | exposure_cost_table: pd.DataFrame, 111 | vulnerability_identifiers_alt: pd.DataFrame, 112 | ): 113 | # Assert that maximum damage is not already in the dataset 114 | assert f"{MAX}_{DAMAGE}" not in exposure_geom_data_alt 115 | 116 | # Alterations should be inplace, i.e. id before == id after 117 | id_before = id(exposure_geom_data_alt) 118 | 119 | # Call the function 120 | exposure_vector = max_monetary_damage( 121 | exposure_data=exposure_geom_data_alt, 122 | exposure_cost_table=exposure_cost_table, 123 | exposure_type=DAMAGE, 124 | vulnerability=vulnerability_identifiers_alt, 125 | country="World", # Select kwargs 126 | ) 127 | id_after = id(exposure_vector) 128 | 129 | # Assert that it was inplace 130 | assert id_before == id_after 131 | 132 | # Assert the content 133 | assert f"{MAX}_{DAMAGE}" in exposure_geom_data_alt 134 | assert int(exposure_vector[f"{MAX}_{DAMAGE}"].mean()) == 1363905 135 | 136 | 137 | def test_max_monetary_damage_errors( 138 | exposure_vector_clipped_for_damamge: gpd.GeoDataFrame, 139 | exposure_cost_table: pd.DataFrame, 140 | vulnerability_identifiers: pd.DataFrame, 141 | ): 142 | # Supply None for the cost table 143 | with pytest.raises( 144 | ValueError, 145 | match="Exposure costs table cannot be None", 146 | ): 147 | _ = max_monetary_damage( 148 | exposure_data=exposure_vector_clipped_for_damamge, 149 | exposure_cost_table=None, 150 | exposure_type=DAMAGE, 151 | vulnerability=vulnerability_identifiers, 152 | ) 153 | 154 | # Select kwargs leave no data 155 | with pytest.raises( 156 | ValueError, 157 | match=r"Select kwargs \(\{'country': 'Unknown'\}\) resulted in no remaining", 158 | ): 159 | _ = max_monetary_damage( 160 | exposure_data=exposure_vector_clipped_for_damamge, 161 | exposure_cost_table=exposure_cost_table, 162 | exposure_type=DAMAGE, 163 | vulnerability=vulnerability_identifiers, 164 | country="Unknown", 165 | ) 166 | 167 | # Exposure type not found in the vulnerability data 168 | with pytest.raises( 169 | ValueError, 170 | match=r"Exposure type \(affected\) not found in vulnerability data", 171 | ): 172 | _ = max_monetary_damage( 173 | exposure_data=exposure_vector_clipped_for_damamge, 174 | exposure_cost_table=exposure_cost_table, 175 | exposure_type="affected", 176 | 
vulnerability=vulnerability_identifiers, 177 | country="World", 178 | ) 179 | 180 | # Exposure cost link table missing columns 181 | with pytest.raises( 182 | ValueError, 183 | match="Cost link table either missing object_type or cost_type", 184 | ): 185 | _ = max_monetary_damage( 186 | exposure_data=exposure_vector_clipped_for_damamge, 187 | exposure_cost_table=exposure_cost_table, 188 | exposure_type=DAMAGE, 189 | vulnerability=vulnerability_identifiers, 190 | country="World", 191 | exposure_cost_link=pd.DataFrame(), 192 | ) 193 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import geopandas as gpd 4 | import pandas as pd 5 | import pytest 6 | import xarray as xr 7 | from hydromt import DataCatalog 8 | from shapely.geometry import box 9 | 10 | from hydromt_fiat import FIATModel 11 | from hydromt_fiat.data import fetch_data 12 | 13 | 14 | ## Build data 15 | @pytest.fixture(scope="session") 16 | def build_data_path() -> Path: # The HydroMT-FIAT build data w/ catalog 17 | # Fetch the data 18 | p = fetch_data("test-build-data") 19 | assert Path(p, "buildings", "buildings.fgb").is_file() 20 | return p 21 | 22 | 23 | @pytest.fixture(scope="session") 24 | def build_data_catalog_path(build_data_path: Path) -> Path: 25 | p = Path(build_data_path, "data_catalog.yml") 26 | assert p.is_file() 27 | return p 28 | 29 | 30 | @pytest.fixture(scope="session") 31 | def build_region_path(build_data_path: Path) -> Path: 32 | p = Path(build_data_path, "region.geojson") 33 | assert p.is_file() 34 | return p 35 | 36 | 37 | @pytest.fixture(scope="session") 38 | def build_region_small_path(build_data_path: Path) -> Path: 39 | p = Path(build_data_path, "region_small.geojson") 40 | assert p.is_file() 41 | return p 42 | 43 | 44 | @pytest.fixture 45 | def build_region(build_region_path: Path) -> gpd.GeoDataFrame: 46 | gdf = gpd.read_file(build_region_path) 47 | assert len(gdf) == 1 48 | return gdf 49 | 50 | 51 | @pytest.fixture 52 | def build_region_small(build_region_small_path: Path) -> gpd.GeoDataFrame: 53 | gdf = gpd.read_file(build_region_small_path) 54 | assert len(gdf) == 1 55 | return gdf 56 | 57 | 58 | @pytest.fixture(scope="session") 59 | def build_data_catalog(build_data_catalog_path: Path) -> DataCatalog: 60 | dc = DataCatalog(build_data_catalog_path) 61 | assert "buildings" in dc.sources 62 | return dc 63 | 64 | 65 | ## Global data 66 | @pytest.fixture(scope="session") 67 | def global_data_path() -> Path: # The HydroMT-FIAT global data w/ catalog 68 | # Fetch the data 69 | p = fetch_data("global-data") 70 | assert Path(p, "exposure", "jrc_damage_values.csv").is_file() 71 | return p 72 | 73 | 74 | @pytest.fixture(scope="session") 75 | def global_data_catalog_path(global_data_path: Path) -> Path: 76 | p = Path(global_data_path, "data_catalog.yml") 77 | assert p.is_file() 78 | return p 79 | 80 | 81 | @pytest.fixture(scope="session") 82 | def global_data_catalog(global_data_catalog_path: Path) -> DataCatalog: 83 | dc = DataCatalog(global_data_catalog_path) 84 | assert "osm_buildings" in dc.sources 85 | return dc 86 | 87 | 88 | ## Model data 89 | @pytest.fixture(scope="session") 90 | def model_data_path() -> Path: 91 | # Fetch the data 92 | p = fetch_data("fiat-model") 93 | assert len(list(p.iterdir())) != 0 94 | return p 95 | 96 | 97 | @pytest.fixture 98 | def exposure_vector(model_data_path: Path) -> gpd.GeoDataFrame: 99 | p = 
Path(model_data_path, "exposure", "buildings.fgb") 100 | assert p.is_file() 101 | gdf = gpd.read_file(p) 102 | assert len(gdf) != 0 103 | return gdf 104 | 105 | 106 | @pytest.fixture 107 | def exposure_grid(model_data_path: Path) -> xr.Dataset: 108 | p = Path(model_data_path, "exposure", "spatial.nc") 109 | assert p.is_file() 110 | ds = xr.open_dataset(p) 111 | assert len(ds.data_vars) != 0 112 | return ds 113 | 114 | 115 | @pytest.fixture 116 | def hazard(model_data_path: Path) -> xr.Dataset: 117 | p = Path( 118 | model_data_path, 119 | "hazard.nc", 120 | ) 121 | assert p.is_file() 122 | ds = xr.open_dataset(p) 123 | assert len(ds.data_vars) != 0 124 | return ds 125 | 126 | 127 | @pytest.fixture(scope="session") 128 | def vulnerability_curves(model_data_path: Path) -> pd.DataFrame: 129 | p = Path(model_data_path, "vulnerability", "curves.csv") 130 | assert p.is_file() 131 | df = pd.read_csv(p) 132 | assert len(df) != 0 133 | return df 134 | 135 | 136 | @pytest.fixture(scope="session") 137 | def vulnerability_identifiers(model_data_path: Path) -> pd.DataFrame: 138 | p = Path(model_data_path, "vulnerability", "curves_id.csv") 139 | assert p.is_file() 140 | df = pd.read_csv(p) 141 | assert len(df) != 0 142 | return df 143 | 144 | 145 | ## Model data (clipped) 146 | @pytest.fixture(scope="session") 147 | def model_data_clipped_path() -> Path: 148 | # Fetch the data 149 | p = fetch_data("fiat-model-c") 150 | assert len(list(p.iterdir())) != 0 151 | return p 152 | 153 | 154 | @pytest.fixture 155 | def exposure_vector_clipped(model_data_clipped_path: Path) -> gpd.GeoDataFrame: 156 | p = Path(model_data_clipped_path, "exposure", "buildings.fgb") 157 | assert p.is_file() 158 | gdf = gpd.read_file(p) 159 | assert len(gdf) != 0 160 | return gdf 161 | 162 | 163 | @pytest.fixture 164 | def exposure_vector_clipped_for_damamge( 165 | exposure_vector_clipped: gpd.GeoDataFrame, 166 | ) -> gpd.GeoDataFrame: 167 | exposure_vector_clipped.drop( 168 | [ 169 | "cost_type", 170 | "max_damage_structure", 171 | "max_damage_content", 172 | "ref", 173 | "method", 174 | ], 175 | axis=1, 176 | inplace=True, 177 | ) 178 | return exposure_vector_clipped 179 | 180 | 181 | @pytest.fixture 182 | def exposure_grid_clipped(model_data_clipped_path: Path) -> xr.Dataset: 183 | p = Path(model_data_clipped_path, "exposure", "spatial.nc") 184 | assert p.is_file() 185 | ds = xr.open_dataset(p) 186 | assert len(ds.data_vars) != 0 187 | return ds 188 | 189 | 190 | @pytest.fixture 191 | def hazard_clipped(model_data_clipped_path: Path) -> xr.Dataset: 192 | p = Path( 193 | model_data_clipped_path, 194 | "hazard.nc", 195 | ) 196 | assert p.is_file() 197 | ds = xr.open_dataset(p) 198 | assert len(ds.data_vars) != 0 199 | return ds 200 | 201 | 202 | ## OSM data 203 | @pytest.fixture(scope="session") 204 | def osm_data_path() -> Path: 205 | # Fetch the data 206 | p = fetch_data("osmnx") 207 | assert len(list(p.iterdir())) != 0 208 | return p 209 | 210 | 211 | ## Models and mocked objects 212 | @pytest.fixture 213 | def model( 214 | tmp_path: Path, 215 | build_data_catalog_path: Path, 216 | global_data_catalog_path: Path, 217 | ) -> FIATModel: 218 | model = FIATModel( 219 | tmp_path, 220 | mode="w", 221 | data_libs=[build_data_catalog_path, global_data_catalog_path], 222 | ) 223 | return model 224 | 225 | 226 | @pytest.fixture 227 | def model_with_region( 228 | model: FIATModel, 229 | build_region_small: Path, 230 | ) -> FIATModel: 231 | model.setup_region(build_region_small) 232 | return model 233 | 234 | 235 | ## Extra data structures 
236 | @pytest.fixture 237 | def box_geometry() -> gpd.GeoDataFrame: 238 | geom = gpd.GeoDataFrame( 239 | geometry=[box(4.355, 52.035, 4.365, 52.045)], 240 | crs=4326, 241 | ) 242 | return geom 243 | 244 | 245 | @pytest.fixture 246 | def exposure_cost_link() -> pd.DataFrame: 247 | df = pd.DataFrame( 248 | data={ 249 | "object_type": ["residential", "commercial", "industrial", "unknown"], 250 | "cost_type": ["residential", "commercial", "industrial", "unknown"], 251 | } 252 | ) 253 | return df 254 | -------------------------------------------------------------------------------- /tests/components/test_exposure_grid_component.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from unittest.mock import MagicMock, PropertyMock 3 | 4 | import pytest 5 | import xarray as xr 6 | from hydromt.model import ModelRoot 7 | from pytest_mock import MockerFixture 8 | 9 | from hydromt_fiat import FIATModel 10 | from hydromt_fiat.components import ExposureGridComponent 11 | from hydromt_fiat.errors import MissingRegionError 12 | from hydromt_fiat.utils import ( 13 | EXPOSURE, 14 | EXPOSURE_GRID_FILE, 15 | EXPOSURE_GRID_SETTINGS, 16 | FN_CURVE, 17 | GRID, 18 | MODEL_TYPE, 19 | VAR_AS_BAND, 20 | VULNERABILITY, 21 | ) 22 | 23 | 24 | def test_exposure_grid_component_empty( 25 | mock_model: MagicMock, 26 | ): 27 | # Setup the component 28 | component = ExposureGridComponent(model=mock_model) 29 | 30 | # Assert some basics 31 | assert component._filename == f"{EXPOSURE}/spatial.nc" 32 | assert len(component.data) == 0 33 | assert isinstance(component.data, xr.Dataset) 34 | 35 | 36 | def test_exposure_grid_component_read( 37 | mock_model_config: MagicMock, 38 | model_data_clipped_path: Path, 39 | ): 40 | type(mock_model_config).root = PropertyMock( 41 | side_effect=lambda: ModelRoot(model_data_clipped_path, mode="r"), 42 | ) 43 | # Setup the component 44 | component = ExposureGridComponent(model=mock_model_config) 45 | 46 | # Read the data by calling the data property 47 | component.data 48 | 49 | # No config entry is set, so the data is read from the default filename 50 | assert len(component.data.data_vars) == 4 51 | assert "industrial_content" in component.data.data_vars 52 | 53 | 54 | def test_exposure_grid_component_read_sig( 55 | mock_model_config: MagicMock, 56 | model_data_clipped_path: Path, 57 | ): 58 | type(mock_model_config).root = PropertyMock( 59 | side_effect=lambda: ModelRoot(model_data_clipped_path, mode="r"), 60 | ) 61 | # Setup the component 62 | component = ExposureGridComponent(model=mock_model_config) 63 | 64 | # Read the data using the signature of the read method 65 | component.read(f"{EXPOSURE}/spatial.nc") 66 | 67 | # No config entry is needed; the data is read from the given filename 68 | assert len(component.data.data_vars) == 4 69 | assert "industrial_content" in component.data.data_vars 70 | 71 | 72 | def test_exposure_grid_component_read_nothing( 73 | tmp_path: Path, 74 | mock_model_config: MagicMock, 75 | ): 76 | type(mock_model_config).root = PropertyMock( 77 | side_effect=lambda: ModelRoot(tmp_path, mode="r"), 78 | ) 79 | # Setup the component 80 | component = ExposureGridComponent(model=mock_model_config) 81 | # Assert current state 82 | assert len(component.data) == 0 83 | 84 | # Read the data (nothing) 85 | component.read() 86 | # Assert still no data 87 | assert len(component.data) == 0 88 | 89 | 90 | def test_exposure_grid_component_write( 91 | tmp_path: Path, 92 | mock_model_config: MagicMock, 93 | exposure_grid_clipped: xr.Dataset, 94 | ): 95 | # Setup the component 96 | 
component = ExposureGridComponent(model=mock_model_config) 97 | 98 | # Set data like a dummy 99 | component._data = exposure_grid_clipped 100 | 101 | # Write the data 102 | component.write() 103 | 104 | # Assert the output 105 | assert Path(tmp_path, EXPOSURE, "spatial.nc").is_file() 106 | # Assert the config 107 | assert component.model.config.get(EXPOSURE_GRID_FILE) == Path( 108 | tmp_path, 109 | EXPOSURE, 110 | "spatial.nc", 111 | ) 112 | assert component.model.config.get(f"{EXPOSURE_GRID_SETTINGS}.{VAR_AS_BAND}") 113 | 114 | 115 | def test_exposure_grid_component_write_config( 116 | tmp_path: Path, 117 | mock_model_config: MagicMock, 118 | exposure_grid_clipped: xr.Dataset, 119 | ): 120 | # Setup the component 121 | component = ExposureGridComponent(model=mock_model_config) 122 | 123 | # Set data like a dummy 124 | component._data = exposure_grid_clipped["industrial_content"].to_dataset() 125 | # Add to the config 126 | component.model.config.set(EXPOSURE_GRID_FILE, "foo.nc") 127 | 128 | # Write the data 129 | component.write() 130 | 131 | # Assert the output 132 | assert Path(tmp_path, "foo.nc").is_file() 133 | # Assert the config 134 | assert not component.model.config.get(f"{EXPOSURE_GRID_SETTINGS}.{VAR_AS_BAND}") 135 | 136 | 137 | def test_exposure_grid_component_write_sig( 138 | tmp_path: Path, 139 | mock_model_config: MagicMock, 140 | exposure_grid_clipped: xr.Dataset, 141 | ): 142 | # Setup the component 143 | component = ExposureGridComponent(model=mock_model_config) 144 | 145 | # Set data like a dummy 146 | component._data = exposure_grid_clipped 147 | 148 | # Write the data 149 | component.write("baz.nc") 150 | 151 | # Assert the output 152 | assert Path(tmp_path, "baz.nc").is_file() 153 | # Assert the config file 154 | assert component.model.config.get(EXPOSURE_GRID_FILE) == Path( 155 | tmp_path, 156 | "baz.nc", 157 | ) 158 | 159 | 160 | def test_exposure_grid_component_setup( 161 | model_exposure_setup: FIATModel, 162 | ): 163 | # Setup the component 164 | component = ExposureGridComponent(model=model_exposure_setup) 165 | 166 | # Call the method 167 | component.setup( 168 | exposure_fnames="industrial_content", 169 | ) 170 | 171 | # Assert the output 172 | assert isinstance(component.data, xr.Dataset) 173 | assert "industrial_content" in component.data.data_vars 174 | assert component.data.industrial_content.attrs.get(FN_CURVE) == "in2" 175 | 176 | # Assert entries in the config 177 | assert component.model.config.get(MODEL_TYPE) == GRID 178 | assert not component.model.config.get(f"{EXPOSURE_GRID_SETTINGS}.{VAR_AS_BAND}") 179 | 180 | 181 | def test_exposure_grid_component_setup_multi( 182 | model_exposure_setup: FIATModel, 183 | ): 184 | # Setup the component 185 | component = ExposureGridComponent(model=model_exposure_setup) 186 | 187 | # Call the method 188 | component.setup( 189 | exposure_fnames=["industrial_content", "industrial_structure"], 190 | exposure_link_fname="exposure_grid_link", 191 | ) 192 | 193 | # Assert the output 194 | assert "industrial_content" in component.data.data_vars 195 | assert "industrial_structure" in component.data.data_vars 196 | assert component.data.industrial_structure.attrs.get(FN_CURVE) == "in1" 197 | 198 | 199 | def test_exposure_grid_component_setup_errors( 200 | mocker: MockerFixture, 201 | model: FIATModel, 202 | ): 203 | # Setup the component 204 | component = ExposureGridComponent(model=model) 205 | 206 | # Assert the error for a missing vulnerability component 207 | err_msg = "'setup_vulnerability' step is required before setting up exposure 
grid" 208 | with pytest.raises(RuntimeError, match=err_msg): 209 | component.setup( 210 | exposure_fnames="industrial_content", 211 | exposure_link_fname="", # Can be nonsense, error is raised earlier 212 | ) 213 | 214 | # Assert missing region error 215 | mocker.patch.object(FIATModel, VULNERABILITY) 216 | with pytest.raises( 217 | MissingRegionError, match="Region is required for setting up exposure grid" 218 | ): 219 | component.setup( 220 | exposure_fnames="industrial_content", 221 | exposure_link_fname="", 222 | ) 223 | -------------------------------------------------------------------------------- /tests/components/test_config_component.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | from unittest.mock import MagicMock, PropertyMock 4 | 5 | import pytest 6 | from hydromt.model import ModelRoot 7 | from hydromt.readers import read_toml 8 | 9 | from hydromt_fiat.components import ConfigComponent 10 | from hydromt_fiat.utils import ( 11 | EXPOSURE, 12 | GEOM, 13 | MODEL, 14 | SETTINGS, 15 | TYPE, 16 | ) 17 | 18 | 19 | def test_config_component_init(mock_model: MagicMock): 20 | # Setup the component 21 | component = ConfigComponent(mock_model) 22 | 23 | # Assert that the internal data is None 24 | assert component._data is None 25 | 26 | # When asking for data property, it should return a dict 27 | assert isinstance(component.data, dict) 28 | assert isinstance(component._data, dict) # Same for internal 29 | assert len(component.data) == 0 30 | 31 | 32 | def test_config_component_props(tmp_path: Path, mock_model: MagicMock): 33 | # Setup the component 34 | component = ConfigComponent(mock_model) 35 | 36 | # Assert it's properties 37 | assert component.dir == tmp_path 38 | assert component.filename == f"{SETTINGS}.toml" 39 | # Set the filename 40 | component.filename = "foo.toml" 41 | assert component.filename == "foo.toml" 42 | 43 | 44 | def test_config_component_clear( 45 | mock_model: MagicMock, 46 | config_dummy: dict, 47 | ): 48 | # Setup the component 49 | component = ConfigComponent(mock_model) 50 | 51 | # Set data like a dummy 52 | component._data = config_dummy 53 | # Assert the current state 54 | assert len(component.data) == 4 55 | 56 | # Call the clear method 57 | component.clear() 58 | # Assert the state after 59 | assert len(component.data) == 0 60 | 61 | 62 | def test_config_component_get( 63 | mock_model: MagicMock, 64 | config_dummy: dict, 65 | ): 66 | # Setup the component 67 | component = ConfigComponent(mock_model) 68 | 69 | # Set data like a dummy 70 | component._data = config_dummy 71 | 72 | # Get an entry 73 | res = component.get("foo") 74 | # Assert the entry 75 | assert res == "bar" 76 | 77 | # Get an entry deeper 78 | res = component.get("spooky.ghost") 79 | # Assert the entry 80 | assert res == [1, 2, 3] 81 | 82 | # Get an entry that doesnt exists, return fallback 83 | res = component.get("No", fallback=2) 84 | # Assert the entry 85 | assert res == 2 86 | 87 | 88 | def test_config_component_get_path( 89 | tmp_path: Path, 90 | mock_model: MagicMock, 91 | config_dummy: dict, 92 | ): 93 | # Setup the component 94 | component = ConfigComponent(mock_model) 95 | 96 | # Set data like a dummy 97 | component._data = config_dummy 98 | 99 | # Get and entry as an absolute path 100 | res = component.get("baz.file2", abs_path=True) 101 | # Assert the output 102 | assert res == Path(tmp_path, "tmp/tmp.txt") 103 | 104 | 105 | def test_config_component_set( 106 | mock_model: MagicMock, 107 | 
): 108 | # Setup the component 109 | component = ConfigComponent(mock_model) 110 | 111 | # Set data 112 | component.set("foo", value="bar") 113 | # Assert state 114 | assert component.data["foo"] == "bar" 115 | 116 | # Set data with an extra level (part) 117 | component.set("baz.boo", value=2) 118 | # Assert state 119 | assert component.data["baz"]["boo"] == 2 120 | 121 | 122 | def test_config_component_set_dict( 123 | mock_model: MagicMock, 124 | ): 125 | # Setup the component 126 | component = ConfigComponent(mock_model) 127 | 128 | # Set data 129 | component.set("foo.bar", value="baz") 130 | # Assert state 131 | assert component.data["foo"]["bar"] == "baz" 132 | 133 | # Set data via a dictionary 134 | component.set("foo", value={"boo": 2}) 135 | # Assert state 136 | assert component.data["foo"]["boo"] == 2 137 | assert len(component.data["foo"]) == 2 138 | 139 | 140 | def test_config_component_set_none( 141 | mock_model: MagicMock, 142 | ): 143 | # Setup the component 144 | component = ConfigComponent(mock_model) 145 | 146 | # Set data 147 | component.set("foo", value=None) 148 | # Assert state 149 | assert "foo" not in component.data 150 | 151 | # Set data via a dictionary 152 | component.set("foo", value={"bar": 2, "boo": None}) 153 | # Assert state 154 | assert "bar" in component.data["foo"] 155 | assert "boo" not in component.data["foo"] 156 | assert len(component.data["foo"]) == 1 157 | 158 | 159 | def test_config_component_read( 160 | mock_model: MagicMock, 161 | model_data_clipped_path: Path, 162 | ): 163 | # Set it to read mode 164 | type(mock_model).root = PropertyMock( 165 | side_effect=lambda: ModelRoot(model_data_clipped_path, mode="r"), 166 | ) 167 | 168 | # Setup the component 169 | component = ConfigComponent(mock_model) 170 | 171 | # Assert its data is currently None 172 | assert component._data is None 173 | 174 | # Read the data 175 | component.read() 176 | 177 | # Assert the read data 178 | assert isinstance(component.data, dict) 179 | assert len(component.data) == 4 180 | assert component.data[MODEL][TYPE] == GEOM 181 | assert component.data[EXPOSURE] 182 | 183 | 184 | def test_config_component_read_none( 185 | tmp_path: Path, 186 | mock_model: MagicMock, 187 | ): 188 | # Set it to read mode 189 | type(mock_model).root = PropertyMock( 190 | side_effect=lambda: ModelRoot(tmp_path, mode="r"), 191 | ) 192 | 193 | # Setup the component 194 | component = ConfigComponent(mock_model) 195 | 196 | # Assert its data is currently None 197 | assert component._data is None 198 | 199 | # Read the data 200 | component.read() 201 | 202 | # Assert the read data 203 | assert isinstance(component.data, dict) 204 | assert len(component.data) == 0 205 | 206 | 207 | def test_config_component_write( 208 | tmp_path: Path, 209 | mock_model: MagicMock, 210 | config_dummy: dict, 211 | ): 212 | # Setup the component 213 | component = ConfigComponent(mock_model) 214 | 215 | # Set data like a dummy 216 | component._data = config_dummy 217 | 218 | # Write the data 219 | component.write() 220 | 221 | # Assert that the file exists 222 | assert Path(tmp_path, component._filename).is_file() 223 | 224 | # Assert at least the path that was absolute in the config dict 225 | data = read_toml(Path(tmp_path, component._filename)) 226 | assert data["baz"]["file1"] == "tmp.txt" 227 | 228 | 229 | def test_config_component_write_sig( 230 | tmp_path: Path, 231 | mock_model: MagicMock, 232 | ): 233 | # Setup the component 234 | component = ConfigComponent(mock_model) 235 | # Set data like a dummy 236 | component._data = 
{"foo": "bar"} 237 | 238 | # Write to an alternative path 239 | component.write(filename="settings/tmp.toml") 240 | 241 | # That the file exists 242 | assert Path(tmp_path, "settings").is_dir() 243 | assert Path(tmp_path, "settings/tmp.toml").is_file() 244 | 245 | 246 | def test_config_component_write_warnings( 247 | caplog: pytest.LogCaptureFixture, 248 | tmp_path: Path, 249 | mock_model: MagicMock, 250 | ): 251 | caplog.set_level(logging.WARNING) 252 | # Setup the component 253 | component = ConfigComponent(mock_model) 254 | 255 | # Write the data 256 | component.write() 257 | 258 | # Assert the logging message 259 | assert "No data in config component, writing empty file.." in caplog.text 260 | # Assert file has still been written 261 | assert Path(tmp_path, component._filename).is_file() 262 | -------------------------------------------------------------------------------- /tests/components/test_grid_component.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from unittest.mock import MagicMock 3 | 4 | import geopandas as gpd 5 | import numpy as np 6 | import pytest 7 | import xarray as xr 8 | from pyproj.crs import CRS 9 | 10 | from hydromt_fiat.components.grid import CustomGridComponent 11 | 12 | # Overwrite the abstractmethods to be able to initialize it 13 | CustomGridComponent.__abstractmethods__ = set() 14 | 15 | 16 | def test_custom_grid_component_clear( 17 | mock_model: MagicMock, 18 | hazard: xr.Dataset, 19 | ): 20 | # Set up the component 21 | component = CustomGridComponent(model=mock_model) 22 | 23 | # Set data like a dummy 24 | component._data = hazard 25 | # Assert the current state 26 | assert len(component.data.data_vars) == 1 27 | 28 | # Call the clear method 29 | component.clear() 30 | # Assert the state after 31 | assert len(component.data.data_vars) == 0 32 | 33 | 34 | def test_custom_grid_component_clip( 35 | mock_model: MagicMock, 36 | build_region_small: gpd.GeoDataFrame, 37 | hazard: xr.Dataset, 38 | ): 39 | # Set up the component 40 | component = CustomGridComponent(model=mock_model) 41 | 42 | # Set data like a dummy 43 | component._data = hazard 44 | # Assert the current state 45 | assert component.data.flood_event.shape == (34, 25) 46 | 47 | # Call the clipping method using a smaller region 48 | ds = component.clip(geom=build_region_small, buffer=0) 49 | # Assert the output 50 | assert ds.flood_event.shape == (5, 4) 51 | 52 | 53 | def test_custom_grid_component_clip_no_data( 54 | mock_model: MagicMock, 55 | build_region_small: gpd.GeoDataFrame, 56 | ): 57 | # Set up the component 58 | component = CustomGridComponent(model=mock_model) 59 | # Assert the current state 60 | assert component._data is None 61 | 62 | # Call the clipping method using a smaller region 63 | ds = component.clip(geom=build_region_small) 64 | # Assert that there is no output 65 | assert ds is None 66 | 67 | 68 | def test_custom_grid_component_clip_inplace( 69 | mock_model: MagicMock, 70 | build_region_small: gpd.GeoDataFrame, 71 | hazard: xr.Dataset, 72 | ): 73 | # Set up the component 74 | component = CustomGridComponent(model=mock_model) 75 | 76 | # Set data like a dummy 77 | component._data = hazard 78 | # Assert the current state 79 | assert component.data.flood_event.shape == (34, 25) 80 | 81 | # Call the clipping method using a smaller region 82 | ds = component.clip(geom=build_region_small, buffer=0, inplace=True) 83 | # Assert that the output is None but the shape of the component data changed 84 | assert ds is None 85 | 
88 | def test_custom_grid_component_reproject(
89 |     mock_model: MagicMock,
90 |     hazard: xr.Dataset,
91 | ):
92 |     # Set up the component
93 |     component = CustomGridComponent(model=mock_model)
94 | 
95 |     # Set data like a dummy
96 |     component._data = hazard
97 |     # Assert the current state
98 |     assert component.data.flood_event.shape == (34, 25)
99 |     assert component.crs.to_epsg() == 28992
100 |     np.testing.assert_almost_equal(component.data.x.values[0], 85250)
101 | 
102 |     # Reproject the data
103 |     ds = component.reproject(crs=4326)
104 | 
105 |     # Assert the output
106 |     assert not component.data.equals(ds)
107 |     assert component.crs.to_epsg() == 28992
108 |     assert ds.raster.crs.to_epsg() == 4326
109 |     assert ds.flood_event.shape == (28, 33)
110 |     np.testing.assert_almost_equal(ds.longitude.values[0], 4.371, decimal=3)
111 | 
112 | 
113 | def test_custom_grid_component_reproject_inplace(
114 |     mock_model: MagicMock,
115 |     hazard: xr.Dataset,
116 | ):
117 |     # Set up the component
118 |     component = CustomGridComponent(model=mock_model)
119 | 
120 |     # Set data like a dummy
121 |     component._data = hazard
122 |     # Assert the current state
123 |     assert component.data.flood_event.shape == (34, 25)
124 |     assert component.crs.to_epsg() == 28992
125 | 
126 |     # Reproject inplace
127 |     ds = component.reproject(crs=CRS.from_epsg(4326), inplace=True)
128 | 
129 |     # Assert the output/ state
130 |     assert ds is None
131 |     assert component.crs.to_epsg() == 4326
132 |     assert component.data.flood_event.shape == (28, 33)
133 | 
134 | 
135 | def test_custom_grid_component_reproject_nothing(
136 |     mock_model: MagicMock,
137 |     hazard: xr.Dataset,
138 | ):
139 |     # Set up the component
140 |     component = CustomGridComponent(model=mock_model)
141 | 
142 |     # Set data like a dummy
143 |     component._data = hazard
144 |     id_before = id(component.data)  # To check later
145 | 
146 |     # Reproject with the same crs
147 |     ds = component.reproject(crs="EPSG:28992")
148 |     # Assert the state/ output
149 |     assert ds is None
150 |     assert id_before == id(component.data)
151 | 
152 |     # The same holds true when the data crs is None
153 |     component._data = component._data.drop_vars("spatial_ref")
154 |     id_before = id(component.data)  # To check later
155 | 
156 |     # Reproject with the same crs
157 |     ds = component.reproject(crs="EPSG:4326")
158 |     # Assert the state/ output
159 |     assert ds is None
160 |     assert id_before == id(component.data)
161 | 
162 | 
163 | def test_custom_grid_component_set(
164 |     mock_model: MagicMock,
165 |     exposure_grid_clipped: xr.Dataset,
166 | ):
167 |     # Set up the component
168 |     component = CustomGridComponent(model=mock_model)
169 |     # Assert nothing in the component
170 |     assert len(component.data.data_vars) == 0
171 | 
172 |     # Set the data via the set method, providing a dataset
173 |     component.set(exposure_grid_clipped)
174 | 
175 |     # Assert the state
176 |     assert len(component.data.data_vars) == 4
177 |     assert "commercial_content" in component.data.data_vars
178 | 
179 |     # Set data as a dataarray
180 |     component.set(exposure_grid_clipped["commercial_content"], name="foo")
181 | 
182 |     # Assert the state
183 |     assert len(component.data.data_vars) == 5
184 |     assert "foo" in component.data.data_vars
185 | 
186 | 
187 | def test_custom_grid_component_set_replace(
188 |     caplog: pytest.LogCaptureFixture,
189 |     mock_model: MagicMock,
190 |     exposure_grid_clipped: xr.Dataset,
191 | ):
192 |     caplog.set_level(logging.WARNING)
193 |     # Set up the component
194 |     component = CustomGridComponent(model=mock_model)
195 |     # Assert nothing in the component
196 |     assert len(component.data.data_vars) == 0
197 | 
198 |     # Set the data via the set method, providing a dataset
199 |     component.set(exposure_grid_clipped)
200 |     component.set(exposure_grid_clipped)
201 | 
202 |     # Assert the logging message
203 |     assert "Replacing grid map: 'commercial_content'" in caplog.text
204 | 
205 |     # Assert the state
206 |     assert len(component.data.data_vars) == 4  # not 8
207 |     assert "commercial_content" in component.data.data_vars
208 | 
209 | 
210 | def test_custom_grid_component_set_errors(
211 |     mock_model: MagicMock,
212 |     exposure_grid_clipped: xr.Dataset,
213 | ):
214 |     # Set up the component
215 |     component = CustomGridComponent(model=mock_model)
216 | 
217 |     # Dataarray without a name
218 |     da = exposure_grid_clipped["commercial_content"]
219 |     da.name = None
220 |     with pytest.raises(
221 |         ValueError,
222 |         match="DataArray can't be set without a name",
223 |     ):
224 |         component.set(da)
225 | 
226 |     # Wrong input data type, nonsense like an integer
227 |     with pytest.raises(
228 |         TypeError,
229 |         match="Wrong input data type: 'int'",
230 |     ):
231 |         component.set(2)
232 | 
--------------------------------------------------------------------------------
/tests/components/test_vulnerability_component.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from pathlib import Path
3 | from unittest.mock import MagicMock, PropertyMock
4 | 
5 | import pandas as pd
6 | import pytest
7 | from hydromt.model import ModelRoot
8 | 
9 | from hydromt_fiat import FIATModel
10 | from hydromt_fiat.components import VulnerabilityComponent
11 | from hydromt_fiat.components.vulnerability import VulnerabilityData
12 | from hydromt_fiat.utils import (
13 |     CURVE,
14 |     CURVES,
15 |     EXPOSURE_LINK,
16 |     VULNERABILITY,
17 |     VULNERABILITY_FILE,
18 | )
19 | 
20 | 
21 | def test_vulnerability_data_errors():
22 |     # Construct an empty data container
23 |     x = VulnerabilityData(pd.DataFrame(), pd.DataFrame())
24 | 
25 |     # Error on wrong get
26 |     with pytest.raises(
27 |         KeyError,
28 |         match="Can't get 'foo'",
29 |     ):
30 |         _ = x["foo"]
31 | 
32 |     # Error on wrong set
33 |     with pytest.raises(
34 |         KeyError,
35 |         match="Can't set 'foo' in VulnerabilityData class",
36 |     ):
37 |         x["foo"] = 2
38 | 
39 | 
40 | def test_vulnerability_component_empty(
41 |     mock_model: MagicMock,
42 | ):
43 |     # Setup the component
44 |     component = VulnerabilityComponent(model=mock_model)
45 | 
46 |     # Assert the content
47 |     assert component._filename == f"{VULNERABILITY}/{CURVES}.csv"
48 |     assert component.data.curves.empty
49 |     assert isinstance(component.data, VulnerabilityData)
50 | 
51 | 
52 | def test_vulnerability_component_clear(
53 |     mock_model: MagicMock,
54 |     vulnerability_curves: pd.DataFrame,
55 | ):
56 |     # Setup the component
57 |     component = VulnerabilityComponent(model=mock_model)
58 | 
59 |     # Set data like a dummy
60 |     component._data = VulnerabilityData(vulnerability_curves, pd.DataFrame())
61 |     # Assert the current state
62 |     assert len(component.data.curves) == 1001
63 | 
64 |     # Call the clear method
65 |     component.clear()
66 |     # Assert the state after
67 |     assert len(component.data.curves) == 0
68 | 
69 | 
70 | def test_vulnerability_component_set(
71 |     caplog: pytest.LogCaptureFixture,
72 |     mock_model: MagicMock,
73 |     vulnerability_curves: pd.DataFrame,
74 | ):
75 |     caplog.set_level(logging.WARNING)
76 |     # Setup the component
77 |     component = VulnerabilityComponent(model=mock_model)
78 |     # Assert the current state
79 |     assert component.data.curves.empty
80 | 
81 |     # Call the method
82 |     component.set(vulnerability_curves, name=CURVES)
83 |     # Assert the state
84 |     assert not component.data.curves.empty
85 | 
86 |     # Overwrite and trigger warning
87 |     component.set(vulnerability_curves, name=CURVES)
88 |     # Assert the warning
89 |     assert "Replacing vulnerabilty data: curves" in caplog.text
90 | 
91 | 
92 | def test_vulnerability_component_read(
93 |     mock_model_config: MagicMock,
94 |     model_data_clipped_path: Path,
95 | ):
96 |     type(mock_model_config).root = PropertyMock(
97 |         side_effect=lambda: ModelRoot(model_data_clipped_path, mode="r"),
98 |     )
99 |     # Setup the component
100 |     component = VulnerabilityComponent(model=mock_model_config)
101 | 
102 |     # Read by calling the data property, thereby falling back on the config entry,
103 |     # as there is a config entry present
104 |     component.data
105 | 
106 |     # Assert the state
107 |     assert not component.data.curves.empty
108 |     assert not component.data.identifiers.empty
109 |     assert len(component.data.identifiers.columns) == 5
110 |     assert not any(
111 |         (component.data.identifiers[EXPOSURE_LINK] == component.data.identifiers[CURVE])
112 |     )
113 | 
114 | 
115 | def test_vulnerability_component_read_sig(
116 |     mock_model_config: MagicMock,
117 |     model_data_clipped_path: Path,
118 | ):
119 |     type(mock_model_config).root = PropertyMock(
120 |         side_effect=lambda: ModelRoot(model_data_clipped_path, mode="r"),
121 |     )
122 |     # Setup the component
123 |     component = VulnerabilityComponent(model=mock_model_config)
124 | 
125 |     # Read with an argument
126 |     component.read(f"{VULNERABILITY}/{CURVES}.csv")
127 | 
128 |     # Assert the state
129 |     assert not component.data.curves.empty
130 |     assert not component.data.identifiers.empty
131 | 
132 | 
133 | def test_vulnerability_component_read_no_id(
134 |     caplog: pytest.LogCaptureFixture,
135 |     tmp_path: Path,
136 |     mock_model_config: MagicMock,
137 |     vulnerability_curves_only_path: Path,
138 | ):
139 |     caplog.set_level(logging.WARNING)
140 |     type(mock_model_config).root = PropertyMock(
141 |         side_effect=lambda: ModelRoot(tmp_path, mode="r"),
142 |     )
143 |     # Setup the component
144 |     component = VulnerabilityComponent(model=mock_model_config)
145 | 
146 |     # Read from the tmp dir where no identifiers are present
147 |     component.read(f"{CURVES}.csv")
148 | 
149 |     # Assert the message
150 |     assert "Inferring vulnerability identifiers from curves" in caplog.text
151 | 
152 |     # Assert the state
153 |     assert not component.data.curves.empty
154 |     assert not component.data.identifiers.empty
155 |     assert len(component.data.identifiers.columns) == 4
156 |     assert all(
157 |         (component.data.identifiers[EXPOSURE_LINK] == component.data.identifiers[CURVE])
158 |     )
159 | 
160 | 
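# The inference exercised above presumably boils down to deriving the
# identifiers table from the curve names themselves, so that every exposure
# link equals its curve name. A rough, hypothetical sketch (the component adds
# further metadata columns, four in total per the assertion above):
#
#     import pandas as pd
#
#     def _infer_identifiers_sketch(curves: pd.DataFrame) -> pd.DataFrame:
#         names = list(curves.columns)
#         return pd.DataFrame({CURVE: names, EXPOSURE_LINK: names})
#
# Identifiers read from disk, by contrast, may link a curve to a different
# exposure name, which is what test_vulnerability_component_read checks.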
161 | def test_vulnerability_component_write(
162 |     tmp_path: Path,
163 |     mock_model_config: MagicMock,
164 |     vulnerability_curves: pd.DataFrame,
165 | ):
166 |     # Setup the component
167 |     component = VulnerabilityComponent(model=mock_model_config)
168 | 
169 |     # Set data like a dummy
170 |     component.data[CURVES] = vulnerability_curves
171 | 
172 |     # Write the data
173 |     component.write()
174 | 
175 |     # Assert the output
176 |     assert Path(tmp_path, VULNERABILITY, f"{CURVES}.csv").is_file()
177 |     # Assert the config
178 |     assert component.model.config.get(VULNERABILITY_FILE) == Path(
179 |         tmp_path,
180 |         VULNERABILITY,
181 |         f"{CURVES}.csv",
182 |     )
183 | 
184 | 
185 | def test_vulnerability_component_write_empty(
186 |     caplog: pytest.LogCaptureFixture,
187 |     mock_model: MagicMock,
188 | ):
189 |     caplog.set_level(logging.INFO)
190 |     # Setup the component
191 |     component = VulnerabilityComponent(model=mock_model)
192 | 
193 |     # Write the data
194 |     component.write()
195 | 
196 |     # Assert the logging
197 |     assert "No vulnerability curves encountered, skipping.." in caplog.text
198 | 
199 | 
200 | def test_vulnerability_component_write_sig(
201 |     tmp_path: Path,
202 |     mock_model_config: MagicMock,
203 |     vulnerability_curves: pd.DataFrame,
204 | ):
205 |     # Setup the component
206 |     component = VulnerabilityComponent(model=mock_model_config)
207 | 
208 |     # Set data like a dummy
209 |     component.data[CURVES] = vulnerability_curves
210 | 
211 |     # Write the data
212 |     component.write("foo.csv", index=False)
213 | 
214 |     # Assert the output
215 |     assert Path(tmp_path, "foo.csv").is_file()
216 |     # Assert the config
217 |     assert component.model.config.get(VULNERABILITY_FILE) == Path(
218 |         tmp_path,
219 |         "foo.csv",
220 |     )
221 | 
222 | 
223 | def test_vulnerability_component_setup(model: FIATModel):
224 |     # Setup the component
225 |     component = VulnerabilityComponent(model=model)
226 | 
227 |     # Assert it's empty
228 |     assert component.data.curves.empty
229 | 
230 |     # Setup the vulnerability
231 |     component.setup(
232 |         vulnerability_fname="jrc_curves",
233 |         vulnerability_linking_fname="jrc_curves_link",
234 |         continent="europe",
235 |     )
236 | 
237 |     assert not component.data.curves.empty
238 |     assert not component.data.identifiers.empty
239 | 
--------------------------------------------------------------------------------
/src/hydromt_fiat/components/config.py:
--------------------------------------------------------------------------------
1 | """The config component."""
2 | 
3 | import logging
4 | from pathlib import Path
5 | from typing import Any, cast
6 | 
7 | import tomlkit
8 | from hydromt.model import Model
9 | from hydromt.model.components import ModelComponent
10 | from hydromt.model.steps import hydromt_step
11 | from hydromt.readers import read_toml
12 | 
13 | from hydromt_fiat.components.utils import get_item, make_config_paths_relative
14 | from hydromt_fiat.utils import SETTINGS
15 | 
16 | __all__ = ["ConfigComponent"]
17 | 
18 | logger = logging.getLogger(f"hydromt.{__name__}")
19 | 
20 | 
21 | class ConfigComponent(ModelComponent):
22 |     """Config component.
23 | 
24 |     Container for all the settings of a Delft-FIAT model.
25 | 
26 |     Parameters
27 |     ----------
28 |     model : Model
29 |         HydroMT model instance (FIATModel).
30 |     filename : str, optional
31 |         A path relative to the root where the configuration file will
32 |         be read and written if the user does not provide a path themselves.
33 |         By default 'settings.toml'.
34 |     """
35 | 
36 |     def __init__(
37 |         self,
38 |         model: Model,
39 |         *,
40 |         filename: str = f"{SETTINGS}.toml",
41 |     ):
42 |         self._data: dict[str, Any] | None = None
43 |         self._filename: Path | str = filename
44 |         super().__init__(
45 |             model,
46 |         )
47 | 
48 |     ## Private methods
49 |     def _initialize(self, skip_read=False) -> None:
50 |         """Initialize the model config."""
51 |         if self._data is None:
52 |             self._data = {}
53 |             if not skip_read and self.root.is_reading_mode():
54 |                 self.read()
55 | 
56 |     ## Properties
57 |     @property
58 |     def data(self) -> dict[str, Any]:
59 |         """Model config values."""
60 |         if self._data is None:
61 |             self._initialize()
62 |         assert isinstance(self._data, dict)
63 |         return self._data
64 | 
65 |     @property
66 |     def dir(self) -> Path:
67 |         """The absolute directory of the configurations file.
68 | 
69 |         In most cases this will be equal to the model root directory, however one
70 |         can specify a subdirectory for the configuration file, therefore this property
71 |         exists.
72 |         """
73 |         return Path(self.root.path, self.filename).parent
74 | 
75 |     @property
76 |     def filename(self) -> Path | str:
77 |         """Filename of the config file."""
78 |         return self._filename
79 | 
80 |     @filename.setter
81 |     def filename(self, value: Path | str):
82 |         self._filename = value
83 | 
84 |     ## I/O methods
85 |     @hydromt_step
86 |     def read(
87 |         self,
88 |         filename: Path | str | None = None,
89 |     ) -> None:
90 |         """Read the FIAT model config file.
91 | 
92 |         Parameters
93 |         ----------
94 |         filename : Path | str, optional
95 |             The path to the model configurations file. This can either be a relative
96 |             or absolute path. If not provided, the component attribute `_filename` is
97 |             used as a fallback. By default None.
98 |         """
99 |         self.root._assert_read_mode()
100 |         self._initialize(skip_read=True)
101 | 
102 |         # Sort the filename
103 |         # Hierarchy: 1) signature, 2) default
104 |         filename = filename or self.filename
105 |         self.filename = filename
106 |         read_path = Path(self.root.path, filename)
107 | 
108 |         # Check for the path
109 |         if not read_path.is_file():
110 |             return
111 | 
112 |         # Read the data (config)
113 |         logger.info(f"Reading the config file at {read_path.as_posix()}")
114 |         self._data = read_toml(read_path)
115 | 
116 |     @hydromt_step
117 |     def write(
118 |         self,
119 |         filename: Path | str | None = None,
120 |     ) -> None:
121 |         """Write the FIAT model config file.
122 | 
123 |         Parameters
124 |         ----------
125 |         filename : Path | str, optional
126 |             The path to the model configurations file. This can either be a relative
127 |             or absolute path. If not provided, the component attribute `_filename` is
128 |             used as a fallback. By default None.
129 |         """
130 |         self.root._assert_write_mode()
131 | 
132 |         # If no data, warn but still write an empty file
133 |         if not self.data:
134 |             logger.warning("No data in config component, writing empty file..")
135 | 
136 |         # Path from signature or internal default
137 |         # Hierarchy is 1) signature, 2) default
138 |         p = filename or self._filename
139 | 
140 |         # Set the write path
141 |         write_path = Path(self.root.path, p)
142 | 
143 |         # Solve the pathing in the data
144 |         # Extra check for dir_input
145 |         parent_dir = write_path.parent
146 |         write_data = make_config_paths_relative(self.data, parent_dir)
147 | 
148 |         # Write the data to the drive.
149 |         if not parent_dir.exists():
150 |             parent_dir.mkdir(parents=True)
151 | 
152 |         # Dump to a file
153 |         logger.info(f"Writing the config data to {write_path.as_posix()}")
154 |         with open(write_path, "w") as writer:
155 |             tomlkit.dump(write_data, writer)
156 | 
157 |     ## Action methods
158 |     def get(
159 |         self,
160 |         key: str,
161 |         fallback: Any | None = None,
162 |         abs_path: bool = False,
163 |     ) -> Any:
164 |         """Get a configuration value.
165 | 
166 |         Parameters
167 |         ----------
168 |         key : str
169 |             Key can be given as a string with '.' indicating a new level: ('key1.key2').
170 |         fallback : Any, optional
171 |             Fallback value if the key is not found in the config, by default None.
172 |         abs_path : bool, optional
173 |             If True, return the absolute path relative to the configurations directory,
174 |             by default False.
175 | 
176 |         Returns
177 |         -------
178 |         value : Any
179 |             The value found in the config, or the fallback value.
180 |         """
181 |         parts = key.split(".")
182 |         current = dict(self.data)  # reads config at first call
183 |         value = get_item(
184 |             parts, current, root=self.dir, fallback=fallback, abs_path=abs_path
185 |         )
186 |         # Return the value
187 |         return value
188 | 
189 |     def set(self, key: str, value: Any) -> None:
190 |         """Set an entry in the configurations.
191 | 
192 |         Parameters
193 |         ----------
194 |         key : str
195 |             A string with '.' indicating a new level: 'key1.key2' will translate
196 |             to {"key1": {"key2": value}}.
197 |         value : Any
198 |             The value to set the config to.
199 |         """
200 |         self._initialize()
201 |         if isinstance(value, dict):
202 |             for subkey, subvalue in value.items():
203 |                 self.set(f"{key}.{subkey}", subvalue)
204 |             return
205 |         if value is None:  # Not allowed in toml files
206 |             return
207 |         parts = key.split(".")
208 |         num_parts = len(parts)
209 |         current = cast(dict[str, Any], self._data)
210 |         for i, part in enumerate(parts):
211 |             if part not in current or not isinstance(current[part], dict):
212 |                 current[part] = {}
213 |             if i < num_parts - 1:
214 |                 current = current[part]
215 |             else:
216 |                 current[part] = value
217 | 
218 |     ## Mutating methods
219 |     @hydromt_step
220 |     def clear(self):
221 |         """Clear the config data."""
222 |         self._data = None
223 |         self._initialize(skip_read=True)
224 | 
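# A minimal usage sketch of the dotted-key API above (assuming a FIATModel
# instance `model` opened in write mode; the keys and values are illustrative):
#
#     config = ConfigComponent(model)
#     config.set("model.model_type", "geom")  # -> {"model": {"model_type": "geom"}}
#     config.set("output", {"path": "output", "skip": None})  # None entries are dropped
#     config.get("model.model_type")  # -> "geom"
#     config.get("model.missing", fallback=1)  # -> 1
#     config.get("output.path", abs_path=True)  # -> absolute path under `config.dir`
#     config.write()  # dumps the data to `settings.toml` in the model root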
--------------------------------------------------------------------------------
/src/hydromt_fiat/components/region.py:
--------------------------------------------------------------------------------
1 | """The region components."""
2 | 
3 | from logging import Logger, getLogger
4 | from pathlib import Path
5 | from typing import cast
6 | 
7 | import geopandas as gpd
8 | from hydromt.model import Model
9 | from hydromt.model.components.spatial import SpatialModelComponent
10 | from pyproj.crs import CRS
11 | 
12 | from hydromt_fiat.utils import REGION
13 | 
14 | __all__ = ["RegionComponent"]
15 | 
16 | logger: Logger = getLogger(f"hydromt.{__name__}")
17 | 
18 | 
19 | class RegionComponent(SpatialModelComponent):
20 |     """Component for the region.
21 | 
22 |     Contains a single geometry at most, i.e. the region.
23 | 
24 |     Parameters
25 |     ----------
26 |     model : Model
27 |         HydroMT model instance (FIATModel).
28 |     filename : str, optional
29 |         The path to use for reading and writing of the component data.
30 |         By default "region.geojson", i.e. one file.
31 |     """
32 | 
33 |     def __init__(
34 |         self,
35 |         model: Model,
36 |         *,
37 |         filename: str = f"{REGION}.geojson",
38 |     ):
39 |         self._data: gpd.GeoDataFrame | None = None
40 |         self._filename: str = filename
41 |         self._init: bool = False  # Prevention of recursion
42 |         super().__init__(
43 |             model=model,
44 |         )
45 | 
46 |     ## Private methods
47 |     def _initialize(self, skip_read=False) -> None:
48 |         """Initialize region."""
49 |         self._init = True
50 |         if self.root.is_reading_mode() and not skip_read:
51 |             self.read()
52 | 
53 |     ## Properties
54 |     @property
55 |     def _region_data(self) -> gpd.GeoDataFrame | None:
56 |         # The region geometry itself serves as the region data
57 |         if self.data is None:
58 |             return None
59 |         return self.data
60 | 
61 |     @property
62 |     def data(self) -> gpd.GeoDataFrame | None:
63 |         """Model geometries.
64 | 
65 |         Return `geopandas.GeoDataFrame`.
66 |         """
67 |         if self._data is None and not self._init:
68 |             self._initialize()
69 |         return self._data
70 | 
71 |     ## I/O methods
72 |     def read(self, filename: str | None = None, **kwargs) -> None:
73 |         """Read model region data.
74 | 
75 |         Parameters
76 |         ----------
77 |         filename : str, optional
78 |             Filename relative to model root.
79 |             If None, the value is taken from the `_filename` attribute,
80 |             by default None.
81 |         **kwargs : dict
82 |             Additional keyword arguments that are passed to the
83 |             `geopandas.read_file` function.
84 |         """
85 |         self.root._assert_read_mode()
86 |         self._initialize(skip_read=True)
87 | 
88 |         # Sort the pathing
89 |         f = filename or self._filename
90 |         read_path = self.root.path / f
91 |         if not read_path.is_file():
92 |             return
93 | 
94 |         # Read the data
95 |         logger.info(f"Reading the model region file at {read_path.as_posix()}")
96 |         data = cast(gpd.GeoDataFrame, gpd.read_file(read_path, **kwargs))
97 |         self.set(data=data)
98 | 
99 |     def write(
100 |         self,
101 |         filename: str | None = None,
102 |         to_wgs84: bool = False,
103 |         **kwargs,
104 |     ) -> None:
105 |         """Write model region data.
106 | 
107 |         Parameters
108 |         ----------
109 |         filename : str, optional
110 |             Filename relative to model root.
111 |             If None, the value is taken from the `_filename` attribute,
112 |             by default None.
113 |         to_wgs84 : bool, optional
114 |             If True, the geoms will be reprojected to WGS84 (EPSG:4326)
115 |             before they are written. By default False.
116 |         **kwargs : dict
117 |             Additional keyword arguments that are passed to the
118 |             `geopandas.to_file` function.
119 |         """
120 |         self.root._assert_write_mode()
121 | 
122 |         # If nothing to write, return
123 |         if self.data is None:
124 |             logger.info("No region data found, skip writing.")
125 |             return
126 | 
127 |         # Sort the filename
128 |         # Hierarchy: 1) signature, 2) default
129 |         filename = filename or self._filename
130 |         write_path = Path(self.root.path, filename)
131 | 
132 |         # Write the file
133 |         data = self.data
134 |         if len(data) == 0:
135 |             logger.warning("Region is empty. Skipping...")
136 |             return
137 | 
138 |         logger.info(f"Writing the model region file to {write_path.as_posix()}")
139 |         # Create the dir if not there
140 |         if not write_path.parent.is_dir():
141 |             write_path.parent.mkdir(parents=True, exist_ok=True)
142 | 
143 |         # Reproject to WGS84 if wanted
144 |         if to_wgs84 and (
145 |             kwargs.get("driver") == "GeoJSON"
146 |             or str(write_path).lower().endswith(".geojson")
147 |         ):
148 |             data.to_crs(epsg=4326, inplace=True)
149 |         # Write
150 |         data.to_file(write_path, **kwargs)
151 | 
152 |     ## Mutating methods
153 |     def clear(self):
154 |         """Clear the region."""
155 |         self._data = None
156 |         self._initialize(skip_read=True)
157 | 
158 |     def reproject(
159 |         self,
160 |         crs: CRS | int | str,
161 |         inplace: bool = False,
162 |     ) -> gpd.GeoDataFrame | None:
163 |         """Reproject the model region.
164 | 
165 |         Parameters
166 |         ----------
167 |         crs : CRS | int | str
168 |             The coordinate system to reproject to.
169 |         inplace : bool, optional
170 |             Whether to do the reprojection in place or return a new GeoDataFrame.
171 |             By default False.
172 |         """
173 |         # Set the crs
174 |         if not isinstance(crs, CRS):
175 |             crs = CRS.from_user_input(crs)
176 | 
177 |         # Check for equal crs
178 |         if self.data is None or crs == self.crs:
179 |             return None
180 | 
181 |         # Reproject
182 |         data = self.data.to_crs(crs)
183 | 
184 |         # Check return or inplace
185 |         if inplace:
186 |             self._data = data
187 |             return None
188 |         return data
189 | 
190 |     def set(
191 |         self,
192 |         data: gpd.GeoDataFrame | gpd.GeoSeries,
193 |         replace: bool = False,
194 |     ) -> None:
195 |         """Set a region.
196 | 
197 |         If a region is already present, the new region will be merged with the one
198 |         already present via a union.
199 | 
200 |         Parameters
201 |         ----------
202 |         data : gpd.GeoDataFrame | gpd.GeoSeries
203 |             New geometry data to add.
204 |         replace : bool, optional
205 |             Whether or not to replace the current region outright. If set to False,
206 |             a union is created between the existing and given geometries.
207 |             By default False.
208 |         """
209 |         if self.data is not None:
210 |             logger.warning("Replacing/ updating region")
211 | 
212 |         if isinstance(data, gpd.GeoSeries):
213 |             data = cast(gpd.GeoDataFrame, data.to_frame())
214 | 
215 |         # Verify that the geom is set to the model crs and, if not, reproject it to the model crs
216 |         model_crs = self.crs
217 |         if model_crs and model_crs != data.crs:
218 |             data.to_crs(model_crs, inplace=True)
219 | 
220 |         # Get rid of columns that aren't geometry
221 |         data = data["geometry"].to_frame()
222 | 
223 |         # Make a union with the current region geodataframe
224 |         cur = self.data
225 |         if cur is not None and not data.equals(cur) and not replace:
226 |             data = data.union(cur)
227 | 
228 |         self._data = data
229 | 
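# A minimal usage sketch (assuming a FIATModel instance `model` and two
# GeoDataFrames `gdf_a` and `gdf_b`; the names are illustrative):
#
#     region = RegionComponent(model)
#     region.set(gdf_a)  # the first call simply stores the geometry
#     region.set(gdf_b)  # merged with the existing region via a union
#     region.set(gdf_b, replace=True)  # or replace the region outright
#     region.reproject(4326, inplace=True)
#     region.write(to_wgs84=True)  # writes region.geojson in the model root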
--------------------------------------------------------------------------------
/tests/drivers/test_osm_driver.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import re
3 | from pathlib import Path
4 | 
5 | import geopandas as gpd
6 | import pandas as pd
7 | import pytest
8 | from hydromt import DataCatalog
9 | from hydromt.data_catalog.sources import GeoDataFrameSource
10 | from osmnx._errors import InsufficientResponseError
11 | from pytest_mock import MockerFixture
12 | 
13 | from hydromt_fiat.drivers import OSMDriver
14 | 
15 | 
16 | @pytest.mark.parametrize("tag_name", ["building", "highway", "landuse", "amenity"])
17 | def test_osm_driver_get_osm_data(
18 |     caplog: pytest.LogCaptureFixture,
19 |     tag_name: str,
20 |     build_region: gpd.GeoDataFrame,
21 |     osm_data_path: Path,
22 | ):
23 |     geom_type = (
24 |         ["LineString", "MultiLineString"]
25 |         if tag_name == "highway"
26 |         else ["MultiPolygon", "Polygon"]
27 |     )
28 |     tag = {tag_name: True}
29 |     polygon = build_region.geometry[0]
30 |     caplog.set_level(logging.INFO)
31 | 
32 |     osm_data = OSMDriver.get_osm_data(polygon=polygon, tag=tag, geom_type=geom_type)
33 |     assert isinstance(osm_data, gpd.GeoDataFrame)
34 |     assert f"Total number of {tag_name} found from OSM:" in caplog.text
35 |     assert not osm_data.empty
36 |     assert osm_data.columns.to_list() == ["geometry", tag_name]
37 |     assert osm_data.intersects(polygon).all()
38 | 
39 | 
40 | def test_osm_driver_get_osm_data_errors(
41 |     caplog: pytest.LogCaptureFixture,
42 |     build_region: gpd.GeoDataFrame,
43 |     osm_data_path: Path,
44 | ):
45 |     geom_type = ["MultiPolygon", "Polygon"]
46 |     tag = {"building": True}
47 |     with pytest.raises(
48 |         TypeError,
49 |         match=re.escape("Given geometry is not a (multi)polygon"),
50 |     ):
51 |         OSMDriver.get_osm_data(build_region, tag=tag, geom_type=geom_type)
52 | 
53 |     caplog.set_level(logging.ERROR)
54 |     tag = {"buildin": True}
55 |     with pytest.raises(
56 |         InsufficientResponseError,
57 |         match="No matching features. Check query location, tags, and log.",
58 |     ):
59 |         OSMDriver.get_osm_data(
60 |             polygon=build_region.geometry[0], tag=tag, geom_type=geom_type
61 |         )
62 | 
63 |     assert f"No OSM data retrieved with the following tags: {tag}" in caplog.text
64 | 
65 | 
66 | def test_osm_driver_get_osm_data_empty(
67 |     caplog: pytest.LogCaptureFixture,
68 |     mocker: MockerFixture,
69 |     build_region: gpd.GeoDataFrame,
70 |     osm_data_path: Path,
71 | ):
72 |     geom_type = ["MultiPolygon", "Polygon"]
73 |     tag = {"building": True}
74 |     caplog.set_level(logging.WARNING)
75 |     mocker.patch(
76 |         "hydromt_fiat.drivers.osm_driver.ox.features.features_from_polygon",
77 |         return_value=gpd.GeoDataFrame(),
78 |     )
79 |     osm_data = OSMDriver.get_osm_data(
80 |         build_region.geometry[0], tag=tag, geom_type=geom_type
81 |     )
82 |     assert not osm_data
83 |     assert "No building features found for polygon" in caplog.text
84 | 
85 | 
86 | def test_osm_driver_read_raise_errors(
87 |     build_region: gpd.GeoDataFrame,
88 |     osm_data_path: Path,
89 | ):
90 |     osm_driver = OSMDriver()
91 |     with pytest.raises(
92 |         ValueError, match="Cannot use multiple uris for reading OSM data."
93 |     ):
94 |         osm_driver.read(uris=["uri1", "uri2"], mask=build_region)
95 | 
96 |     with pytest.raises(ValueError, match="Mask is required to retrieve OSM data"):
97 |         osm_driver.read(uris=["building"], mask=None)
98 | 
99 |     mask = [1, 2, 3, 4]
100 |     err_msg = f"Wrong type: {type(mask)} -> should be GeoDataFrame or GeoSeries"
101 |     with pytest.raises(TypeError, match=err_msg):
102 |         osm_driver.read(uris=["uri"], mask=mask)
103 | 
104 | 
105 | def test_osm_driver_read(
106 |     caplog: pytest.LogCaptureFixture,
107 |     mocker: MockerFixture,
108 |     build_region: gpd.GeoDataFrame,
109 |     osm_data_path: Path,
110 | ):
111 |     osm_driver = OSMDriver()
112 |     mock_method = mocker.patch.object(OSMDriver, "get_osm_data")
113 |     osm_driver.read(uris=["building"], mask=build_region)
114 |     mock_method.assert_called_with(
115 |         polygon=build_region.geometry[0], tag={"building": True}, geom_type=None
116 |     )
117 |     # Test with a mask geodataframe containing two geometries
118 |     mask = build_region.copy()
119 |     mask = pd.concat([mask, build_region])
120 |     caplog.set_level(logging.WARNING)
121 |     osm_driver.read(uris=["building"], mask=mask)
122 |     assert (
123 |         "Received multiple geometries for mask, geometries will be dissolved into"
124 |         " single geometry." in caplog.text
125 |     )
126 | 
127 | 
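# The mocks above suggest roughly how `OSMDriver.get_osm_data` wraps osmnx.
# A simplified, hypothetical sketch (the real implementation in
# src/hydromt_fiat/drivers/osm_driver.py does more validation and logging):
#
#     import osmnx as ox
#
#     def _get_osm_data_sketch(polygon, tag, geom_type=None):
#         # Query all OSM features intersecting the polygon for the given tag
#         gdf = ox.features.features_from_polygon(polygon, tags=tag)
#         if gdf.empty:
#             return None  # logs "No ... features found for polygon"
#         if geom_type is not None:
#             # Keep only the requested geometry types
#             gdf = gdf[gdf.geometry.type.isin(geom_type)]
#         return gdf.reset_index(drop=True)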
128 | def test_osm_driver_write(
129 |     tmp_path: Path,
130 |     caplog: pytest.LogCaptureFixture,
131 |     build_region: gpd.GeoDataFrame,
132 |     osm_data_path: Path,
133 | ):
134 |     osm_driver = OSMDriver()
135 |     # Test with a supported extension
136 |     fp = tmp_path / "test_data.fgb"
137 |     osm_driver.write(path=fp, gdf=build_region)
138 |     assert fp.exists()
139 |     gdf = gpd.read_file(fp)
140 |     assert gdf.equals(build_region)
141 | 
142 |     # Test with an unsupported extension
143 |     fp = tmp_path / "test_data.csv"
144 |     caplog.set_level(logging.WARNING)
145 |     p = osm_driver.write(path=fp, gdf=build_region)
146 |     assert "driver osm has no support for extension .csv" in caplog.text
147 |     assert Path(p).suffix == ".fgb"
148 |     assert Path(p).exists()
149 | 
150 | 
151 | def test_osm_driver_datacatalog(
152 |     tmp_path: Path,
153 |     build_region: gpd.GeoDataFrame,
154 |     build_data_catalog_path: Path,
155 |     osm_data_path: Path,
156 | ):
157 |     dc = DataCatalog(build_data_catalog_path)
158 |     # Create a data catalog source for the osm data and add it to the data catalog
159 |     osm_source = GeoDataFrameSource(
160 |         name="osm_buildings", uri="building", driver="osm", uri_resolver="osm_resolver"
161 |     )
162 |     dc.add_source(name="osm_buildings", source=osm_source)
163 |     assert osm_source == dc.get_source("osm_buildings")
164 | 
165 |     # Read the osm data from the data catalog
166 |     building_data = dc.get_geodataframe("osm_buildings", geom=build_region)
167 |     assert isinstance(building_data, gpd.GeoDataFrame)
168 | 
169 |     # Write the data catalog source to a file
170 |     fp = tmp_path / "test_data.fgb"
171 |     osm_building_source = dc.get_source("osm_buildings")
172 |     osm_building_source.to_file(file_path=fp, mask=build_region)
173 |     assert fp.exists()
174 |     gdf = gpd.read_file(fp)
175 |     assert gdf.columns.to_list() == ["building", "geometry"]
176 | 
177 | 
178 | def test_osm_driver_datacatalog_yml_entry(
179 |     build_region: gpd.GeoDataFrame,
180 |     build_data_catalog_path: Path,
181 |     osm_data_path: Path,
182 | ):
183 |     dc = DataCatalog(build_data_catalog_path)
184 |     # Add a data catalog source as a dict
185 |     data_source_dict = {
186 |         "osm_roads": {
187 |             "uri": "highway",
188 |             "data_type": "GeoDataFrame",
189 |             "uri_resolver": "osm_resolver",
190 |             "driver": {
191 |                 "name": "osm",
192 |                 "options": {
193 |                     "geom_type": ["LineString", "MultiLineString"],
194 |                     "tags": ["motorway", "primary", "secondary", "tertiary"],
195 |                 },
196 |             },
197 |         },
198 |     }
199 |     dc = dc.from_dict(data_source_dict)
200 |     source = dc.get_source("osm_roads")
201 |     osm_roads_data = source.read_data(mask=build_region)
202 |     assert not osm_roads_data.empty
203 |     assert all(
204 |         [
205 |             road_type in ["motorway", "primary", "secondary", "tertiary"]
206 |             for road_type in osm_roads_data["highway"].unique()
207 |         ]
208 |     )
209 |     assert all(
210 |         [
211 |             geom_type in ["LineString", "MultiLineString"]
212 |             for geom_type in osm_roads_data.geometry.type.unique()
213 |         ]
214 |     )
215 | 
--------------------------------------------------------------------------------
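For reference, the `data_source_dict` used in test_osm_driver_datacatalog_yml_entry
would presumably translate to the following entry in a data catalog yml file (a
mechanical, unverified translation of the dict; see examples/data_catalog.yml for
the catalog actually shipped with the repository):

osm_roads:
  uri: highway
  data_type: GeoDataFrame
  uri_resolver: osm_resolver
  driver:
    name: osm
    options:
      geom_type:
        - LineString
        - MultiLineString
      tags:
        - motorway
        - primary
        - secondary
        - tertiary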