├── .testdata └── .gitkeep ├── docs ├── .nojekyll ├── api │ └── .gitkeep ├── images │ └── .gitkeep ├── examples │ ├── example_output │ │ └── .gitkeep │ ├── index.qmd │ ├── clear_notebooks.py │ └── single_event.ipynb ├── _static │ ├── images │ │ ├── FIATScheme.png │ │ ├── deltares-blue.svg │ │ └── deltares-white.svg │ ├── deltares_link.html │ ├── styles.css │ ├── theme-light.scss │ ├── listing_code.html │ ├── theme-dark.scss │ ├── listing_code_medium.html │ ├── listing_code_large.html │ ├── fiat.svg │ └── version.js ├── .gitignore ├── 404.qmd ├── _scripts │ ├── add_packages.ps1 │ ├── add_packages.sh │ └── inv.py ├── user_guide │ ├── data │ │ ├── exposure │ │ │ ├── csv.qmd │ │ │ └── geometries.qmd │ │ ├── index.qmd │ │ ├── exposure.qmd │ │ ├── hazard.qmd │ │ ├── supported.qmd │ │ ├── hazard_maps.qmd │ │ └── vulnerability.qmd │ ├── settings │ │ ├── computation.qmd │ │ └── index.qmd │ └── index.qmd ├── developments │ ├── index.qmd │ └── guide.qmd ├── setup_guide │ ├── general │ │ ├── conda.qmd │ │ └── docker.qmd │ ├── kernel │ │ ├── dev.qmd │ │ ├── install.qmd │ │ └── build.qmd │ └── index.qmd ├── .archive │ ├── active.js │ ├── populate.js │ └── switcher.js ├── info │ ├── models.qmd │ └── faq.qmd └── index.qmd ├── test ├── __init__.py ├── test_log.py ├── test_config.py ├── test_io.py ├── test_model.py ├── test_logic.py ├── test_job.py ├── test_struct.py ├── test_cli.py ├── test_run.py ├── test_checks.py ├── conftest.py └── test_gis.py ├── src └── fiat │ ├── cli │ ├── __init__.py │ ├── action.py │ ├── util.py │ └── formatter.py │ ├── methods │ ├── __init__.py │ ├── util.py │ ├── ead.py │ └── flood.py │ ├── gis │ ├── __init__.py │ ├── util.py │ ├── grid.py │ └── geom.py │ ├── version.py │ ├── models │ ├── __init__.py │ └── util.py │ ├── __init__.py │ ├── error.py │ └── job.py ├── MANIFEST.in ├── .archive ├── ui_fiat │ ├── ui_fiat.rc │ ├── ui_fiat.qrc │ ├── ui_fiat.ico │ ├── ui_fiat.cpp │ ├── main.cpp │ ├── ui_fiat.h │ ├── ui_fiat.vcxproj.filters │ ├── ui_fiat.ui │ └── 
ui_fiat.vcxproj.user ├── delft_fiat.exe.manifest ├── bin.manifest └── ui_fiat.sln ├── .build ├── check_fiat.bat ├── activate.sh ├── activate.bat ├── win64_d.bat ├── setup_gdal.sh ├── win64.bat ├── runtime_hooks.py ├── linux64_build_env.sh ├── win64_d.spec ├── hook-fiat.py ├── hook-osgeo.py ├── build.spec ├── linux64.sh └── fiat_installer.iss ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── feature_request.yaml │ ├── documentation.yaml │ └── bugs.yaml ├── workflows │ ├── linting.yml │ ├── purge_cache.yml │ ├── sonar.yml │ ├── pre-commit.yml │ ├── lock.yml │ ├── version.yml │ ├── docker.yml │ ├── build.yml │ ├── test.yml │ ├── docs.yml │ └── publish.yml ├── dependabot.yml └── pull_request_template.md ├── sonar-project.properties ├── .pre-commit-config.yaml ├── Dockerfile ├── LICENSE ├── README.md ├── .gitignore ├── SETUP.md ├── res ├── logo.svg └── fiat.svg └── make_env.py /.testdata/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/api/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/images/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/examples/example_output/.gitkeep: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /src/fiat/cli/__init__.py: -------------------------------------------------------------------------------- 1 | """Empty file for cli.""" 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | prune src/*.egg-info 2 | recursive-exclude test * 3 | -------------------------------------------------------------------------------- /.archive/ui_fiat/ui_fiat.rc: -------------------------------------------------------------------------------- 1 | IDI_ICON1 ICON DISCARDABLE "ui_fiat.ico" 2 | -------------------------------------------------------------------------------- /.build/check_fiat.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | call "..\bin\Release\fiat.exe" 4 | 5 | pause 6 | -------------------------------------------------------------------------------- /.archive/ui_fiat/ui_fiat.qrc: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /.archive/ui_fiat/ui_fiat.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Deltares/Delft-FIAT/master/.archive/ui_fiat/ui_fiat.ico -------------------------------------------------------------------------------- /.build/activate.sh: -------------------------------------------------------------------------------- 1 | export INCLUDE="${CONDA_PREFIX}/include:$INCLUDE" 2 | export LIB="${CONDA_PREFIX}/lib:$LIB" 3 | -------------------------------------------------------------------------------- /docs/_static/images/FIATScheme.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Deltares/Delft-FIAT/master/docs/_static/images/FIATScheme.png -------------------------------------------------------------------------------- /src/fiat/methods/__init__.py: -------------------------------------------------------------------------------- 1 | """Logic submule of FIAT.""" 2 | 3 | __all__ = ["ead", "flood"] 4 | 5 | from . import ead, flood 6 | -------------------------------------------------------------------------------- /.build/activate.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | set INCLUDE=%CONDA_PREFIX%\Library\include;%INCLUDE% 3 | set LIB=%CONDA_PREFIX%\Library\lib;%LIB% 4 | -------------------------------------------------------------------------------- /src/fiat/gis/__init__.py: -------------------------------------------------------------------------------- 1 | """GIS module of FIAT.""" 2 | 3 | __all__ = ["geom", "grid", "overlay"] 4 | 5 | from . import geom, grid, overlay 6 | -------------------------------------------------------------------------------- /.build/win64_d.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | call activate fiat_build 4 | pyinstaller "win64_d.spec" --distpath ../bin --workpath ../bin/intermediates 5 | 6 | pause 7 | -------------------------------------------------------------------------------- /src/fiat/methods/util.py: -------------------------------------------------------------------------------- 1 | """Calculation utility.""" 2 | 3 | from fiat.util import mean 4 | 5 | AREA_METHODS = { 6 | "max": max, 7 | "mean": mean, 8 | } 9 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | /.quarto/ 2 | /_extensions/ 3 | /_freeze/ 4 | /_inv/ 5 | /_site/ 6 | /api/* 7 | !api/.gitkeep 8 | *.jupyter_cache/ 9 | objects.json 10 | 11 | 
**/*.quarto_ipynb 12 | -------------------------------------------------------------------------------- /src/fiat/version.py: -------------------------------------------------------------------------------- 1 | """FIAT version.""" 2 | 3 | main = 0 4 | major = 5 5 | minor = 0 6 | build = 0 7 | suffix = ".dev" 8 | 9 | __version__ = f"{main}.{major}.{minor}{suffix}" 10 | -------------------------------------------------------------------------------- /.archive/ui_fiat/ui_fiat.cpp: -------------------------------------------------------------------------------- 1 | #include "ui_fiat.h" 2 | 3 | ui_fiat::ui_fiat(QWidget *parent) 4 | : QMainWindow(parent) 5 | { 6 | ui.setupUi(this); 7 | } 8 | 9 | ui_fiat::~ui_fiat() 10 | {} 11 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text eol=lf 2 | *.gpkg binary 3 | *.yaml text eol=lf 4 | *.yml text eol=lf 5 | *.png binary 6 | *.nc binary 7 | 8 | # GitHub syntax highlighting 9 | pixi.lock linguist-language=YAML 10 | -------------------------------------------------------------------------------- /docs/404.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: Page Not Found 3 | --- 4 | 5 | The page you requested cannot be found (perhaps it was moved or renamed). 6 | 7 | You may want to try searching to find the page's new location. 
8 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Ask a question 4 | url: https://github.com/Deltares/Delft-FIAT/discussions 5 | about: Ask questions and discuss with other community members 6 | -------------------------------------------------------------------------------- /.archive/ui_fiat/main.cpp: -------------------------------------------------------------------------------- 1 | #include "ui_fiat.h" 2 | #include 3 | 4 | int main(int argc, char *argv[]) 5 | { 6 | QApplication a(argc, argv); 7 | ui_fiat w; 8 | w.show(); 9 | return a.exec(); 10 | } 11 | -------------------------------------------------------------------------------- /src/fiat/models/__init__.py: -------------------------------------------------------------------------------- 1 | """Entry point for models.""" 2 | 3 | __all__ = ["GeomModel", "GridModel", "worker_geom", "worker_grid"] 4 | 5 | from . import worker_geom, worker_grid 6 | from .geom import GeomModel 7 | from .grid import GridModel 8 | -------------------------------------------------------------------------------- /.build/setup_gdal.sh: -------------------------------------------------------------------------------- 1 | # A small script to unsure a reasonably recent gdal on the linux machine 2 | sudo add-apt-repository ppa:ubuntugis/ubuntugis-unstable -y 3 | sudo apt update 4 | sudo apt -y install gdal-bin 5 | sudo apt-get -y install libgdal-dev 6 | -------------------------------------------------------------------------------- /docs/examples/index.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Examples" 3 | --- 4 | 5 | This part of the documentation is under development. In the future, it will show more examples of how to use FIAT. 
Until then, this minimal example is available:
sonar.projectName=Delft_FIAT 7 | sonar.projectVersion=0.1.0rc2 8 | 9 | sonar.sources=src/fiat/ 10 | sonar.tests=test/ 11 | 12 | sonar.python.coverage.reportPaths=coverage.xml 13 | 14 | sonar.sourceEncoding=UTF-8 15 | -------------------------------------------------------------------------------- /.archive/delft_fiat.exe.manifest: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /.archive/bin.manifest: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/_scripts/add_packages.ps1: -------------------------------------------------------------------------------- 1 | # Define your list of packages 2 | $packages = @('machow/quartodoc', 'pandoc-ext/section-bibliographies', 'quarto-ext/fontawesome', 'quarto-ext/include-code-files') 3 | 4 | # Iterate over the packages 5 | foreach ($package in $packages) { 6 | # Start the process 7 | $process = Start-Process quarto -ArgumentList "add", $package -PassThru -Wait -NoNewWindow 8 | } 9 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | # Since the conda deps are also built from pyproject.toml it should work well enough 5 | - package-ecosystem: "pip" 6 | directory: "/" # Location of package manifests 7 | schedule: 8 | interval: "weekly" 9 | - package-ecosystem: "github-actions" 10 | directory: "/" 11 | schedule: 12 | # Check for updates to GitHub Actions every week 13 | interval: "weekly" 14 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: 
-------------------------------------------------------------------------------- 1 | ## Issue addressed 2 | Fixes # 3 | 4 | ## Explanation 5 | Explain how you addressed the bug/feature request, what choices you made and why. 6 | 7 | ## Checklist 8 | - [ ] Updated tests or added new tests 9 | - [ ] Branch is up to date with `master` 10 | - [ ] Tests & pre-commit hooks pass 11 | - [ ] Updated documentation if needed 12 | 13 | ## Additional Notes (optional) 14 | Add any additional notes or information that may be helpful. 15 | -------------------------------------------------------------------------------- /docs/_static/styles.css: -------------------------------------------------------------------------------- 1 | /* css styles */ 2 | 3 | .only-on-dark { 4 | display: none; 5 | } 6 | 7 | @media (prefers-color-scheme: dark) { 8 | .only-on-dark { 9 | display: block; 10 | } 11 | 12 | .only-on-light { 13 | display: none; 14 | } 15 | } 16 | 17 | [aria-label="Deltares-Image"] { 18 | height: 30px; 19 | } 20 | 21 | .navbar-right .fa-github { 22 | font-size: 40px; /* Adjust the value as needed */ 23 | } 24 | 25 | .active-item { 26 | font-weight: bold; 27 | text-decoration: underline; 28 | } 29 | -------------------------------------------------------------------------------- /docs/_scripts/add_packages.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -f 2 | 3 | # Define your list of packages 4 | set packages {machow/quartodoc pandoc-ext/section-bibliographies quarto-ext/fontawesome quarto-ext/include-code-files} 5 | 6 | # Iterate over the packages 7 | foreach package $packages { 8 | spawn quarto add $package 9 | expect "Do you trust the authors of this extension" 10 | send -- "y\r" 11 | expect "Would you like to continue" 12 | send -- "y\r" 13 | expect "View documentation using default browser" 14 | send -- "n\r" 15 | expect eof 16 | } 17 | -------------------------------------------------------------------------------- 
/.github/workflows/purge_cache.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Purge Cache 3 | on: 4 | workflow_dispatch: 5 | workflow_call: 6 | 7 | jobs: 8 | cleanup: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Check out code 12 | uses: actions/checkout@v4 13 | 14 | - name: Cleanup caches 15 | run: | 16 | if gh cache list | grep -vq "No cache"; then 17 | gh cache delete -a 18 | else 19 | echo "No caches found. skipping..." 20 | fi 21 | env: 22 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v5.0.0 5 | hooks: 6 | - id: trailing-whitespace 7 | - id: end-of-file-fixer 8 | - id: check-yaml 9 | - id: check-added-large-files 10 | - id: check-ast 11 | - id: check-json 12 | - id: debug-statements 13 | - id: mixed-line-ending 14 | - repo: https://github.com/astral-sh/ruff-pre-commit 15 | rev: v0.6.9 16 | hooks: 17 | - id: ruff 18 | args: [--fix, --exit-non-zero-on-fix] 19 | - id: ruff-format 20 | -------------------------------------------------------------------------------- /docs/examples/clear_notebooks.py: -------------------------------------------------------------------------------- 1 | """Clear Jupyter output.""" 2 | 3 | import subprocess 4 | from pathlib import Path 5 | 6 | if __name__ == "__main__": 7 | p = Path(__file__).parent 8 | 9 | # Loop through notebooks 10 | for nb in p.glob("*ipynb"): 11 | _file = Path(nb) 12 | 13 | # Execute jupyter clear 14 | subprocess.call( 15 | [ 16 | "jupyter", 17 | "nbconvert", 18 | "--clear-output", 19 | "--inplace", 20 | _file.as_posix(), 21 | ] 22 | ) 23 | -------------------------------------------------------------------------------- /src/fiat/__init__.py: 
-------------------------------------------------------------------------------- 1 | """FIAT.""" 2 | 3 | ################################################## 4 | # Organisation: Deltares 5 | ################################################## 6 | # Author: B.W. Dalmijn 7 | # E-mail: brencodeert@outlook.com 8 | ################################################## 9 | # License: MIT license 10 | # 11 | # 12 | # 13 | # 14 | ################################################## 15 | from osgeo import osr 16 | 17 | osr.UseExceptions() 18 | 19 | from .cfg import Configurations 20 | from .fio import open_csv, open_geom, open_grid 21 | from .models import GeomModel, GridModel 22 | from .version import __version__ 23 | -------------------------------------------------------------------------------- /.build/runtime_hooks.py: -------------------------------------------------------------------------------- 1 | """Runtime hooks for pyinstaller.""" 2 | 3 | import os 4 | import sys 5 | from pathlib import Path 6 | 7 | # Path to executable 8 | cwd = Path(sys.executable).parent 9 | 10 | # Paths to libaries/ data 11 | os.environ["GDAL_DATA"] = str(Path(cwd, "bin", "share", "gdal")) 12 | os.environ["GDAL_DRIVER_PATH"] = str(Path(cwd, "bin", "gdalplugins")) 13 | # Newer versions of GDAL and PROJ 14 | os.environ["PROJ_DATA"] = str(Path(cwd, "bin", "share", "proj")) 15 | # Older versions of GDAL and PROJ 16 | os.environ["PROJ_LIB"] = str(Path(cwd, "bin", "share", "proj")) 17 | # Append to path 18 | sys.path.append(str(Path(cwd, "bin", "share"))) 19 | -------------------------------------------------------------------------------- /.archive/ui_fiat/ui_fiat.vcxproj.filters: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /src/fiat/error.py: 
-------------------------------------------------------------------------------- 1 | """Custom errors for FIAT.""" 2 | 3 | 4 | class DriverNotFoundError(Exception): 5 | """Custom driver not found class.""" 6 | 7 | def __init__(self, gog, path): 8 | self.base = f"{gog} data" 9 | self.msg = f"Extension of file: {path.name} not recoqnized" 10 | super(DriverNotFoundError, self).__init__(f"{self.base} -> {self.msg}") 11 | 12 | def __str__(self): 13 | return f"{self.base} -> {self.msg}" 14 | 15 | 16 | class FIATDataError(Exception): 17 | """Custom FIAT error class.""" 18 | 19 | def __init__(self, msg): 20 | self.base = "Data error" 21 | self.msg = msg 22 | 23 | def __str__(self): 24 | return f"{self.base} -> {self.msg}" 25 | -------------------------------------------------------------------------------- /docs/_static/theme-light.scss: -------------------------------------------------------------------------------- 1 | /*-- scss:defaults --*/ 2 | 3 | $background: #212529; 4 | $highlight: #292e33; 5 | $highlight: #373a3c; 6 | $links: #080c80; 7 | $text: #ffffff; 8 | 9 | // $body-bg: $background; 10 | // $body-color: $text; 11 | $link-color: $links; 12 | $navbar-hl: $links ; 13 | // $navbar-bg: $highlight; 14 | 15 | .nav-tabs .nav-link, 16 | .nav-tabs .nav-link.active, 17 | .nav-tabs .nav-item.show .nav-link { 18 | border: 1px solid rgb(222, 226, 230) 19 | } 20 | 21 | .nav-tabs .nav-link:hover { 22 | background-color: $highlight; 23 | border-color: $links; 24 | border: 1px solid $links; 25 | } 26 | 27 | .footer-image-dark { 28 | display: none; 29 | } 30 | .footer-image-light { 31 | display: inline; 32 | } 33 | -------------------------------------------------------------------------------- /docs/user_guide/data/exposure/csv.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: CSV 3 | --- 4 | 5 | The exposure data CSV file contains information about each asset in the area of interest that is optional for the damage 
calculation. Each row represents one asset, such as a building, road segment, or utility, and each column represents an attribute of the asset, such as its location, elevation or maximum potential damage value. 6 | For users that want to create their own exposure data, or modify existing exposure data, a description of the default fields (columns) in the exposure data CSV can be found [here](./data.qmd). 7 | 8 | ::: {.callout-tip title="Tip"} 9 | Users can add as many columns to the exposure CSV as they want, for example, to later identify the most damaged administrative areas. All data field will also be saved in the output of FIAT. 10 | ::: 11 | -------------------------------------------------------------------------------- /.github/workflows/sonar.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Code Analysis 3 | on: 4 | push: 5 | branches: 6 | - master 7 | paths: 8 | - src/fiat/* 9 | - test/* 10 | pull_request: 11 | paths: 12 | - src/fiat/* 13 | - test/* 14 | types: 15 | - opened 16 | - synchronize 17 | - reopened 18 | 19 | jobs: 20 | sonarcloud: 21 | env: 22 | PYVERSION: "3.13" 23 | 24 | name: SonarCloud 25 | runs-on: ubuntu-latest 26 | 27 | steps: 28 | - name: Checkout code 29 | uses: actions/checkout@v4 30 | with: 31 | fetch-depth: 0 32 | 33 | - name: SonarCloud Scan 34 | uses: SonarSource/sonarcloud-github-action@master 35 | env: 36 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 37 | SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} 38 | -------------------------------------------------------------------------------- /docs/developments/index.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Developments" 3 | --- 4 | 5 | Welcome to Delft-FIAT. Here we present an overview of what's going on. 6 | 7 | ## Roadmap 8 | What are we up to and what are the goals. 
9 | 10 | ### Q2/ Q3 2025 11 | - Further stabilize the api and cli 12 | - Improve the grid model performance 13 | - Probably port sections to Cython 14 | 15 | ## What's new... 16 | In order to see what has been done and what has been changed, please see the [changelog](../changelog.qmd). 17 | 18 | ## Contribute? 19 | We welcome contributions like bug reports, bug fixes, documentation improvements, enhancements, and ideas. 20 | Our [developer guide](guide.qmd) explains how one can contribute to Delft-FIAT! 21 | 22 | ## Licencing 23 | The [MIT license](https://github.com/Deltares/Delft-FIAT/blob/master/LICENSE) applies to all contributions. 24 | -------------------------------------------------------------------------------- /docs/setup_guide/general/conda.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Setup Miniforge3" 3 | --- 4 | 5 | In order to develop on **FIAT** locally, the Python package manager **Miniforge3** is recommended. 6 | 7 | Download and install [Miniforge3](https://github.com/conda-forge/miniforge#mambaforge) 8 | 9 | Make sure the conda binary (and mamba) is added to PATH. In windows this is simply done via the 'set environment variables' screen, on linux one can append the 'PATH' variable via the `.bashrc` configurations file (or another rc file corresponding with the shell in use). 10 | Initialize conda by running the following command in your shell. 
11 | 12 | ```bash 13 | conda init 14 | ``` 15 | 16 | Depending on your computer settings, you might also have to run the following in a Powershell terminal as administrator: 17 | 18 | ```bash 19 | Set-ExecutionPolicy -ExecutionPolicy RemoteSigned 20 | ``` 21 | -------------------------------------------------------------------------------- /.archive/ui_fiat/ui_fiat.ui: -------------------------------------------------------------------------------- 1 | 2 | ui_fiatClass 3 | 4 | 5 | ui_fiatClass 6 | 7 | 8 | 9 | 0 10 | 0 11 | 600 12 | 400 13 | 14 | 15 | 16 | ui_fiat 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Pre-commit auto update 3 | on: 4 | schedule: 5 | # At 03:00 on day 3 of the month 6 | - cron: "0 3 3 * *" 7 | # on demand 8 | workflow_dispatch: 9 | jobs: 10 | auto-update: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | with: 15 | ssh-key: ${{ secrets.SSH_PRIVATE_KEY }} 16 | - uses: actions/setup-python@v5 17 | with: 18 | python-version: "3.12" 19 | - uses: browniebroke/pre-commit-autoupdate-action@main 20 | - uses: peter-evans/create-pull-request@v7 21 | if: always() 22 | with: 23 | token: ${{ secrets.GITHUB_TOKEN }} 24 | branch: update/pre-commit-hooks 25 | title: Update pre-commit hooks 26 | commit-message: "Update pre-commit hooks" 27 | body: Update versions of pre-commit hooks to latest version. 
28 | author: "GitHub " 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bookworm-slim AS base 2 | ARG PIXIENV 3 | ARG UID=1000 4 | RUN apt-get update && apt-get install -y curl && apt-get install -y vim && apt-get install -y binutils 5 | 6 | RUN useradd deltares 7 | RUN usermod -u ${UID} deltares 8 | USER deltares 9 | WORKDIR /home/deltares 10 | 11 | RUN curl -fsSL https://pixi.sh/install.sh | bash 12 | ENV PATH=/home/deltares/.pixi/bin:$PATH 13 | COPY pixi.lock pyproject.toml README.md ./ 14 | COPY --chown=deltares:deltares src/fiat ./src/fiat 15 | 16 | RUN chmod u+x src/ \ 17 | && pixi install -e ${PIXIENV} \ 18 | && rm -rf .cache \ 19 | && find .pixi -type f -name "*.pyc" -delete 20 | 21 | # Workaround: write a file that runs pixi with correct environment. 22 | # This is needed because the argument is not passed to the entrypoint. 23 | ENV RUNENV="${PIXIENV}" 24 | RUN echo "pixi run --locked -e ${RUNENV} \$@" > run_pixi.sh \ 25 | && chown deltares:deltares run_pixi.sh \ 26 | && chmod u+x run_pixi.sh 27 | ENTRYPOINT ["bash", "run_pixi.sh"] 28 | CMD ["fiat"] 29 | -------------------------------------------------------------------------------- /docs/setup_guide/kernel/dev.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Development install" 3 | --- 4 | 5 | This is for those who wish to contribute to the development of FIAT. 6 | 7 | - First, clone the FIAT repository on Github into a local directory of choice: 8 | 9 | ```bash 10 | cd ~/{your path} 11 | 12 | git clone https://github.com/Deltares/Delft-FIAT.git fiat 13 | ``` 14 | 15 | - Create a new development environment. Make sure you either have tomli or tomllib (build-in with Python 3.11) in your base enviroment. 
Go into your cloned FIAT repository folder and create the environment file by running the *make_env.py* script: 16 | 17 | ```bash 18 | cd ~/{your path}/fiat 19 | 20 | python make_env.py dev 21 | ``` 22 | 23 | - Then, create and activate the new environment in conda: 24 | 25 | ```bash 26 | conda env create -f environment.yml 27 | 28 | conda activate fiat_dev 29 | ``` 30 | 31 | - To install all the required dependencies, run: 32 | 33 | ```bash 34 | pip install -e . 35 | ``` 36 | 37 | There you go. FIAT is now installed on your local machine for development purposes. 38 | -------------------------------------------------------------------------------- /docs/setup_guide/index.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Setup guide" 3 | --- 4 | 5 | ::: column-page-inset-right 6 | {{< include /_static/listing_code.html >}} 7 | ```{=html} 8 | 9 | 26 | 27 | ``` 28 | ::: 29 | 30 | ## Overview 31 | FIAT requires Python 3.10 or greater, a [package manager](general/conda.qmd), and Git to be installed. After that, FIAT can be [installed](kernel/install.qmd) for use or for [development](kernel/dev.qmd), or it can be [built](kernel/build.qmd) as an application/executable. 
32 | -------------------------------------------------------------------------------- /docs/.archive/active.js: -------------------------------------------------------------------------------- 1 | window.onload = function() { 2 | // Select corrent dropdown menu 3 | var dropdownMenu = document.querySelector('#nav-menu-select-version').nextElementSibling; 4 | 5 | // Get all dropdown items within the specific dropdown menu 6 | var dropdownItems = dropdownMenu.querySelectorAll('.dropdown-item'); 7 | 8 | // Get the current page in chunks 9 | var currentPagePath = window.location.pathname.split('/'); 10 | 11 | for (var i = 0; i < dropdownItems.length; i++) { 12 | // Get textcontent 13 | var textContent = dropdownItems[i].querySelector('.dropdown-text').textContent; 14 | 15 | // Get the index of the current version 16 | var index = currentPagePath.indexOf(textContent); 17 | 18 | if (index !== -1) { 19 | // Remove the active-item class from all items 20 | for (var j = 0; j < dropdownItems.length; j++) { 21 | dropdownItems[j].classList.remove('active-item'); 22 | } 23 | 24 | dropdownItems[i].classList.add('active-item'); 25 | break 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Deltares 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.archive/ui_fiat.sln: -------------------------------------------------------------------------------- 1 | 2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio Version 17 4 | VisualStudioVersion = 17.4.33213.308 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ui_fiat", "ui_fiat\ui_fiat.vcxproj", "{14B9FD82-FD06-4344-A34F-FF90D3557B27}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|x64 = Debug|x64 11 | Release|x64 = Release|x64 12 | EndGlobalSection 13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 14 | {14B9FD82-FD06-4344-A34F-FF90D3557B27}.Debug|x64.ActiveCfg = Debug|x64 15 | {14B9FD82-FD06-4344-A34F-FF90D3557B27}.Debug|x64.Build.0 = Debug|x64 16 | {14B9FD82-FD06-4344-A34F-FF90D3557B27}.Release|x64.ActiveCfg = Release|x64 17 | {14B9FD82-FD06-4344-A34F-FF90D3557B27}.Release|x64.Build.0 = Release|x64 18 | EndGlobalSection 19 | GlobalSection(SolutionProperties) = preSolution 20 | HideSolutionNode = FALSE 21 | EndGlobalSection 22 | GlobalSection(ExtensibilityGlobals) = postSolution 23 | SolutionGuid = {D4A36270-3FD5-4077-B3BB-668C9BCC89E7} 24 | EndGlobalSection 25 | EndGlobal 26 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # Delft-FIAT: Fast Impact Assessment Tool 2 | [![PyPI Latest Release](https://img.shields.io/pypi/v/delft_fiat.svg)](https://pypi.org/project/Delft-FIAT/) [![Conda Version](https://img.shields.io/conda/v/conda-forge/delft_fiat)](https://anaconda.org/conda-forge/delft_fiat) [![Coverage](https://codecov.io/github/Deltares/Delft-FIAT/coverage.svg?branch=master)](https://codecov.io/github/Deltares/Delft-FIAT) ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/Deltares/Delft-FIAT/build.yml) 3 | 4 | Combined assessment for spatial (raster) and object (vector). 5 | 6 | A python package for quickly estimating damages as a result of hazards. 7 | This can be done for both vector based data and raster based data. 8 | 9 | ## Information 10 | For more information, please visit the [documentation](https://deltares.github.io/Delft-FIAT/). 11 | 12 | ## Usage 13 | Although the package is called `Delft-FIAT`, the package name in Python is `fiat`. 14 | 15 | ``` 16 | # A minimalistic example 17 | import fiat 18 | from fiat.fio import * 19 | ``` 20 | 21 | ## License 22 | [MIT](https://github.com/Deltares/Delft-FIAT/blob/master/LICENSE) 23 | -------------------------------------------------------------------------------- /test/test_log.py: -------------------------------------------------------------------------------- 1 | import io 2 | from pathlib import Path 3 | 4 | from fiat.log import CHandler, Logger, MessageFormatter, spawn_logger 5 | 6 | 7 | def test_stream(log1, log2): 8 | stream = io.StringIO() 9 | sl = CHandler(stream=stream) 10 | sl.set_formatter(MessageFormatter("{message}")) 11 | 12 | sl.emit(log1) 13 | sl.emit(log2) 14 | 15 | stream.seek(0) 16 | assert stream.readline().strip() == "Hello!" 
17 | 18 | 19 | def test_log(tmp_path): 20 | log = Logger("test_log", level=2) 21 | log.add_handler( 22 | level=2, 23 | ) 24 | log.add_file_handler( 25 | str(tmp_path), 26 | filename="test_log", 27 | ) 28 | child_log = spawn_logger("test_log.child") 29 | 30 | assert id(log) != id(child_log) 31 | 32 | log.debug("This message should not be output") 33 | log.info("Start of test logging") 34 | log.warning("Warning about stuff") 35 | log.error("Something has really gone wrong...") 36 | child_log.error("I also quit!") 37 | log.dead("Bye") 38 | 39 | del child_log 40 | del log 41 | 42 | fh = open(Path(str(tmp_path), "test_log.log"), mode="r") 43 | 44 | assert sum(1 for _ in fh) == 5 45 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yaml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Propose an improvement for Delft-FIAT 3 | # title: "Improvement: " 4 | labels: [enhancement] 5 | 6 | body: 7 | - type: dropdown 8 | id: checks 9 | attributes: 10 | description: What kind of feature request is this? 
11 | label: Feature type 12 | options: 13 | - Adding new functionality 14 | - Changing existing functionality 15 | - Removing existing functionality 16 | - type: textarea 17 | id: description 18 | attributes: 19 | label: Improvement Description 20 | description: > 21 | Please provide a clear and concise description of what your proposed improvement is: 22 | validations: 23 | required: true 24 | - type: textarea 25 | id: feature 26 | attributes: 27 | label: Implementation Description 28 | description: > 29 | Please describe how this would be implemented: 30 | validations: 31 | required: false 32 | - type: textarea 33 | id: context 34 | attributes: 35 | label: Additional Context 36 | description: > 37 | Please add any other context about the improvement here: 38 | -------------------------------------------------------------------------------- /.archive/ui_fiat/ui_fiat.vcxproj.user: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | false 5 | 6 | 7 | PATH=$(QtDllPath);%PATH% 8 | $(LocalDebuggerEnvironment) 9 | ;$(TargetDir)\bin 10 | 11 | WindowsLocalDebugger 12 | 13 | 14 | PATH=$(QtDllPath);%PATH% 15 | $(LocalDebuggerEnvironment) 16 | ;$(TargetDir)\bin 17 | 18 | WindowsLocalDebugger 19 | 20 | 21 | 2023-03-21T08:22:00.8708908Z 22 | 23 | 24 | 2023-03-21T08:22:01.0108830Z 25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/user_guide/data/index.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Data" 3 | --- 4 | 5 | FIAT is data agnostic; what you put in is what you get out. Please follow the guidelines in this documentation to build your FIAT model and pay close attention to the validation of data together with local experts. 
6 | 7 | ## Types of Data 8 | Here a quick overview is given of the required data: 9 | 10 | - [Hazard](hazard.qmd) 11 | - Hazard values in a gridded format 12 | - [Exposure](exposure.qmd) 13 | - Exposure data either in vector or gridded format 14 | - [Vulnerability](vulnerability.qmd) 15 | - Data relating the hazard intensity to damage fraction 16 | 17 | These are the types of data that FIAT needs in order to run properly. 18 | 19 | ## Folder structure 20 | It is recommended to set up a folder structure as shown below to ensure that FIAT can find each data file and run smoothly. The files inside the folders are example files and do not need to be of those data types or named as such. 21 | 22 | ``` 23 | 24 | ├───📁 FIAT/ 25 | | 26 | ├───📁 hazard/ 27 | │ └───📄 hazard_map.nc 28 | | 29 | ├───📁 exposure/ 30 | │ ├───📄 buildings.gpkg 31 | │ └───📄 exposure.csv 32 | | 33 | ├───📁 vulnerability/ 34 | │ └───📄 vulnerability_curves.csv 35 | | 36 | └───📄 settings.toml 37 | 38 | ``` 39 | -------------------------------------------------------------------------------- /test/test_config.py: -------------------------------------------------------------------------------- 1 | from fiat.cfg import Configurations 2 | 3 | 4 | def test_settigs(configs): 5 | cfg = Configurations() 6 | assert len(cfg.keys()) == 1 7 | assert "output.path" in cfg 8 | assert cfg.get("output.path").name == "output" 9 | assert cfg.filepath.name == "< Configurations-in-memory >" 10 | 11 | cfg = Configurations(some_var="some_value") 12 | assert len(cfg.keys()) == 2 13 | assert "some_var" in cfg 14 | 15 | file = configs["geom_event"].get("hazard.file") 16 | cfg = Configurations(**{"hazard": {"file": file}, "some_file": "data2.dat"}) 17 | assert len(cfg.keys()) == 3 18 | assert cfg.get("hazard.file").is_absolute() 19 | assert isinstance(cfg.get("some_file"), str) 20 | 21 | 22 | def test_settings_from_file(settings_files): 23 | cfg = Configurations.from_file(settings_files["geom_risk"]) 24 | 25 | # Assert path to 
itself 26 | assert cfg.path.name == ".testdata" 27 | assert cfg.filepath.name == "geom_risk.toml" 28 | 29 | # Assert generated kwargs functionality 30 | haz_kw = cfg.generate_kwargs("hazard.settings") 31 | assert "var_as_band" in haz_kw 32 | 33 | # Update 34 | cfg.update({"output.path": "other", "_some_var": "some_value"}) 35 | assert cfg.get("_some_var") == "some_value" 36 | assert cfg.get("output.path").is_absolute() 37 | -------------------------------------------------------------------------------- /docs/setup_guide/kernel/install.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Installing FIAT" 3 | --- 4 | 5 | ## For Use 6 | FIAT can be installed in an existing environment or the user can create a new environment. We recommend creating a new environment to avoid issues with other dependencies and packages. 7 | 8 | ### New environment 9 | To create a new environment follow the steps below. 10 | 11 | - Create a new environment: 12 | 13 | ```bash 14 | conda create -n fiat python=3.12.* 15 | ``` 16 | - Activate the environment: 17 | 18 | ```bash 19 | conda activate fiat 20 | ``` 21 | 22 | - Install FIAT from [pypi](): 23 | 24 | ```bash 25 | pip install delft_fiat 26 | ``` 27 | 28 | - Install FIAT from [conda-forge]() 29 | 30 | ```bash 31 | # When conda-forge is added as a channel 32 | conda install delft_fiat 33 | 34 | # When not 35 | conda install delft_fiat -c conda-forge 36 | ``` 37 | 38 | - Install FIAT from Github. After creating the new environment, you need to install all dependencies from the Deltares Github repository.
You can use **pip install** to do so: 39 | 40 | ```bash 41 | pip install git+https://github.com/Deltares/Delft-FIAT.git 42 | ``` 43 | 44 | ### Existing environment 45 | If you want to install FIAT into an existing environment, simply activate the desired environment and run: 46 | 47 | ```bash 48 | pip install git+https://github.com/Deltares/Delft-FIAT.git 49 | ``` 50 | -------------------------------------------------------------------------------- /.build/linux64_build_env.sh: -------------------------------------------------------------------------------- 1 | echo "Setup paths relative to this script" 2 | #!/usr/bin/bash 3 | # Absolute path to this script, e.g. /home/user/bin/foo.sh 4 | SCRIPT=$(readlink -f "$0") 5 | # Absolute path this script is in, thus /home/user/bin 6 | SCRIPTPATH=$(dirname "$SCRIPT") 7 | 8 | # Setup conda stuff 9 | echo "Locating conda.." 10 | conda_executable=$(which conda) 11 | conda_base_dir=$(dirname $(dirname $conda_executable)) 12 | source $conda_base_dir/etc/profile.d/conda.sh 13 | 14 | # Extract the GDAL version number 15 | echo "Get GDAL version" 16 | version=$(gdalinfo --version | cut -d' ' -f2) 17 | # Remove the annoying comma 18 | version=${version%,} 19 | 20 | # Make the yaml and create the environment 21 | echo "Create the fiat_build env" 22 | python $SCRIPTPATH/../make_env.py build -p 3.11.* 23 | mamba env create -f $SCRIPTPATH/../environment.yml 24 | 25 | # Set the appropriate env variables for GDAL 26 | echo "Setup some important GDAL env variables" 27 | export CPLUS_INCLUDE_PATH=/usr/include/gdal 28 | export C_INCLUDE_PATH=/usr/include/gdal 29 | 30 | # Install GDAL and FIAT 31 | echo "Install GDAL and FIAT" 32 | conda activate fiat_build 33 | pip install --no-cache-dir gdal==$version 34 | pip install -e $SCRIPTPATH/.. 
35 | conda deactivate 36 | 37 | # Clear the conda and pip cache 38 | echo "Clear the python cache" 39 | rm -rf $conda_base_dir/pkgs/* 40 | pip cache purge 41 | -------------------------------------------------------------------------------- /.github/workflows/lock.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Lockfile Update 3 | 4 | permissions: 5 | contents: write 6 | pull-requests: write 7 | 8 | on: 9 | schedule: 10 | - cron: 0 5 1 * * # First of every month 11 | workflow_dispatch: 12 | 13 | jobs: 14 | pixi-update: 15 | defaults: 16 | run: 17 | shell: bash -e -l {0} 18 | timeout-minutes: 30 19 | 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | os: ["ubuntu-latest"] 24 | include: 25 | - os: ubuntu-latest 26 | label: linux-64 27 | 28 | name: ${{ matrix.label }} 29 | runs-on: ${{ matrix.os }} 30 | 31 | steps: 32 | - name: Checkout code 33 | uses: actions/checkout@v4 34 | 35 | - name: Set up pixi 36 | uses: prefix-dev/setup-pixi@v0.8.13 37 | with: 38 | run-install: false 39 | 40 | - name: Update lockfile 41 | run: | 42 | set -o pipefail 43 | pixi update --json | pixi exec pixi-diff-to-markdown >> diff.md 44 | 45 | - name: Create pull request 46 | uses: peter-evans/create-pull-request@v7 47 | with: 48 | token: ${{ secrets.GITHUB_TOKEN }} 49 | commit-message: Update pixi lockfile 50 | title: Update pixi lockfile 51 | body-path: diff.md 52 | branch: update-pixi 53 | base: master 54 | labels: pixi 55 | delete-branch: true 56 | add-paths: pixi.lock 57 | -------------------------------------------------------------------------------- /.github/workflows/version.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Versioning 3 | 4 | on: 5 | repository_dispatch: 6 | types: 7 | - docs-updated 8 | workflow_dispatch: 9 | 10 | jobs: 11 | version: 12 | defaults: 13 | run: 14 | shell: bash -l {0} 15 | 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | os: 
["ubuntu-latest"] 20 | include: 21 | - os: ubuntu-latest 22 | label: linux-64 23 | 24 | name: ${{ matrix.os }} 25 | runs-on: ${{ matrix.os }} 26 | 27 | steps: 28 | - name: Checkout code 29 | uses: actions/checkout@v4 30 | with: 31 | ref: gh-pages 32 | 33 | - name: Install package(s) 34 | run: | 35 | sudo apt install -y jq 36 | 37 | - name: Run versioning script 38 | id: check 39 | run: | 40 | ./version.sh 41 | continue-on-error: true 42 | 43 | - name: Adjust stable symbolic link 44 | if: ${{ steps.check.outcome == 'success' }} 45 | run: | 46 | ln -sfn ${{ env.NEW_STABLE_VERSION }} stable 47 | 48 | - name: Commit and push changes 49 | if: ${{ steps.check.outcome == 'success' }} 50 | run: | 51 | git config --local user.email "action@github.com" 52 | git config --local user.name "GitHub Action" 53 | git add . 54 | git commit -m "Added version ${{ env.NEW_STABLE_VERSION }}" 55 | git push 56 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Docker Test 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | paths: 9 | - Dockerfile 10 | - pyproject.toml 11 | - pixi.lock 12 | - .github/workflows/docker.yml 13 | pull_request: 14 | branches: 15 | - master 16 | paths: 17 | - Dockerfile 18 | - pyproject.toml 19 | - pixi.lock 20 | - .github/workflows/docker.yml 21 | 22 | jobs: 23 | docker: 24 | defaults: 25 | run: 26 | shell: bash -l {0} 27 | 28 | strategy: 29 | fail-fast: false 30 | matrix: 31 | os: ["ubuntu-latest"] 32 | include: 33 | - os: ubuntu-latest 34 | label: linux-64 35 | 36 | name: ${{ matrix.label }} 37 | runs-on: ${{ matrix.os }} 38 | 39 | steps: 40 | - name: Checkout code 41 | uses: actions/checkout@v4 42 | 43 | - name: Build docker image 44 | run: | 45 | docker build -f Dockerfile --build-arg="PIXIENV=default" --build-arg="UID=1001" -t fiat . 
46 | 47 | - name: Execute docker container 48 | uses: addnab/docker-run-action@v3 49 | with: 50 | username: deltares 51 | options: -v ${{ github.workspace }}/.testdata:/home/deltares/.testdata 52 | shell: bash 53 | image: fiat:latest 54 | run: | 55 | pixi run generate-data 56 | pixi run fiat run .testdata/geom_event.toml 57 | exit 58 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/documentation.yaml: -------------------------------------------------------------------------------- 1 | name: Documentation 2 | description: Missing or bad documentation. 3 | # title: "Docs: " 4 | labels: [documentation] 5 | 6 | body: 7 | - type: checkboxes 8 | attributes: 9 | label: Delft-FIAT version checks 10 | options: 11 | - label: > 12 | I have checked that the issue still exists on the latest versions of the docs 13 | on `master` [here](https://github.com/Deltares/Delft-FIAT) 14 | required: true 15 | - type: dropdown 16 | id: kind 17 | attributes: 18 | description: What kind of documentation issue is this? 
19 | label: Kind of issue 20 | options: 21 | - Docs are wrong 22 | - Docs are unclear 23 | - Docs are missing 24 | - type: textarea 25 | id: location 26 | attributes: 27 | label: Location of the documentation 28 | description: > 29 | Please provide the URL of the documentation: 30 | validations: 31 | required: true 32 | - type: textarea 33 | id: problem 34 | attributes: 35 | label: Documentation problem 36 | description: > 37 | Please provide a description of the documentation problem: 38 | validations: 39 | required: true 40 | - type: textarea 41 | id: suggested-fix 42 | attributes: 43 | label: Suggested fix for documentation 44 | description: > 45 | Please explain your suggested fix and why it's better than the existing documentation: 46 | validations: 47 | required: false 48 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Binary 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | paths: 9 | - .build/* 10 | - src/fiat/* 11 | - pyproject.toml 12 | - pixi.lock 13 | - .github/workflows/build.yml 14 | pull_request: 15 | branches: 16 | - master 17 | paths: 18 | - .build/* 19 | - src/fiat/* 20 | - pyproject.toml 21 | - pixi.lock 22 | - .github/workflows/build.yml 23 | 24 | jobs: 25 | build: 26 | defaults: 27 | run: 28 | shell: bash -l {0} 29 | 30 | strategy: 31 | fail-fast: false 32 | matrix: 33 | os: ["ubuntu-latest"] 34 | include: 35 | - os: ubuntu-latest 36 | label: linux-64 37 | 38 | name: ${{ matrix.os }} - build 39 | runs-on: ${{ matrix.os }} 40 | 41 | steps: 42 | - name: Checkout code 43 | uses: actions/checkout@v4 44 | 45 | - name: Setup pixi env 46 | uses: prefix-dev/setup-pixi@v0.8.13 47 | with: 48 | pixi-version: "v0.58.0" 49 | environments: build 50 | locked: false 51 | cache: true 52 | cache-write: ${{ github.event_name == 'push' && github.ref_name == 'master' }} 53 | 54 | - name: Build 
FIAT 55 | run: | 56 | pixi run -e build build 57 | 58 | - name: Create Testdata 59 | run: | 60 | pixi run -e build generate-data 61 | 62 | - name: Test Binary 63 | run: ./bin/release/fiat run .testdata/geom_event.toml 64 | -------------------------------------------------------------------------------- /docs/info/models.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: Models 3 | --- 4 | 5 | Within FIAT, two types of models are currently present: 6 | 7 | - GeomModel 8 | - GridModel 9 | 10 | ## GeomModel 11 | The `GeomModel` requires a configuration file that indicates where the exposure geom(s), vulnerability and hazard data are located and whether risk should be calculated on top of damages. The model reads in vector data as exposure assets (e.g., building footprints, road segments, and utility locations) one by one. Each exposure asset gets overlaid with the hazard map and with this data, the corresponding damage fraction is taken from the vulnerability curve and multiplied with the maximum potential damage to get the flood damage. When the user supplies multiple hazard maps with related return periods, they can choose to also calculate risk. 12 | 13 | ## GridModel 14 | The `GridModel` requires a configuration file that indicates where the exposure raster(s), vulnerability and hazard data are located and whether risk should be calculated on top of damages. The model reads in raster data as exposure layers (e.g. commercial, residential, industrial) holding the maximum potential damage values, which should be in the same resolution, size, and projection as the hazard map. To calculate damages, the exposure raster and hazard raster are overlaid and per cell, the maximum potential damage is multiplied with the damage fraction corresponding to the hazard value in that cell. Risk can also be calculated with the `GridModel`, although it must be noted that this functionality has not been thoroughly tested. 
15 | -------------------------------------------------------------------------------- /docs/info/faq.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: Frequently asked questions 3 | --- 4 | 5 | ### How do I install FIAT? 6 | Visit the [Setup guide](../setup_guide/index.qmd), which provides installation instructions for Windows. 7 | 8 | ### How do I report a bug? 9 | You can submit a new [issue](https://github.com/Deltares/Delft-FIAT/issues/new/choose) with the *Bugs* template in the FIAT repository. You can also submit an issue regarding *Missing or bad documentation*, a *Feature request*, or you can *Ask a question*. 10 | 11 | ### How do I build a model? 12 | You can use [HydroMT-FIAT](https://deltares.github.io/hydromt_fiat/latest) to build a FIAT model or you can follow the instructions in the [User guide](../user_guide/index.qmd). 13 | 14 | ### How do I modify parameters or calibrate my model? 15 | You can manually change parameters (e.g., the Ground Floor Height) in your exposure data in Excel or with Python. You can also use [HydroMT-FIAT](https://deltares.github.io/hydromt_fiat/latest) for this. 16 | 17 | ### How do I request different output? 18 | You can change the output data types in the *settings.toml* file, visit the [Settings file](../user_guide/settings/index.qmd) page for more information. 19 | 20 | ### Can I use FIAT for other hazards than flooding? 21 | Yes! If the damage that hazard causes can be estimated in the same manner as FIAT uses for flooding, other hazard maps can be used in combination with corresponding damage functions. 22 | 23 | ### What does FIAT have to do with the car brand FIAT? 24 | Nothing, it is an acronym for **F**lood **I**mpact **A**ssessment **T**ool. 
25 | -------------------------------------------------------------------------------- /test/test_io.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from fiat.fio import BufferedGeomWriter, BufferedTextWriter 4 | 5 | 6 | def test_bufferedgeom(tmp_path, geom_data): 7 | out_path = Path(str(tmp_path)) 8 | writer = BufferedGeomWriter( 9 | Path(out_path, "bufferedgeoms.gpkg"), 10 | geom_data.srs, 11 | geom_data.layer.GetLayerDefn(), 12 | buffer_size=2, 13 | ) 14 | assert writer.size == 0 15 | 16 | writer.add_feature_with_map( 17 | geom_data.layer.GetFeature(1), 18 | {}, 19 | ) 20 | assert writer.size == 1 21 | 22 | writer.add_feature_with_map( 23 | geom_data.layer.GetFeature(2), 24 | {}, 25 | ) 26 | assert writer.size == 2 27 | 28 | writer.add_feature_with_map( 29 | geom_data.layer.GetFeature(3), 30 | {}, 31 | ) 32 | assert writer.size == 1 33 | 34 | writer.close() 35 | pass 36 | 37 | 38 | def test_bufferedtext(tmp_path): 39 | out_path = Path(str(tmp_path)) 40 | writer = BufferedTextWriter( 41 | Path(out_path, "bufferedtext.txt"), 42 | mode="wb", 43 | buffer_size=15, # 15 bytes (15 chars) 44 | ) 45 | 46 | writer.write(b"Hello there\n") 47 | assert writer.tell() == 12 48 | 49 | writer.write(b"Another line\n") 50 | assert writer.tell() == 13 51 | writer.seek(0) 52 | assert writer.read() == b"Another line\n" 53 | 54 | writer.close() 55 | 56 | with open(Path(out_path, "bufferedtext.txt"), "r") as reader: 57 | text = reader.read() 58 | 59 | assert len(text) == 25 60 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bugs.yaml: -------------------------------------------------------------------------------- 1 | name: Bugs 2 | description: Broken elements within Delft-FIAT 3 | # title: "BUG: " 4 | labels: [bug] 5 | 6 | body: 7 | - type: checkboxes 8 | id: checks 9 | attributes: 10 | label: Delft-FIAT version checks 11 | options: 12 | - label: > 13 | 
I have checked that this issue has not already been reported. 14 | required: true 15 | - label: > 16 | I have checked that this bug exists on the latest version of Delft-FIAT. 17 | required: true 18 | - type: textarea 19 | id: example 20 | attributes: 21 | label: Reproducible Example 22 | description: > 23 | Please provide a minimal, copy-pastable example to reproduce the behavior: 24 | validations: 25 | required: true 26 | - type: textarea 27 | id: current-behaviour 28 | attributes: 29 | description: > 30 | Please provide a description of the incorrect behaviour shown in the reproducible example 31 | label: Current behaviour 32 | validations: 33 | required: true 34 | - type: textarea 35 | id: expected-behaviour 36 | attributes: 37 | description: > 38 | Please provide a description of what you think the behaviour should be 39 | label: Desired behaviour 40 | validations: 41 | required: true 42 | - type: textarea 43 | id: Additional 44 | attributes: 45 | label: Additional Context 46 | description: > 47 | Please add any other context about the bug here: 48 | validations: 49 | required: false 50 | -------------------------------------------------------------------------------- /docs/.archive/populate.js: -------------------------------------------------------------------------------- 1 | window.onload = function() { 2 | // Assuming you have a ul element in your HTML like this: 3 | // 4 | 5 | // Fetch the JSON data 6 | fetch("http://localhost:8008/switcher.json") 7 | .then(response => response.json()) 8 | .then(data => { 9 | console.log('Data loaded:', data); // Log the loaded data 10 | 11 | const dropdown = document.querySelector('#nav-menu-version').nextElementSibling; 12 | console.log('Dropdown element:', dropdown); // Log the dropdown element 13 | 14 | // Clear all existing dropdown items 15 | dropdown.innerHTML = ''; 16 | 17 | data.forEach(item => { 18 | console.log('Adding item:', item); // Log the item being added 19 | 20 | // Create a new li element 21 | const 
li = document.createElement('li'); 22 | 23 | // Create a new a element 24 | const a = document.createElement('a'); 25 | a.className = 'dropdown-item'; 26 | a.href = item.url; // Use the 'url' property as the href 27 | a.textContent = item.name; // Use the 'name' property as the text 28 | 29 | // Add the a element to the li 30 | li.appendChild(a); 31 | 32 | // Add the li to the dropdown 33 | dropdown.appendChild(li); 34 | }); 35 | 36 | console.log('Dropdown after adding items:', dropdown); // Log the dropdown after adding items 37 | }) 38 | .catch(error => console.error('Error:', error)); // Log any errors 39 | 40 | 41 | } 42 | -------------------------------------------------------------------------------- /.build/win64_d.spec: -------------------------------------------------------------------------------- 1 | from fiat.util import generic_folder_check 2 | 3 | import os 4 | import sys 5 | from pathlib import Path 6 | 7 | #Pre build event setup 8 | app_name = "fiat" 9 | sys.setrecursionlimit(5000) 10 | generic_folder_check("../bin") 11 | 12 | cwd = Path.cwd() 13 | env_path = os.path.dirname(sys.executable) 14 | mode = "Debug" 15 | 16 | proj = Path(os.environ["PROJ_LIB"]) 17 | 18 | binaries = [ 19 | (Path(proj, 'proj.db'), './share'), 20 | ] 21 | 22 | # Build event 23 | a = Analysis( 24 | ["../src/fiat/cli/main.py"], 25 | pathex=["../src", Path(env_path, "lib/site-packages")], 26 | binaries=binaries, 27 | datas=[], 28 | hiddenimports=[], 29 | hookspath=[], 30 | hooksconfig={}, 31 | runtime_hooks=['runtime_hooks.py'], 32 | excludes=[], 33 | win_no_prefer_redirects=False, 34 | win_private_assemblies=False, 35 | noarchive=False, 36 | ) 37 | 38 | pyz = PYZ( 39 | a.pure, 40 | ) 41 | 42 | exe = EXE( 43 | pyz, 44 | a.scripts, 45 | [('v', None, 'OPTION')], 46 | icon="NONE", 47 | exclude_binaries=True, 48 | name=app_name, 49 | debug=False, 50 | bootloader_ignore_signals=False, 51 | strip=False, 52 | upx=False, 53 | console=True, 54 | disable_windowed_traceback=False, 55 | 
argv_emulation=False, 56 | target_arch=None, 57 | codesign_identity=None, 58 | entitlements_file=None, 59 | contents_directory='bin', 60 | ) 61 | 62 | coll = COLLECT( 63 | exe, 64 | a.binaries, 65 | a.datas, 66 | strip=False, 67 | upx=False, 68 | upx_exclude=[], 69 | name=mode, 70 | ) 71 | -------------------------------------------------------------------------------- /docs/setup_guide/kernel/build.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Building FIAT" 3 | --- 4 | 5 | To build FIAT as an executable/binary, it is required to clone the FIAT repository to your local drive. 6 | This requires [git](https://git-scm.com/) to be installed on your local machine. 7 | 8 | ```bash 9 | git clone git@github.com:Deltares/Delft-FIAT.git 10 | ``` 11 | 12 | Besides git, a python installation/environment with the necessary packages is needed. 13 | It is recommended to use [miniforge3](../general/conda.qmd) for this purpose. 14 | 15 | ::: {.panel-tabset} 16 | 17 | ## Windows 18 | 19 | - Create a yml for a separate **build** environment: 20 | 21 | ```bash 22 | python make_env.py build 23 | ``` 24 | 25 | - Create the environment with mamba. This time, FIAT will be automatically installed with the environment: 26 | 27 | ```bash 28 | mamba env create -f environment.yml 29 | ``` 30 | 31 | - Go to the .build/ directory and execute the win64.bat script: 32 | 33 | ```bash 34 | cd ./.build 35 | 36 | win64.bat 37 | ``` 38 | 39 | That's it. 40 | A FIAT application will be located in the `{root}/bin/Release` folder. 41 | 42 | ## Linux 43 | 44 | - Create a yml for a separate **build** environment: 45 | 46 | ```bash 47 | python make_env.py build 48 | ``` 49 | 50 | - Create the environment with mamba.
This time, FIAT will be automatically installed with the environment: 51 | 52 | ```bash 53 | mamba env create -f environment.yml 54 | ``` 55 | 56 | - Set the rights of the shell script and execute it: 57 | 58 | ```bash 59 | chmod u+x .build/linux64.sh 60 | 61 | .build/linux64.sh 62 | ``` 63 | 64 | That's it. 65 | A FIAT application will be located in the `{root}/bin/Release` folder. 66 | 67 | ::: 68 | -------------------------------------------------------------------------------- /src/fiat/cli/action.py: -------------------------------------------------------------------------------- 1 | """Custom actions for cli.""" 2 | 3 | import argparse 4 | import re 5 | 6 | from fiat.util import _dtypes_reversed, deter_type 7 | 8 | 9 | def parse_cli_value(value: str) -> object: 10 | """Parse the value to a python type.""" 11 | value = value.strip() 12 | m = re.findall(r"^\[(.*)\]$", value) # Pattern for checking list 13 | # If not a list, then it's a single value 14 | if len(m) == 0: 15 | t = deter_type(value.encode(), 0) 16 | value = _dtypes_reversed[t](value) 17 | return value 18 | 19 | # If it's a list 20 | item = m[0] 21 | item = item.replace(" ", "") # Strip it from spaces 22 | # Determine the type of the list and use that to set the types of the elements 23 | t = deter_type(item.replace(",", "\n").encode(), item.count(",")) 24 | value = [_dtypes_reversed[t](elem) for elem in item.split(",")] 25 | return value 26 | 27 | 28 | class KeyValueAction(argparse.Action): 29 | """Simple class for key values pairs with equal signs.""" 30 | 31 | def __call__(self, parser, namespace, values, option_string=None): 32 | """Overwrite call method.""" 33 | # Check for existence 34 | if getattr(namespace, self.dest) is None: 35 | setattr(namespace, self.dest, {}) 36 | 37 | # Set the values 38 | try: 39 | key_value_dict = getattr(namespace, self.dest) 40 | key, value = values.split("=", 1) 41 | key_value_dict[key] = parse_cli_value(value) 42 | setattr(namespace, self.dest, key_value_dict) 43
These need to be unique over all geometry files if a csv file with exposure data is provided and corresponding to the `object_id`'s in the provided csv file.
"""Build hook for FIAT.

PyInstaller build hook that collects run-time data files for a frozen
FIAT executable: the GDAL netCDF driver plugin (conda environments with
gdal >= 3.9.1) and the proj coordinate database.
"""

import glob
import os
import sys
from pathlib import Path

from osgeo.gdal import __version__ as gdal_version
from packaging.version import Version
from PyInstaller.compat import is_conda, is_win
from PyInstaller.utils.hooks import logger
from PyInstaller.utils.hooks.conda import (
    distribution,
)

# PyInstaller picks this up as a list of (source, destination) data entries.
datas = []

if hasattr(sys, "real_prefix"):  # check if in a virtual environment
    root_path = sys.real_prefix
else:
    root_path = sys.prefix

# From gdal 3.9.1 the conda packaging ships drivers as separate plugin
# packages, so the netCDF driver has to be bundled explicitly.
if is_conda and Version(gdal_version) >= Version("3.9.1"):
    try:
        plugin = distribution("libgdal-netcdf")

        # Look for all the plugins
        plugin_dir = Path(root_path, plugin.files[0].parent)
        all_plugins = glob.glob(Path(plugin_dir, "*").as_posix())

        # Append the data
        datas += list(map(lambda path: (path, "./gdalplugins"), all_plugins))
    except BaseException:
        # Best effort: a missing plugin only disables netCDF support.
        logger.warning("NetCDF plugin for gdal not found.")

# Sort out the proj database; environment variables take precedence
# (PROJ_DATA is the modern name, PROJ_LIB the legacy one).
src_proj = None
if "PROJ_DATA" in os.environ:
    src_proj = os.environ["PROJ_DATA"]
elif "PROJ_LIB" in os.environ:
    src_proj = os.environ["PROJ_LIB"]

# Default check based on known directories
if src_proj is None:
    if is_win:
        src_proj = os.path.join(root_path, "Library", "share", "proj")
    else:  # both linux and darwin
        src_proj = os.path.join(root_path, "share", "proj")
    if not os.path.isdir(src_proj):
        src_proj = None
        logger.warning("Proj data was not found.")

if src_proj is not None:
    datas.append((src_proj, "./share/proj"))
"""Tests for the geometry and grid model classes."""

from fiat import Configurations, GeomModel, GridModel


def test_geommodel(tmp_path, settings_files):
    settings = Configurations.from_file(settings_files["geom_event"])

    # A model built from an empty configuration has no data loaded
    empty = Configurations(_root=tmp_path)
    geom_model = GeomModel(empty)
    assert geom_model.exposure_data is None
    assert geom_model.hazard_grid is None
    assert geom_model.vulnerability_data is None
    assert len(geom_model.exposure_geoms) == 0
    assert geom_model.threads == 1
    assert geom_model.cfg.get("output.path").name == "output"

    # Data can still be read in manually afterwards
    geom_model.read_hazard_grid(path=settings.get("hazard.file"))
    assert geom_model.hazard_grid is not None
    assert geom_model.hazard_grid.shape == (10, 10)
    geom_model.read_exposure_geoms(paths=[settings.get("exposure.geom.file1")])
    assert geom_model.exposure_geoms[1].size == 4

    # A fully specified configuration loads everything up front
    geom_model = GeomModel(settings)
    assert geom_model.exposure_data is not None
    assert len(geom_model.exposure_geoms) == 1

    # Dropping the csv entry and raising the thread count is reflected
    _ = settings.pop("exposure.csv.file")
    settings.set("model.threads", 4)
    geom_model = GeomModel(settings)
    assert geom_model.exposure_data is None
    assert geom_model.threads == 4


def test_gridmodel(tmp_path, settings_files):
    settings = Configurations.from_file(settings_files["grid_event"])

    # A model built from an empty configuration has no data loaded
    empty = Configurations(_root=tmp_path)
    grid_model = GridModel(empty)
    assert grid_model.exposure_grid is None
    assert grid_model.hazard_grid is None
    assert grid_model.vulnerability_data is None

    # Manual reads work on the bare model
    grid_model.read_vulnerability_data(settings.get("vulnerability.file"))
    grid_model.read_exposure_grid(settings.get("exposure.grid.file"))

    # A fully specified configuration loads everything up front
    settings = Configurations.from_file(settings_files["grid_event"])
    grid_model = GridModel(settings)
    assert grid_model.exposure_grid is not None
    assert grid_model.vulnerability_data is not None
This will chunk the data in rectangles with the goal of reducing the memory footprint.
def test_calc_risk():
    """EAD from sorted return periods and damages."""
    return_periods = [1, 2, 5, 25, 50, 100]
    damages = [5, 10, 50, 300, 1200, 3000]

    density = risk_density(return_periods)
    expected_annual_damage = calc_ead(density, damages)

    assert int(round(expected_annual_damage, 1) * 100) == 9850


def test_calc_risk_order():
    """Shuffling the (rp, damage) pairs must not change the EAD."""
    return_periods = [50, 2, 100, 25, 1, 5]
    damages = [1200, 10, 3000, 300, 5, 50]

    density = risk_density(return_periods)
    expected_annual_damage = calc_ead(density, damages)

    assert int(round(expected_annual_damage, 1) * 100) == 9850


def test_calc_risk_one():
    """A single return period degenerates to damage / rp."""
    return_periods = [10]
    damages = [5]

    density = risk_density(return_periods)
    expected_annual_damage = calc_ead(density, damages)

    assert int(expected_annual_damage * 100) == 50
"""Build hook for osgeo."""
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------

import os
import sys

from PyInstaller.compat import is_darwin, is_win
from PyInstaller.utils.hooks import logger

# Location of the binary; old virtualenvs expose the real interpreter
# prefix via ``sys.real_prefix``.
if hasattr(sys, "real_prefix"):  # check if in a virtual environment
    root_path = sys.real_prefix
else:
    root_path = sys.prefix

# PyInstaller hook globals. Always define both so the hook loader never
# sees a missing name (``binaries`` previously stayed undefined when the
# proj library could not be located).
datas = []
binaries = []

# Locate the gdal data directory for the current platform layout.
if is_win:
    src_gdal = os.path.join(root_path, "Library", "share", "gdal")
    if not os.path.exists(src_gdal):
        src_gdal = os.path.join(root_path, "Library", "data")
else:  # both linux and darwin
    src_gdal = os.path.join(root_path, "share", "gdal")
if not os.path.isdir(src_gdal):
    src_gdal = None
    logger.warning("GDAL data was not found.")

if src_gdal is not None:
    datas.append((src_gdal, "./share/gdal"))

# Hidden dependencies
if src_gdal is not None:
    # if `proj.4` is present, it provides additional functionalities
    if is_win:
        proj4_lib = os.path.join(root_path, "proj.dll")
    elif is_darwin:
        proj4_lib = os.path.join(root_path, "lib", "libproj.dylib")
    else:  # assumed linux-like settings
        proj4_lib = os.path.join(root_path, "lib", "libproj.so")

    if os.path.exists(proj4_lib):
        binaries = [(proj4_lib, ".")]
This guide provides an overview of how to go about it.
1. Create an issue if one does not yet exist before you start contributing. For more information on the issues, see the [contributing page](./#sec-issue-conventions).
"""Spec file for building fiat.

PyInstaller spec: writes a build-time stamp module for ``--version``
reporting, then analyses the cli entry point so the application can be
collected into a standalone bundle.
"""

import inspect
import sys
import time
from pathlib import Path

from fiat.util import generic_folder_check

# Pre build event setup
app_name = "fiat"
mode = "release"
sys.setrecursionlimit(5000)  # deep import graphs can exceed the default limit

# Some general information; resolve the project root from this file's location
_file = Path(inspect.getfile(lambda: None))
project_root = _file.parents[1]
build_dir = Path(project_root, ".build")

# Make sure the output directory exists before building
generic_folder_check(Path(project_root, "bin"))

# Set the build time for '--version' usage
now = time.localtime(time.time())
FIAT_BUILD_TIME = time.strftime('%Y-%m-%dT%H:%M:%S UTC%z', now)
with open(Path(build_dir, "fiat_build_time.py"), "w") as _w:
    _w.write(f'BUILD_TIME = "{FIAT_BUILD_TIME}"')

# Build event: Analysis resolves the entry script's imports, binaries
# and data files (``Analysis`` is injected by PyInstaller at exec time).
a = Analysis(
    [Path(project_root, "src/fiat/cli/main.py")],
    pathex=[Path(build_dir), Path(project_root, "src")],
    binaries=[],
    datas=[],
    hiddenimports=["fiat_build_time", "fiat.methods"],
    hookspath=[build_dir.as_posix()],
    hooksconfig={},
    runtime_hooks=[Path(build_dir, 'runtime_hooks.py')],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    noarchive=False,
)

# Bundle the collected pure-Python modules into a PYZ archive
The OS used in the docker/container image will be Debian.
However, as an alternative, it can be built directly from the Dockerfile in the GitHub remote repository:
22 | .cov/ 23 | .docs/ 24 | tmp/ 25 | whl/ 26 | 27 | # pixi environments 28 | .pixi 29 | *.egg-info 30 | 31 | # C extensions 32 | *.so 33 | 34 | # Pyinstaller files 35 | .build/fiat_build_time.py 36 | 37 | # Distribution / packaging 38 | .Python 39 | build/ 40 | develop-eggs/ 41 | dist/ 42 | downloads/ 43 | eggs/ 44 | .eggs/ 45 | lib/ 46 | lib64/ 47 | parts/ 48 | sdist/ 49 | var/ 50 | wheels/ 51 | share/python-wheels/ 52 | *.egg-info/ 53 | *.whl 54 | .installed.cfg 55 | *.egg 56 | MANIFEST 57 | 58 | # Installer logs 59 | pip-log.txt 60 | pip-delete-this-directory.txt 61 | 62 | # Unit test / coverage reports 63 | htmlcov/ 64 | .tox/ 65 | .nox/ 66 | .coverage 67 | .coverage.* 68 | .cache 69 | nosetests.xml 70 | coverage.xml 71 | *.cover 72 | *.py,cover 73 | .hypothesis/ 74 | .pytest_cache/ 75 | .testdata/* 76 | !.testdata/.gitkeep 77 | !.testdata/create_test_data.py 78 | cover/ 79 | 80 | # Scrapy stuff: 81 | .scrapy 82 | 83 | # PyBuilder 84 | .pybuilder/ 85 | target/ 86 | 87 | # Jupyter Notebook 88 | .ipynb_checkpoints 89 | 90 | # IPython 91 | profile_default/ 92 | ipython_config.py 93 | 94 | # PEP 582; used by e.g. 
"""Util for cli."""

import cProfile
import pstats
import sys
from pathlib import Path
from typing import Callable

from fiat.cfg import Configurations
from fiat.log import Logger


def file_path_check(path):
    """Cli friendly version of path checking.

    Relative paths are resolved against the current working directory.

    Parameters
    ----------
    path : Path | str
        Path as supplied on the command line.

    Returns
    -------
    Path
        The absolute path.

    Raises
    ------
    FileNotFoundError
        If the resolved path is neither an existing file nor a directory.
    """
    root = Path.cwd()
    path = Path(path)
    if not path.is_absolute():
        path = Path(root, path)
    # Logical `or` instead of bitwise `|`: it short-circuits and is the
    # idiomatic boolean operator.
    if not (path.is_file() or path.is_dir()):
        raise FileNotFoundError(f"{str(path)} is not a valid path")
    return path


def run_log(
    func: Callable,
    logger: Logger,
    *args,
):
    """Cli friendly run for/ with logging exceptions.

    Calls ``func(*args)``; on any exception (including KeyboardInterrupt)
    the error is logged and the process exits with status code 1.

    Parameters
    ----------
    func : Callable
        The callable to execute.
    logger : Logger
        Logger used to report a failure.
    *args
        Positional arguments forwarded to `func`.

    Returns
    -------
    object
        Whatever `func` returns on success.
    """
    try:
        out = func(*args)
    except BaseException:  # deliberate: also catches KeyboardInterrupt/SystemExit
        exc_info = sys.exc_info()
        msg = ",".join([str(item) for item in exc_info[1].args])
        if exc_info[0] is KeyboardInterrupt:
            msg = "KeyboardInterrupt"
        logger.error(msg)
        exc_info = None  # drop the traceback reference before exiting
        # Exit with code 1
        sys.exit(1)
    else:
        return out


def run_profiler(
    func: Callable,
    profile: str,
    cfg: Configurations,
    logger: Logger,
):
    """Run the profiler from cli.

    Profiles `func`, dumps raw stats to ``<output.path>/<profile>`` and a
    human readable summary to ``<output.path>/profile.txt``.

    Parameters
    ----------
    func : Callable
        Zero-argument callable to profile.
    profile : str
        File name for the binary profiling stats.
    cfg : Configurations
        Settings object; provides the output directory and settings path.
    logger : Logger
        Logger for progress messages.
    """
    logger.warning("Running profiler...")

    # Setup the profiler and run the function
    profiler = cProfile.Profile()
    profiler.enable()
    run_log(func, logger=logger)
    profiler.disable()

    # Save all the stats
    profile_out = cfg.get("output.path") / profile
    profiler.dump_stats(profile_out)
    logger.info(f"Saved profiling stats to: {profile_out}")

    # Save a human readable portion to a text file
    txt_out = cfg.get("output.path") / "profile.txt"
    with open(txt_out, "w") as _w:
        _w.write(f"Delft-FIAT profile ({cfg.filepath}):\n\n")
        stats = pstats.Stats(profiler, stream=_w)
        _ = stats.sort_stats("tottime").print_stats()
    logger.info(f"Saved profiling stats in human readable format: {txt_out}")
class MainHelpFormatter(HelpFormatter):
    """Format the help screen over cli.

    Customizes argparse's default help rendering: a wider help column,
    comma-separated option aliases, subcommand listings without the
    ``{a,b,...}`` choices header, and a compact single-line usage string.

    NOTE(review): this relies on private ``argparse`` internals
    (``_format_args``, ``_get_default_metavar_for_optional``, ``PARSER``),
    which may change between Python versions.
    """

    def __init__(self, *args, **kwargs):
        # Push the help text column further right than argparse's default
        # so long option invocations and their help stay on one line.
        kwargs["max_help_position"] = 40
        super().__init__(*args, **kwargs)

    def add_usage(
        self,
        usage: str | None,
        actions: Iterable[Action],
        groups: Iterable[_MutuallyExclusiveGroup],
        prefix: str | None = None,
    ) -> None:
        """Add usage string."""
        # Pure pass-through to the parent; kept (with type hints) to pin
        # the expected signature.
        return super().add_usage(usage, actions, groups, prefix)

    def _format_action_invocation(self, action):
        # Positional arguments: fall back to the stock rendering.
        if not action.option_strings:
            return super()._format_action_invocation(action)
        else:
            # Optional arguments: render as "-s, --long METAVAR" instead of
            # repeating the metavar after every alias.
            default = self._get_default_metavar_for_optional(action)
            metavar = self._format_args(action, default)
            # help_string = self._get_help_string(action)
            return ", ".join(action.option_strings) + " " + metavar

    def _format_action(self, action):
        parts = super()._format_action(action)
        # For subparser actions, drop the first line (the "{cmd1,cmd2}"
        # choices header) and keep only the per-command entries.
        if action.nargs == PARSER:
            parts = "\n".join(parts.split("\n")[1:])
        return parts

    def _format_usage(self, usage, actions, groups, prefix):
        # Build a compact usage line; the `usage` and `groups` arguments
        # are intentionally ignored in favour of a fixed layout.
        if prefix is None:
            prefix = "Usage: "

        # Program name
        usage_args = [self._prog]

        # Add options string if there are actions (options)
        # NOTE(review): appending "" produces a double space before the
        # positionals in the joined string — presumably a placeholder for
        # an options summary; confirm intent.
        if actions:
            usage_args.append("")

        # Positional arguments
        positionals = [
            action.metavar or action.dest
            for action in actions
            if action.option_strings == []
        ]
        usage_args.extend(positionals)

        # Return the formatted string
        return f"{prefix}{' '.join(usage_args)}\n"

    def start_section(self, heading):
        """Show start section."""
        # Capitalize only the first letter of the section heading.
        heading = heading[0].upper() + heading[1:]
        return super().start_section(heading)
def csv_def_file(
    p: Path | str,
    columns: tuple | list,
):
    """Set up the outgoing csv file.

    Writes only the header row; data rows are appended later by the
    workers.

    Parameters
    ----------
    p : Path | str
        Path to the file.
    columns : tuple | list
        Headers to be added to the file.
    """
    # Encode the header once and terminate it with the project-wide
    # newline character. The file is opened in binary mode on purpose so
    # the chosen newline is written verbatim on every platform.
    header = ",".join(columns).encode() + NEWLINE_CHAR.encode()

    with open(p, "wb") as _dw:
        _dw.write(header)
13 | 14 | Parameters 15 | ---------- 16 | rp_coef : list 17 | List of return period coefficients. 18 | dms : list 19 | List of corresponding damages 20 | (in the same order of the return periods coefficients). 21 | 22 | Returns 23 | ------- 24 | float 25 | The Expected Annual Damage (EAD), or risk, as a log-linear integration over the 26 | return periods. 27 | """ 28 | # Calculate the EAD 29 | ead = sum([x * y for x, y in zip(rp_coef, dms)]) 30 | return ead 31 | 32 | 33 | def risk_density( 34 | rp: list | tuple, 35 | ) -> list: 36 | """Calculate the risk density factors from return periods values. 37 | 38 | Parameters 39 | ---------- 40 | rp : list | tuple 41 | A list of return periods. 42 | 43 | Returns 44 | ------- 45 | list 46 | List of risk density factors. 47 | """ 48 | # Step 1: Compute frequencies associated with T-values. 49 | _rp = sorted(rp) 50 | idxs = [_rp.index(n) for n in rp] 51 | rp_u = sorted(rp) 52 | rp_l = len(rp_u) 53 | 54 | f = [1 / n for n in rp_u] 55 | lf = [math.log(1 / n) for n in rp_u] 56 | 57 | if rp_l == 1: 58 | return f 59 | 60 | # Step 2: 61 | c = [(1 / (lf[idx] - lf[idx + 1])) for idx in range(rp_l - 1)] 62 | 63 | # Step 3: 64 | G = [(f[idx] * lf[idx] - f[idx]) for idx in range(rp_l)] 65 | 66 | # Step 4: 67 | a = [ 68 | ( 69 | (1 + c[idx] * lf[idx + 1]) * (f[idx] - f[idx + 1]) 70 | + c[idx] * (G[idx + 1] - G[idx]) 71 | ) 72 | for idx in range(rp_l - 1) 73 | ] 74 | b = [ 75 | (c[idx] * (G[idx] - G[idx + 1] + lf[idx + 1] * (f[idx + 1] - f[idx]))) 76 | for idx in range(rp_l - 1) 77 | ] 78 | 79 | # Step 5: 80 | alpha = [ 81 | b[0] 82 | if idx == 0 83 | else f[idx] + a[idx - 1] 84 | if idx == rp_l - 1 85 | else a[idx - 1] + b[idx] 86 | for idx in range(rp_l) 87 | ] 88 | 89 | return [alpha[idx] for idx in idxs] 90 | -------------------------------------------------------------------------------- /test/test_job.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import get_context 2 | 
def test_generate_jobs_tied():
    """`foo` and `baz` tied together vary in lockstep, `bar` is crossed."""
    tied_jobs = generate_jobs(
        {"foo": [1, 2, 3], "bar": [2, 3], "baz": [3, 4, 5]},
        tied=["foo", "baz"],
    )
    tied_jobs = list(tied_jobs)

    # 3 tied (foo, baz) pairs x 2 bar values = 6 jobs in total
    assert len(tied_jobs) == 6
    assert tied_jobs[0] == {"foo": 1, "bar": 2, "baz": 3}
    assert tied_jobs[1] == {"foo": 2, "bar": 2, "baz": 4}
threads=2, 86 | ) 87 | 88 | assert res == [8, 10, 16, 20] 89 | -------------------------------------------------------------------------------- /SETUP.md: -------------------------------------------------------------------------------- 1 | # Quick setup guide 2 | 3 | ## How setup/ install fiat 4 | A brief summary will be put here for: 5 | - Just installing it for use 6 | - Development install 7 | - Freeze it as an application 8 | 9 | For each and every one of these installs we solely look at the 10 | commandline user interface or batch/ bash scripts. 11 | 12 | N.b. this guide assumes one has conda/ mamba/ micro3 and git installed 13 | (Who doesnt..) 14 | 15 | ## Just install for use 16 | For just using fiat, either create a new environment: 17 | ```bat 18 | # Create an environment 19 | conda create -n fiat python=3.11.* 20 | # Activate it 21 | conda activate fiat 22 | 23 | #Install the good stuff 24 | pip install git+https://github.com/Deltares/Delft-FIAT.git 25 | ``` 26 | 27 | or install FIAT directly in an existing environment 28 | ```bat 29 | pip install git+https://github.com/Deltares/Delft-FIAT.git 30 | ``` 31 | 32 | ## Development install 33 | This is for those who wish to contribute at this early stage 34 | First, clone the repository 35 | ```bat 36 | # Go to some directory where your repos are located 37 | cd ~/{your path} 38 | # Clone FIAT 39 | git clone https://github.com/Deltares/Delft-FIAT.git fiat 40 | ``` 41 | 42 | Now let's do the python stuff. 43 | Make sure you either have tomli or tomllib (build-in with py 3.11) 44 | in your base enviroment 45 | ```bat 46 | # Go into the FIAT repository directory 47 | cd ~/{your path}/fiat 48 | 49 | # Create the environment file 50 | python make_env.py dev 51 | 52 | # Create conda env 53 | conda env create -f environment.yml 54 | 55 | # Activate and install FIAT 56 | conda activate fiat_dev 57 | pip install -e . 58 | ``` 59 | 60 | There ya go. 
61 | 62 | ## Freeze FIAT as an application 63 | This more or less assumes the one went for a development install 64 | If not: do the development install for FIAT first 65 | 66 | Create a yaml for a seperate environment 67 | ```bat 68 | python make_env.py build 69 | ``` 70 | 71 | Again, create the environment with conda 72 | ```bat 73 | conda env create -f environment.yml 74 | ``` 75 | 76 | This time FIAT will be automatically installed with the environment 77 | Now, go to the .build/core directory 78 | ```bat 79 | cd ./.build/core 80 | ``` 81 | 82 | And just execute the pybuild.bat script 83 | ```bat 84 | pybuild.bat 85 | ``` 86 | 87 | That's it. 88 | FIAT will be located in the {root}/bin/core/Release folder. 89 | -------------------------------------------------------------------------------- /docs/_scripts/inv.py: -------------------------------------------------------------------------------- 1 | import click 2 | import sphobjinv as soi 3 | import yaml 4 | from pathlib import Path 5 | from quartodoc import convert_inventory 6 | from quartodoc.interlinks import inventory_from_url 7 | 8 | 9 | @click.group() 10 | def cli(): 11 | pass 12 | 13 | 14 | @click.command( 15 | short_help="Generate inventory files that the Quarto " 16 | "`interlink` extension can use to auto-link to other docs." 17 | ) 18 | @click.argument("config", default="_quarto.yml") 19 | @click.option("--dry-run", is_flag=True, default=False) 20 | @click.option("--fast", is_flag=True, default=False) 21 | def interlinks(config, dry_run, fast): 22 | """ 23 | Generate inventory files that the Quarto `interlink` extension can use to 24 | auto-link to other docs. 25 | 26 | The files are stored in a cache directory, which defaults to _inv. 27 | The Quarto extension `interlinks` will look for these files in the cache 28 | and add links to your docs accordingly. 
29 | """ 30 | 31 | # config loading ---- 32 | cfg = yaml.safe_load(open(config)) 33 | interlinks = cfg.get("interlinks", {}) 34 | 35 | p_root = Path(config).parent 36 | 37 | if not interlinks: 38 | print("No interlinks field found in your quarto config. Quitting.") 39 | return 40 | 41 | # interlinks config settings ---- 42 | cache = p_root / "_inv" 43 | cfg_fast = interlinks.get("fast", False) 44 | 45 | fast = cfg_fast or fast 46 | 47 | for k, v in interlinks["sources"].items(): 48 | # don't include user's own docs (users don't need to specify their own docs in 49 | # the interlinks config anymore, so this is for backwards compat). 50 | if v["url"] == "/": 51 | continue 52 | 53 | url = v["url"] + v.get("inv", "objects.inv") 54 | 55 | inv = inventory_from_url(url) 56 | 57 | p_dst = cache / f"{k}_objects" 58 | p_dst.parent.mkdir(exist_ok=True, parents=True) 59 | 60 | if fast: 61 | # use sphobjinv to dump inv in txt format 62 | df = inv.data_file() 63 | soi.writebytes(p_dst.with_suffix(".txt"), df) 64 | 65 | else: 66 | # old behavior of converting to custom json format 67 | convert_inventory(inv, p_dst.with_suffix(".json")) 68 | 69 | 70 | cli.add_command(interlinks) 71 | 72 | 73 | if __name__ == "__main__": 74 | cli() 75 | -------------------------------------------------------------------------------- /res/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 19 | Delft-FIAT Logo 21 | 41 | 43 | 47 | 53 | 54 | 56 | 57 | 59 | Delft-FIAT Logo 60 | 2024-01-22 61 | 62 | 63 | B.W. 
Dalmijn 64 | 65 | 66 | 67 | 68 | Deltares 69 | 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /src/fiat/gis/util.py: -------------------------------------------------------------------------------- 1 | """Util of GIS module.""" 2 | 3 | from math import floor 4 | 5 | 6 | def world2pixel( 7 | gtf: tuple, 8 | x: float | int, 9 | y: float | int, 10 | ): 11 | """Calculate the pixel location based on coordinates. 12 | 13 | (Thanks to the [ogr cookbook]\ 14 | (https://pcjericks.github.io/py-gdalogr-cookbook/index.html)!) 15 | 16 | Parameters 17 | ---------- 18 | gtf : tuple 19 | The geotransform of a grid dataset. 20 | Has the following shape: (left, xres, xrot, upper, yrot, yres). 21 | x : float | int 22 | The x coordinates of a point 23 | y : float | int 24 | The y coordinates of a point 25 | 26 | Returns 27 | ------- 28 | tuple 29 | Row and column indices. 30 | 31 | Examples 32 | -------- 33 | ```Python 34 | # Load a dataset 35 | gs = fiat.fio.GridSource() 36 | # Get the geotransform 37 | gtf = gs.geotransform 38 | # Calculate the indices 39 | row, col = world2pixel(gtf, , ) 40 | ``` 41 | """ 42 | ulX = gtf[0] 43 | ulY = gtf[3] 44 | xDist = gtf[1] 45 | yDist = gtf[5] 46 | coorX = floor((x - ulX) / xDist) 47 | coorY = floor((y - ulY) / yDist) 48 | return (coorX, coorY) 49 | 50 | 51 | def pixel2world( 52 | gtf: tuple, 53 | x: int, 54 | y: int, 55 | ): 56 | """Calculate coordinates based on pixel location. 57 | 58 | (Thanks to the [ogr cookbook]\ 59 | (https://pcjericks.github.io/py-gdalogr-cookbook/index.html)!) 60 | 61 | Parameters 62 | ---------- 63 | gtf : tuple 64 | The geotransform of a grid dataset. 65 | Has the following shape: (left, xres, xrot, upper, yrot, yres). 66 | x : int 67 | Column number of the pixel 68 | y : int 69 | Row number of the pixel 70 | 71 | Returns 72 | ------- 73 | tuple 74 | Return the x, y coordinates of the upper left corner of the cell. 
75 | 76 | Examples 77 | -------- 78 | ```Python 79 | # Load a dataset 80 | gs = fiat.fio.GridSource() 81 | # Get the geotransform 82 | gtf = gs.geotransform 83 | # Calculate the coordinates 84 | x, y = pixel2world(gtf, , ) 85 | ``` 86 | """ 87 | ulX = gtf[0] 88 | ulY = gtf[3] 89 | xDist = gtf[1] 90 | yDist = gtf[5] 91 | coorX = ulX + (x * xDist) 92 | coorY = ulY + (y * yDist) 93 | return (coorX, coorY) 94 | -------------------------------------------------------------------------------- /docs/user_guide/data/hazard.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hazard data" 3 | --- 4 | FIAT computes the impact of a flood event or the risk of flooding on a community by using flood maps as hazard input. Flood maps describe the potential flood extent and magnitude in specific geographic areas. Currently, FIAT is only 5 | 6 | These flood maps can be obtained from various sources. The [The Global Flood Database](http://global-flood-database.cloudtostreet.info/) provides an extensive amount of global flood hazard mapping products. 7 | 8 | ::: {.callout-tip} 9 | The user can also create their own flood model using the [**SFINCS model**](https://sfincs.readthedocs.io/en/latest/index.html) 10 | ::: 11 | 12 | The user is free to run the model with a variety of flood scenarios, e.g., including mitigation and adaptation measures over a range of future climate conditions, as long as flood maps are available for those scenarios. The flood maps can be either **flood elevation** or **flood depth** maps. 13 | 14 | 15 | ::: {.callout-caution} 16 | **Flood elevation maps** use the **datum** as a reference point, whereas **flood depth maps** use the **ground elevation** to refer to. The datum of the elevation map must be the same as the one used to obtain the ground elevation in the exposure data. 
17 | ::: 18 | 19 | With FIAT, the user has the option to assess the impact of a single flood event or the flood risk based on a set of events with multiple return periods. To do the latter, the user must provide multiple hazard maps with the same extent and projection. 20 | 21 | In each case, some requirements must be met. The hazard files of any map should be made available as a **raster file**, see the acceptable file types at the bottom of this page. It is recommended to place the hazard files in the 'hazard' folder, see the [folder structure](index.qmd#folder-structure). The file name is free to be chosen by the user and must simply be refered to in the [settings.toml](../settings/index.qmd) configuration file. 22 | 23 | ## Types of maps 24 | FIAT knows two types of hazard maps: 25 | 26 | - [Event maps](hazard_maps.qmd#event) 27 | - [Risk maps](hazard_maps.qmd#risk) 28 | 29 | Risk maps are mainly used to calculate the EAD (Estimated Annual Damages). 30 | If `risk` is not set to `true` in the [settings file](../settings/index.qmd), the supplied map will be treated as an event map. 
def test_gridsource(grid_event_data):
    """Check GridSource attributes and a pickling round-trip.

    `grid_event_data` is a fixture (see conftest.py); presumably a single
    band 10x10 grid in EPSG:4326 — confirm against the test data generator.
    """
    # Do attribute checks
    assert grid_event_data.size == 1

    # Scale bounds to integers to avoid exact floating-point comparison.
    bounds = grid_event_data.bounds
    bounds = [math.ceil(item * 10000) for item in bounds]
    assert bounds == [43500, 44500, 519500, 520500]

    assert grid_event_data.chunk == (10, 10)
    assert grid_event_data.shape == (10, 10)

    # The spatial reference should identify as EPSG:4326.
    srs = grid_event_data.srs
    assert srs.GetAuthorityCode(None) == "4326"

    # Structure should be able to be pickled
    reduced = pickle.dumps(grid_event_data)
    # Rebuild it and verify the shape survived the round-trip
    rebuild = pickle.loads(reduced)
    assert rebuild.shape == (10, 10)
def test_parse_run(mocker, cli_parser):
    """Exercise argument parsing of the `run` subcommand.

    `cli_parser` is a fixture providing the fiat argparse parser; the
    parser's `exit` is patched so invalid input cannot abort the test run.
    """
    parser = cli_parser
    # Prevent argparse from calling sys.exit() on error/help output.
    mocker.patch("fiat.cli.main.argparse.ArgumentParser.exit")
    # Bare `run`: defaults for threads and the verbosity counters.
    args = parser.parse_args(args=["run"])
    assert args.command == "run"
    assert args.threads is None
    assert args.quiet == 0
    assert args.verbose == 0

    # Thread count option.
    args = parser.parse_args(args=["run", "-t", "4"])
    assert args.threads == 4

    # -q/-v are repeatable count flags.
    args = parser.parse_args(args=["run", "-qq", "-v"])
    assert args.quiet == 2
    assert args.verbose == 1

    # `-d key=value` entries are collected into a dict.
    args = parser.parse_args(
        args=["run", "-d", "output.path=other", "-d", "some_var=some_value"]
    )
    assert isinstance(args.set_entry, dict)
    assert len(args.set_entry) == 2
    assert "output.path" in args.set_entry
def test_cli_run():
    """The `run` subcommand prints its usage header on `--help`."""
    proc = subprocess.run(
        ["fiat", "run", "--help"],
        check=True,
        capture_output=True,
        text=True,
    )
    assert proc.returncode == 0
    first_line = proc.stdout.split("\n")[0]
    assert first_line.startswith("Usage:")
# Help message. Fix: the usage synopsis previously omitted the documented
# -s (shell type) option.
function help_message {
    echo "Building FIAT on linux systems."
    echo "Usage: $0 [-b value] [-s value] [-h | --help]"
    echo ""
    echo "Options:"
    echo $'\t'"-b"$'\t'"Binary name for python environment creation (default: $bin_var)"
    echo $'\t'"-s"$'\t'"Shell type (default: $shell_var)"
    echo $'\t'"-h,"$'\t'"Display this help message"$'\n\t'"--help"
}
-e $executable ]; then 69 | echo "Cannot find binary for: $bin_var" 70 | exit 1 71 | fi 72 | fi 73 | 74 | echo "INFO: Executable found here: $executable" 75 | 76 | bin_dir=$(dirname $(dirname $executable)) 77 | 78 | if [ $bin_var == "conda" ]; then 79 | source $bin_dir/etc/profile.d/conda.sh 80 | conda activate fiat_build 81 | export PROJ_LIB=$bin_dir/envs/fiat_build/share/proj 82 | elif [ $bin_var == "pixi" ]; then 83 | pixi shell-hook --manifest-path $PROJECTPATH/pyproject.toml -s $shell_var -e build-linux > $PROJECTPATH/pixi-hook.rc 84 | source $PROJECTPATH/pixi-hook.rc 85 | export PROJ_LIB=$PIXIPATH/envs/build/share/proj 86 | fi 87 | 88 | # Do the thing! 89 | echo "INFO: Building binary.." 90 | pip install -e "$SCRIPTPATH/.." 91 | pyinstaller "$SCRIPTPATH/build.spec" --distpath $SCRIPTPATH/../bin --workpath $SCRIPTPATH/../bin/intermediates 92 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Documentation 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | tags: 9 | - v* 10 | paths: 11 | - docs/* 12 | - src/fiat/* 13 | - .testdata/* 14 | - pyproject.toml 15 | - .github/workflows/docs.yml 16 | pull_request: 17 | branches: 18 | - master 19 | paths: 20 | - docs/* 21 | - src/fiat/* 22 | - .testdata/* 23 | - pyproject.toml 24 | - .github/workflows/docs.yml 25 | 26 | jobs: 27 | docs: 28 | defaults: 29 | run: 30 | shell: bash -l {0} 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | os: ["ubuntu-latest"] 36 | include: 37 | - os: ubuntu-latest 38 | label: linux-64 39 | 40 | name: ${{ matrix.label }} 41 | runs-on: ${{ matrix.os }} 42 | 43 | steps: 44 | - name: Checkout code 45 | uses: actions/checkout@v4 46 | 47 | - name: Setup pixi env 48 | uses: prefix-dev/setup-pixi@v0.8.13 49 | with: 50 | pixi-version: "v0.49.0" 51 | environments: docs 52 | locked: false 53 | cache: true 54 | cache-write: ${{ 
github.event_name == 'push' && github.ref_name == 'master' }} 55 | 56 | - name: Install quarto extensions 57 | run: | 58 | sudo apt install -y expect 59 | pixi shell-hook --manifest-path pyproject.toml -s bash -e docs > pixi-hook.rc 60 | source pixi-hook.rc 61 | cd docs 62 | chmod u+x _scripts/add_packages.sh 63 | _scripts/add_packages.sh 64 | 65 | - name: Setup testdata 66 | run: | 67 | pixi run -e docs generate-data 68 | 69 | - name: Generate docs 70 | run: | 71 | pixi run -e docs docs 72 | echo "DOC_VERSION=$(pixi run -e docs python -c 'from fiat import __version__ as v; print("dev" if "dev" in v else "v"+v.replace(".dev",""))')" >> $GITHUB_ENV 73 | 74 | - name: Upload to GitHub Pages 75 | if: ${{ github.event_name != 'pull_request' && !github.event.act }} 76 | uses: peaceiris/actions-gh-pages@v4.0.0 77 | with: 78 | github_token: ${{ secrets.GITHUB_TOKEN }} 79 | publish_dir: ./docs/_site 80 | destination_dir: ./${{ env.DOC_VERSION }} 81 | keep_files: false 82 | full_commit_message: Deploy ${{ env.DOC_VERSION }} to GitHub Pages 83 | 84 | - name: Trigger Dispatch 85 | if: ${{ github.event_name != 'pull_request' && !github.event.act }} 86 | uses: peter-evans/repository-dispatch@v3 87 | with: 88 | token: ${{ secrets.GITHUB_TOKEN }} 89 | repository: ${{ github.repository }} 90 | event-type: docs-updated 91 | -------------------------------------------------------------------------------- /src/fiat/job.py: -------------------------------------------------------------------------------- 1 | """Creating run jobs in fiat.""" 2 | 3 | from concurrent.futures import ProcessPoolExecutor, wait 4 | from itertools import product 5 | from multiprocessing.context import SpawnContext 6 | from typing import Callable, Generator 7 | 8 | from fiat.log import spawn_logger 9 | 10 | logger = spawn_logger("fiat.job") 11 | 12 | 13 | def generate_jobs( 14 | d: dict, 15 | tied: tuple | list = None, 16 | ) -> dict: # type: ignore 17 | """Generate jobs. 
18 | 19 | Parameters 20 | ---------- 21 | d : dict 22 | Dictionary of elements, either containing single values or iterables. 23 | tied : tuple | list, optional 24 | Values in the dictionary that depend on each other. 25 | 26 | Returns 27 | ------- 28 | dict 29 | Dictionary containing the job. 30 | """ 31 | arg_list = [] 32 | single_var = None 33 | if tied is not None: 34 | single_var = "_".join(tied) 35 | d[single_var] = list(zip(*[d[var] for var in tied])) 36 | for var in tied: 37 | del d[var] 38 | for arg in d.values(): 39 | if not isinstance(arg, (tuple, list, range, zip)): 40 | arg = [ 41 | arg, 42 | ] 43 | arg_list.append(arg) 44 | for element in product(*arg_list): 45 | kwargs = dict(zip(d.keys(), element)) 46 | if single_var is not None: 47 | values = kwargs[single_var] 48 | for var, value in zip(tied, values): 49 | kwargs[var] = value 50 | del kwargs[single_var] 51 | yield kwargs 52 | 53 | 54 | def execute_pool( 55 | ctx: SpawnContext, 56 | func: Callable, 57 | jobs: Generator, 58 | threads: int, 59 | ): 60 | """Execute a python process pool. 61 | 62 | Parameters 63 | ---------- 64 | ctx : SpawnContext 65 | Context of the current process. 66 | func : Callable 67 | To be executed function. 68 | jobs : Generator 69 | A job generator. Returns single dictionaries. 70 | threads : int 71 | Number of threads. 
72 | """ 73 | # If there is only one thread needed, execute in the main process 74 | res = [] 75 | if threads == 1: 76 | for job in jobs: 77 | r = func(**job) 78 | res.append(r) 79 | return res 80 | 81 | # If there are more threads needed however 82 | processes = [] 83 | # Setup the multiprocessing pool 84 | pool = ProcessPoolExecutor( 85 | max_workers=threads, 86 | mp_context=ctx, 87 | ) 88 | 89 | # Go through all the jobs 90 | for job in jobs: 91 | pr = pool.submit( 92 | func, 93 | **job, 94 | ) 95 | processes.append(pr) 96 | 97 | # wait for all jobs to conclude 98 | wait(processes) 99 | 100 | # Ask for the result to see if everything went well 101 | for pr in processes: 102 | r = pr.result() 103 | res.append(r) 104 | 105 | pool.shutdown(wait=False) 106 | 107 | return res 108 | -------------------------------------------------------------------------------- /.build/fiat_installer.iss: -------------------------------------------------------------------------------- 1 | ; Script generated by the Inno Script Studio Wizard. 2 | ; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! 3 | 4 | #define MyAppName "Delft-FIAT" 5 | #define MyAppVersion "0.1.0.dev" 6 | #define MyAppPublisher "Deltares" 7 | #define MyAppExeName "fiat.exe" 8 | #define PathToSelf ExtractFileDir(SourcePath) 9 | 10 | [Setup] 11 | ; NOTE: The value of AppId uniquely identifies this application. 12 | ; Do not use the same AppId value in installers for other applications. 13 | ; (To generate a new GUID, click Tools | Generate GUID inside the IDE.) 
14 | AppId={{7333C41A-2EBF-478B-95DD-0A437BD1F7FC}} 15 | AppName={#MyAppName} 16 | AppVersion={#MyAppVersion} 17 | ;AppVerName={#MyAppName} {#MyAppVersion} 18 | AppPublisher={#MyAppPublisher} 19 | DefaultDirName={commonpf}\{#MyAppPublisher}\{#MyAppName}\{#MyAppVersion} 20 | DefaultGroupName={#MyAppName} 21 | AllowNoIcons=yes 22 | OutputDir="{#PathToSelf}\..\bin" 23 | OutputBaseFilename={#MyAppName}-{#MyAppVersion} 24 | Compression=lzma 25 | SolidCompression=yes 26 | LicenseFile="{#PathToSelf}\..\res\fiat_agree.rtf" 27 | ArchitecturesInstallIn64BitMode=x64 28 | 29 | [Code] 30 | var PrintButton: TButton; 31 | 32 | procedure PrintButtonClick(Sender: TObject); 33 | var ResultCode: Integer; 34 | begin 35 | ExtractTemporaryFile('terms_of_agreement.rtf'); 36 | if not ShellExec('', ExpandConstant('{tmp}\\terms_of_agreement.rtf'), '', '', SW_SHOW, ewNoWait, ResultCode) then 37 | MsgBox('Could not open the license file.', mbError, MB_OK); 38 | end; 39 | 40 | procedure InitializeWizard; 41 | begin 42 | PrintButton := TButton.Create(WizardForm); 43 | PrintButton.Caption := '&Print...'; 44 | PrintButton.Anchors := [akLeft, akBottom]; 45 | PrintButton.Left := WizardForm.OuterNotebook.Left + WizardForm.InnerNotebook.Left; 46 | PrintButton.Top := WizardForm.NextButton.Top; 47 | PrintButton.Width := WizardForm.NextButton.Width; 48 | PrintButton.Height := WizardForm.NextButton.Height; 49 | PrintButton.OnClick := @PrintButtonClick; 50 | PrintButton.Parent := WizardForm.NextButton.Parent; 51 | end; 52 | 53 | procedure CurPageChanged(CurPage: Integer); 54 | begin 55 | PrintButton.Visible := CurPage = wpLicense; 56 | end; 57 | 58 | [Languages] 59 | Name: "english"; MessagesFile: "compiler:Default.isl" 60 | Name: "dutch"; MessagesFile: "compiler:Languages\Dutch.isl" 61 | 62 | [Tasks] 63 | Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked 64 | 65 | [Files] 66 | Source: "{#PathToSelf}\..\bin\Release\fiat.exe"; DestDir: 
// Probe `url` with an HTTP HEAD request.
// Resolves with `true` on status 200, `false` on 404, and rejects on
// any other status or on a network failure.
function checkPathExists(url) {
    return new Promise(function (resolve, reject) {
        var request = new XMLHttpRequest();
        request.open('HEAD', url, true);
        request.onreadystatechange = function () {
            // Only act once the request has fully completed
            if (request.readyState !== 4) {
                return;
            }
            switch (request.status) {
                case 200:
                    resolve(true);
                    break;
                case 404:
                    resolve(false);
                    break;
                default:
                    reject(new Error(request.statusText));
            }
        };
        request.onerror = function () {
            reject(new Error('Network Error'));
        };
        request.send();
    });
}
listener to each item 37 | dropdownItems[i].addEventListener('click', function(event) { 38 | // Prevent default action 39 | event.preventDefault(); 40 | 41 | // Get the clicked item's text 42 | var itemText = this.querySelector('.dropdown-text').textContent; 43 | var itemHref = this.getAttribute('href') 44 | 45 | // Loop through each dropdown item again to find a match in the current page's path 46 | for (var j = 0; j < dropdownItems.length; j++) { 47 | // Get the dropdown item's text 48 | var dropdownText = dropdownItems[j].querySelector('.dropdown-text').textContent; 49 | console.log('Dropdown item:', dropdownText); 50 | 51 | // Find the index of the dropdownText in the current page's path 52 | var index = currentPagePath.indexOf(dropdownText); 53 | 54 | // If the dropdownText is found in the current page's path 55 | if (index !== -1) { 56 | // Construct the new URL relative to the dropdownText and append the itemText 57 | addElements = currentPagePath.slice(index + 1, ) 58 | relativePath = '../'.repeat(addElements.length) 59 | var newUrl = relativePath + itemText + '/' + addElements.join('/') 60 | console.log('Clicked item:', newUrl); 61 | 62 | // Redirect to the new URL 63 | checkPathExists(newUrl) 64 | .then(exists => { 65 | if (exists) { 66 | window.location.href = newUrl; 67 | } else { 68 | console.log('Path does not exist'); 69 | } 70 | }) 71 | 72 | // Exit the loop 73 | break; 74 | } 75 | } 76 | }); 77 | } 78 | }); 79 | -------------------------------------------------------------------------------- /docs/examples/single_event.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e31ac578", 6 | "metadata": {}, 7 | "source": [ 8 | "# Single event\n", 9 | "This is a an example of running a tiny single event FIAT model.\n", 10 | "\n", 11 | "First of all, let's make sure that there is some actual testdata." 
12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "id": "109cd4e5", 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "! python ../../.testdata/create_test_data.py" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "id": "86356bba", 27 | "metadata": {}, 28 | "source": [ 29 | "Now that we have some testdata, let's take a look at the settings file." 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "id": "93a9fde1", 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "with open(\"../../.testdata/geom_event.toml\", \"r\") as r:\n", 40 | " settings = r.read()\n", 41 | "print(settings)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "id": "a64a1894", 47 | "metadata": {}, 48 | "source": [ 49 | "Now let's execute the model!" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "id": "88969f68", 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "! fiat run ../../.testdata/geom_event.toml" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "id": "46d2f0ba", 65 | "metadata": {}, 66 | "source": [ 67 | "Let's take a quick look at the output." 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "id": "6ef31c41", 74 | "metadata": {}, 75 | "outputs": [], 76 | "source": [ 77 | "# Import a method\n", 78 | "from fiat.fio import open_csv\n", 79 | "from pathlib import Path\n", 80 | "\n", 81 | "# check the output\n", 82 | "out = open_csv(Path(\"../../.testdata/output/geom_event\", \"output.csv\"), index=\"object_id\")\n", 83 | "print(out.columns)" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "id": "c79e1522", 89 | "metadata": {}, 90 | "source": [ 91 | "Assert that the output is what we would expect." 
92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "id": "b69e8004", 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [ 101 | "assert float(out[2, \"total_damage\"]) == 740\n", 102 | "assert float(out[3, \"total_damage\"]) == 1038" 103 | ] 104 | } 105 | ], 106 | "metadata": { 107 | "kernelspec": { 108 | "display_name": "fiat_dev", 109 | "language": "python", 110 | "name": "python3" 111 | }, 112 | "language_info": { 113 | "codemirror_mode": { 114 | "name": "ipython", 115 | "version": 3 116 | }, 117 | "file_extension": ".py", 118 | "mimetype": "text/x-python", 119 | "name": "python", 120 | "nbconvert_exporter": "python", 121 | "pygments_lexer": "ipython3", 122 | "version": "3.13.1" 123 | } 124 | }, 125 | "nbformat": 4, 126 | "nbformat_minor": 5 127 | } 128 | -------------------------------------------------------------------------------- /docs/user_guide/data/supported.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: Supported formats 3 | --- 4 | 5 | ## Settings file 6 | For the settings file, only one format is supported: 7 | 8 | - [toml](https://toml.io/en/) 9 | 10 | ## Comma seperated values (CSV) 11 | Your run of the mill csv files. 12 | As long as one provides the delimiter in the settings file, anything goes. 13 | For example: 14 | 15 | - comma 16 | - semicolon 17 | - tab 18 | 19 | The amount of columns are determined by length of the header or the first line of data. 
20 | 21 | ## Geometry files 22 | Below a list is presented of supported drivers in regards to the vector files: 23 | 24 | ### Reading 25 | :::: {.columns} 26 | ::: {.center width="70%" style="height: 500px; overflow: auto;"} 27 | ```{python} 28 | #| echo: false 29 | #| label: tbl-geom-read-drivers 30 | #| tbl-cap: "Available drivers for reading vector data" 31 | 32 | import pandas as pd 33 | from fiat.util import GEOM_READ_DRIVER_MAP 34 | from IPython.display import HTML 35 | from osgeo import gdal 36 | 37 | dr_map = dict(sorted(GEOM_READ_DRIVER_MAP.items())) 38 | 39 | long_name = [] 40 | for _dr in dr_map.values(): 41 | dr = gdal.GetDriverByName(_dr) 42 | long_name.append(dr.LongName) 43 | dr = None 44 | 45 | df = pd.DataFrame( 46 | data={ 47 | "File extension": dr_map.keys(), 48 | "Driver": dr_map.values(), 49 | "Long name": long_name, 50 | } 51 | ) 52 | 53 | HTML(df.to_html(index=False)) 54 | ``` 55 | ::: 56 | :::: 57 | 58 | ### Writing 59 | :::: {.columns} 60 | ::: {.center width="70%" style="height: 500px; overflow: auto;"} 61 | ```{python} 62 | #| echo: false 63 | #| label: tbl-geom-write-drivers 64 | #| tbl-cap: "Available drivers for writing vector data" 65 | 66 | import pandas as pd 67 | from fiat.util import GEOM_WRITE_DRIVER_MAP 68 | from IPython.display import HTML 69 | from osgeo import gdal 70 | 71 | dr_map = dict(sorted(GEOM_WRITE_DRIVER_MAP.items())) 72 | 73 | long_name = [] 74 | for _dr in dr_map.values(): 75 | dr = gdal.GetDriverByName(_dr) 76 | long_name.append(dr.LongName) 77 | dr = None 78 | 79 | df = pd.DataFrame( 80 | data={ 81 | "File extension": dr_map.keys(), 82 | "Driver": dr_map.values(), 83 | "Long name": long_name, 84 | } 85 | ) 86 | 87 | HTML(df.to_html(index=False)) 88 | ``` 89 | ::: 90 | :::: 91 | 92 | ## Gridded data files 93 | Below a list is presented of supported drivers (both reading and writing) in regards to the raster files: 94 | 95 | :::: {.columns} 96 | ::: {.center width="70%" style="height: 500px; overflow: auto;"} 97 | 
```{python} 98 | #| echo: false 99 | #| label: tbl-grid-drivers 100 | #| tbl-cap: "Available drivers for gridded data" 101 | 102 | import pandas as pd 103 | from fiat.util import GRID_DRIVER_MAP 104 | from IPython.display import HTML 105 | from osgeo import gdal 106 | 107 | dr_map = dict(sorted(GRID_DRIVER_MAP.items())) 108 | 109 | long_name = [] 110 | for _dr in dr_map.values(): 111 | dr = gdal.GetDriverByName(_dr) 112 | long_name.append(dr.LongName) 113 | dr = None 114 | 115 | df = pd.DataFrame( 116 | data={ 117 | "File extension": dr_map.keys(), 118 | "Driver": dr_map.values(), 119 | "Long name": long_name, 120 | } 121 | ) 122 | 123 | HTML(df.to_html(index=False)) 124 | ``` 125 | ::: 126 | :::: 127 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Publish to PyPI 3 | 4 | on: 5 | release: 6 | types: 7 | - published 8 | push: 9 | tags: 10 | - v* 11 | workflow_dispatch: 12 | 13 | jobs: 14 | build-distributions: 15 | 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - name: Checkout code 20 | uses: actions/checkout@v4 21 | with: 22 | fetch-depth: 0 23 | 24 | - uses: actions/setup-python@v5 25 | name: Install Python 26 | with: 27 | python-version: "3.13" 28 | 29 | - name: generate env 30 | run: | 31 | pip install build tomli twine wheel 32 | 33 | - name: Build artifacts 34 | run: | 35 | python -m build --sdist 36 | python -m build --wheel 37 | python -m twine check dist/* 38 | 39 | - uses: actions/upload-artifact@v4 40 | with: 41 | name: releases 42 | path: dist 43 | 44 | test-built-dist: 45 | needs: build-distributions 46 | 47 | defaults: 48 | run: 49 | shell: bash -l {0} 50 | 51 | strategy: 52 | fail-fast: false 53 | matrix: 54 | os: ["ubuntu-latest"] 55 | include: 56 | - os: ubuntu-latest 57 | label: linux-64 58 | runs-on: ${{ matrix.os }} 59 | 60 | steps: 61 | - name: Checkout code 62 | uses: 
actions/checkout@v4 63 | 64 | - name: Setup pixi env 65 | uses: prefix-dev/setup-pixi@v0.8.13 66 | with: 67 | pixi-version: "v0.49.0" 68 | environments: slim 69 | locked: false 70 | cache: true 71 | cache-write: ${{ github.event_name == 'push' && github.ref_name == 'master' }} 72 | 73 | - name: Download artifact 74 | uses: actions/download-artifact@v4 75 | with: 76 | name: releases 77 | path: dist 78 | 79 | - name: List contents of built dist 80 | run: | 81 | ls -ltrh 82 | ls -ltrh dist 83 | 84 | - name: Verify the built dist/wheel is valid 85 | run: | 86 | pixi run python -m pip install dist/delft_fiat*.whl 87 | pixi run fiat --help 88 | 89 | upload-to-test-pypi: 90 | needs: test-built-dist 91 | 92 | if: github.event_name == 'push' 93 | 94 | runs-on: ubuntu-latest 95 | 96 | environment: 97 | name: testpypi 98 | url: https://test.pypi.org/p/Delft-FIAT 99 | 100 | permissions: 101 | id-token: write 102 | 103 | steps: 104 | - name: Download the sdist and wheel 105 | uses: actions/download-artifact@v4 106 | with: 107 | name: releases 108 | path: dist/ 109 | - name: Publish package to TestPyPI 110 | uses: pypa/gh-action-pypi-publish@release/v1 111 | with: 112 | repository-url: https://test.pypi.org/legacy/ 113 | 114 | upload-to-pypi: 115 | needs: test-built-dist 116 | 117 | if: ${{ github.event_name == 'release' && !github.event.act }} 118 | 119 | runs-on: ubuntu-latest 120 | 121 | environment: 122 | name: pypi 123 | url: https://pypi.org/p/Delft-FIAT 124 | 125 | permissions: 126 | id-token: write 127 | 128 | steps: 129 | - uses: actions/download-artifact@v4 130 | with: 131 | name: releases 132 | path: dist 133 | - name: Publish package to PyPI 134 | uses: pypa/gh-action-pypi-publish@release/v1 135 | -------------------------------------------------------------------------------- /docs/user_guide/index.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "User guide" 3 | include-in-header: 4 | - text: | 5 | 6 | --- 7 | 
8 | ::: column-page-inset-right 9 | {{< include /_static/listing_code.html >}} 10 | ```{=html} 11 | 12 | 29 | 30 | ``` 31 | ::: 32 | 33 | ## Quick start 34 | FIAT computes the (economic) damage and risk of flood events at a specified geographic location based on flood inundation data, exposured assets and vulnerability functions. The model can be configured with a settings file and data inputs: 35 | 36 | - [Settings file](settings/index.qmd) 37 | - [Data](data/index.qmd) 38 | 39 | The HydroMT plugin [HydroMT-FIAT](https://deltares.github.io/hydromt_fiat/latest) can be used to set up the FIAT model but that is not compulsory. If a user sets up their own FIAT model, it is recommended to save the data into the same [folder structure](data/index.qmd#folder-structure) that HydroMT-FIAT creates. 40 | 41 | ## The models 42 | The data is used by either one or both of the following models: 43 | 44 | - GeomModel 45 | - GridModel 46 | 47 | Some more information about the models can be found [here](../info/models.qmd). 48 | 49 | ## General User Information 50 | 51 | FIAT derives **damages and risk** at asset-level based on flood maps and additional inputs such as depth-damage functions, asset locations and their maximum potential damages. 52 | For each asset specified in the exposure dataset, the water depth or level is sampled from the flood map at the location of the assets. 53 | 54 | ::: {.callout-note} 55 | Water elevations are converted to water depths using the ground elevation of each asset. 56 | ::: 57 | 58 | See @fig-fiat for an overview of the FIAT workflow. To obtain the flood inundation level, FIAT extracts the water depth or level at the location of each asset. For line or polygon assets, either the average or maximum water depth or level and the fraction of the asset are extracted from the hazard map. The **inundation depth** within buildings or on top of for example, roads, is obtained by subtracting the **ground floor height** from the **water depth**. 
FIAT derives the damage fraction for each asset using its inundation depth and interpolating over its depth-damage curve. Thereafter, the damage to the asset is calculated as the product of the maximum potential damage and the damage fraction. In case an asset is only partially flooded, the damages will be reduced by the dry fraction of the building. Instead of single events, the user can also provide return-period flood maps as input. Hence, FIAT calculates and integrates the associated return-period damages to derive the expected annual damages. 59 | 60 | 61 | ![**FIAT Workflow.** 62 | The damage of the flood **water level** in each object is determined by flood depth-damage functions, which relate the water level to the maximum potential damage of an asset, returning a **damage fraction**. The damage fraction is multiplied with the **max. potential damage** to obtain a monetary **damage per object**.](../_static/images/FIAT_Scheme.svg){#fig-fiat} 63 | -------------------------------------------------------------------------------- /docs/user_guide/data/hazard_maps.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: Types of hazard maps 3 | --- 4 | ::: {.callout-tip} 5 | The most advised format for both event and risk is netCDF. 6 | ::: 7 | 8 | ## Event 9 | Event maps are generally supplied in singular fashion (i.e. one band per event map). 10 | This band just simply contains the hazard values per cell. Let's have a quick peek at the data using [gdalinfo](https://gdal.org/programs/gdalinfo.html): 11 | 12 | :::: {.columns} 13 | ::: {style="height: 400px; overflow: auto;"} 14 | ```{python} 15 | #| echo: false 16 | !gdalinfo ../../../.testdata/hazard/event_map.nc 17 | ``` 18 | ::: 19 | :::: 20 | ::: {.callout-note} 21 | Allthough netCDF is the preferred format, GeoTIFF's are also very handy for single events. 22 | ::: 23 | 24 | As one can see, this dataset has only one band (variable). 
25 | 26 | When created, it is preferred that the event map is compressed to prevent unneccessary data on the hard drive. 27 | More information regarding the compression and others settings can be found [here](https://gdal.org/drivers/raster/netcdf.html#creation-options). 28 | 29 | ## Risk 30 | Risk maps in gerenal should contain multiple bands. 31 | 32 | These bands can be either supplied in either one of two ways: 33 | 34 | - Multiple variables (like normal bands) 35 | - As a subdataset 36 | 37 | Lets take a look quick look at the data (again with `gdalinfo`): 38 | 39 | :::: {.columns} 40 | ::: {style="height: 400px; overflow: auto;"} 41 | ```{python} 42 | #| echo: false 43 | !gdalinfo ../../../.testdata/hazard/risk_map.nc 44 | ``` 45 | ::: 46 | :::: 47 | ::: {.callout-note} 48 | Instead of seeing normal metadata, we only get an overview of the available subdatasets. 49 | ::: 50 | 51 | Multiple netCDF variables are normally not recognized as bands by the [netCDF driver](https://gdal.org/drivers/raster/netcdf.html) of GDAL. 52 | Instead they are interpreted as subdatasets (!). One can set the key `var_as_band` to `true` within the [settings file](../settings/index.qmd) 53 | to read the subdatasets as bands. 54 | 55 | ```toml 56 | [hazard] 57 | file = 58 | risk = true 59 | [hazard.settings] 60 | var_as_band = true 61 | ``` 62 | 63 | When supplied in one subdataset (all bands are within that subdataset), 64 | the `subset` variable within the [settings file](../settings/index.qmd) should be set under the header `hazard.settings`. 65 | The value set to `subset` should be the name of the desired subdataset. 66 | 67 | ```toml 68 | [hazard] 69 | file = 70 | risk = true 71 | [hazard.settings] 72 | var_as_band = false 73 | subset = 74 | ``` 75 | 76 | ### Return periods 77 | 78 | The return periods can be set in the [settings file](../settings/optional.qmd#hazard) by supplying a list via 79 | the `hazard.return_periods` entry. 
"""Only raster methods for FIAT.

Clipping (placeholder) and reprojection (warping) of gridded data via GDAL.
"""

import gc
import os
from pathlib import Path

from osgeo import gdal, osr

from fiat.fio import Grid, GridSource, open_grid
from fiat.util import NOT_IMPLEMENTED


def clip(
    band: Grid,
    gtf: tuple,
    idx: tuple,
):
    """Clip a grid. Placeholder, not implemented yet.

    Parameters
    ----------
    band : Grid
        The grid band to clip from.
    gtf : tuple
        Geotransform of the grid.
    idx : tuple
        Indices describing the region to clip.

    Raises
    ------
    NotImplementedError
        Always; this function is a placeholder.
    """
    raise NotImplementedError(NOT_IMPLEMENTED)


def reproject(
    gs: GridSource,
    dst_crs: str,
    dst_gtf: list | tuple = None,
    dst_width: int = None,
    dst_height: int = None,
    out_dir: Path | str = None,
    resample: int = 0,
) -> GridSource:
    """Reproject (warp) a grid.

    Parameters
    ----------
    gs : GridSource
        Input object.
    dst_crs : str
        Coordinate reference system (projection). An accepted format is: \
`EPSG:3857`.
    dst_gtf : list | tuple, optional
        The geotransform of the warped dataset. Must be defined in the same
        coordinate reference system as dst_crs. When defined, its only used when
        both 'dst_width' and 'dst_height' are defined.
    dst_width : int, optional
        The width of the warped dataset in pixels.
    dst_height : int, optional
        The height of the warped dataset in pixels.
    out_dir : Path | str, optional
        Output directory. If not defined, it will be inferred from the input
        object.
    resample : int, optional
        Resampling method during warping. Integer corresponds with a resampling
        method defined by GDAL. For more information: click \
[here](https://gdal.org/api/gdalwarp_cpp.html#_CPPv415GDALResampleAlg).

    Returns
    -------
    GridSource
        Output object. A lazy handle to the just-created raster file.
    """
    # Remember the open-kwargs of the source so the output can be
    # reopened with the same settings.
    _gs_kwargs = gs._kwargs

    # Fall back to the input file's directory when out_dir is missing or
    # not an existing directory (str() also covers out_dir=None).
    if not Path(str(out_dir)).is_dir():
        out_dir = gs.path.parent

    # gdal.Warp writes to an intermediate GeoTIFF; a Translate step
    # converts it back to the original suffix afterwards if needed.
    fname_int = Path(out_dir, f"{gs.path.stem}_repr.tif")
    fname = Path(out_dir, f"{gs.path.stem}_repr{gs.path.suffix}")

    out_srs = osr.SpatialReference()
    out_srs.SetFromUserInput(dst_crs)
    # Force traditional (x, y) axis order for the output SRS
    out_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

    # Only constrain the output grid when the full target geometry
    # (geotransform, width and height) was supplied.
    warp_kw = {}
    if all([item is not None for item in [dst_gtf, dst_width, dst_height]]):
        warp_kw.update(
            {
                "xRes": dst_gtf[1],
                "yRes": dst_gtf[5],
                # Bounds derived from the geotransform: (minX, minY, maxX, maxY)
                "outputBounds": (
                    dst_gtf[0],
                    dst_gtf[3] + dst_gtf[5] * dst_height,
                    dst_gtf[0] + dst_gtf[1] * dst_width,
                    dst_gtf[3],
                ),
                "width": dst_width,
                "height": dst_height,
            }
        )

    dst_src = gdal.Warp(
        str(fname_int),
        gs.src,
        srcSRS=gs.srs,
        dstSRS=out_srs,
        resampleAlg=resample,
        **warp_kw,
    )

    # Release the GDAL SRS object (file-wide convention)
    out_srs = None

    # If the source already was a GeoTIFF, the intermediate file is the
    # final result; no Translate step needed.
    # NOTE(review): this early return does not pass _gs_kwargs to
    # open_grid — presumably intentional for GeoTIFF; verify.
    if gs.path.suffix == ".tif":
        gs.close()
        dst_src = None
        return open_grid(fname_int)

    gs.close()
    # Convert the intermediate GeoTIFF to the original format
    gdal.Translate(str(fname), dst_src)
    dst_src = None
    gc.collect()

    # Remove the intermediate GeoTIFF
    os.unlink(fname_int)

    return open_grid(fname, **_gs_kwargs)
def reproject_feature(
    geometry: ogr.Geometry,
    src_crs: str,
    dst_crs: str,
) -> None:
    """Transform the geometry of a feature in place.

    The geometry is modified in place; nothing is returned. (The previous
    `-> ogr.Feature` annotation was incorrect.)

    Parameters
    ----------
    geometry : ogr.Geometry
        The geometry.
    src_crs : str
        Coordinate reference system of the feature.
    dst_crs : str
        Coordinate reference system to which the feature is transformed.
    """
    src_srs = osr.SpatialReference()
    src_srs.SetFromUserInput(src_crs)
    src_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    dst_srs = osr.SpatialReference()
    dst_srs.SetFromUserInput(dst_crs)
    # Bugfix: the axis mapping strategy was previously set on src_srs a
    # second time instead of on dst_srs. The destination SRS must also use
    # traditional (x, y) axis order, otherwise EPSG lat/long CRSs produce
    # output with swapped axes.
    dst_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

    transform = osr.CoordinateTransformation(src_srs, dst_srs)
    geometry.Transform(transform)

    # Release the GDAL objects explicitly (file-wide convention)
    src_srs = None
    dst_srs = None
    transform = None
def run_model(cfg, p):
    """Set up the output directory and run the configured FIAT model.

    Parameters
    ----------
    cfg : Config
        Model configuration; must provide `setup_output_dir` and `get`.
    p : Path
        Output directory for the model run.
    """
    # Execute
    cfg.setup_output_dir(str(p))
    model_type = cfg.get("model.type")
    if model_type == "geom":
        mod = GeomModel(cfg)
    elif model_type == "grid":
        mod = GridModel(cfg)
    else:
        # Fail with a clear message instead of an UnboundLocalError on
        # `mod` when the settings contain an unknown model type.
        raise ValueError(f"Unknown model type: {model_type!r}")
    mod.run()
def test_geom_missing(tmp_path, configs):
    """A run with missing data produces a one-line missing.log."""
    # run the model
    run_model(configs["geom_event_missing"], tmp_path)

    # Check the output for this specific case
    missing_log = Path(str(tmp_path), "missing.log")
    assert missing_log.exists()
    # Use a context manager so the file handle is closed again
    # (the original `open(...)` leaked the handle).
    with open(missing_log, "r") as missing:
        assert sum(1 for _ in missing) == 1
14092 83 | assert int(arr[7, 3] * 10) == 8700 84 | 85 | 86 | def test_grid_unequal(tmp_path, configs): 87 | # Run the model 88 | cfg = copy.deepcopy(configs["grid_unequal"]) 89 | run_model(cfg, tmp_path) 90 | # Assert the output 91 | file = Path(tmp_path, "output.nc") 92 | assert file.is_file() 93 | # Check the output 94 | gs = open_grid(file) 95 | assert gs.shape == (10, 10) 96 | gs.close() 97 | gs = None 98 | 99 | # Adjust to prefer the hazard data resolution 100 | cfg = copy.deepcopy(configs["grid_unequal"]) 101 | cfg.set("model.grid.prefer", "hazard") 102 | run_model(cfg, tmp_path) 103 | 104 | # Check the output 105 | gs = open_grid(file) 106 | assert gs.shape == (100, 100) 107 | 108 | 109 | def test_grid_risk(tmp_path, configs): 110 | # run the model 111 | run_model(configs["grid_risk"], tmp_path) 112 | 113 | # Check the output for this specific case 114 | src = gdal.OpenEx( 115 | str(Path(str(tmp_path), "ead.nc")), 116 | ) 117 | arr = src.ReadAsArray() 118 | src = None 119 | assert int(arr[1, 2] * 10) == 10920 120 | assert int(arr[5, 6] * 10) == 8468 121 | 122 | src = gdal.OpenEx( 123 | str(Path(str(tmp_path), "ead_total.nc")), 124 | ) 125 | arr = src.ReadAsArray() 126 | src = None 127 | assert int(arr[1, 2] * 10) == 10920 128 | assert int(arr[5, 6] * 10) == 8468 129 | -------------------------------------------------------------------------------- /docs/user_guide/data/vulnerability.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Vulnerability data" 3 | format: 4 | html: 5 | code-fold: true 6 | jupyter: python3 7 | --- 8 | The **vulnerability** of an asset is determined by its building type (e.g. 'residential 1-story building') and the inundation depth, also refered to as water depth, during a flood event. Different assests incur different degrees of damage at varying inundation levels. This **vulnerability** can be quantified via **flood depth-damage** functions, see for example @fig-damagefunction. 
The damage function relates the water depth to a damage fraction (a value between 0 and 1) per asset.
The method refers to the way that multiple flood values will be aggregated per asset in case the *area* method is used for deriving the inundation depth. The damage curve name must coincide with the name of the damage function defined in the [exposure data](exposure.qmd). 45 | 46 | ::: {.callout-important} 47 | Water depth units (e.g. feet or meters) must be consistent with the units of the flood hazard map and the exposure data (i.e., ground elevation, ground floor height). 48 | ::: 49 | 50 | 51 | ```{python} 52 | #| echo: false 53 | #| label: tbl-damage-curve 54 | #| tbl-cap: "Vulnerability data CSV file. The water depth must be in the outer left column followed by the damage functions. The user has the freedom to add multiple damage curves. " 55 | 56 | import pandas as pd 57 | from pathlib import Path 58 | from IPython.display import HTML 59 | 60 | file_path = Path.cwd() 61 | for _ in range(3): 62 | file_path = file_path.parent 63 | 64 | file_path = Path(file_path, ".testdata", "vulnerability", "vulnerability_curves.csv") 65 | file_path = file_path.resolve() 66 | data = pd.read_csv(file_path, comment='#') 67 | 68 | df = pd.DataFrame({ 69 | ('#UNIT=m','#METHOD','water depth'): data.iloc[:, 0], 70 | ('','mean', 'STRUCT1'): data.iloc[:, 1], 71 | ('','max','STRUCT2'): data.iloc[:, 2], 72 | }) 73 | 74 | HTML(df.to_html(index=False)) 75 | ``` 76 | 77 | 78 | Water depths may be negative for assets that incur damage below the ground floor height, and the user is free to choose any water depth increments in the CSV file. The damage functions can have any name. Multiple damage-functions can be described in the vulnerability curves CSV file by simply adding consecutive columns next to one another. 
79 | 80 | ::: {.callout-tip} 81 | You can also create damage functions with the [**HydroMT-FIAT model builder**](https://deltares.github.io/hydromt_fiat/latest/#) 82 | ::: 83 | -------------------------------------------------------------------------------- /src/fiat/methods/flood.py: -------------------------------------------------------------------------------- 1 | """Functions specifically for flood risk calculation.""" 2 | 3 | import math 4 | 5 | from numpy import isnan 6 | from osgeo import ogr 7 | 8 | from fiat.fio import Table 9 | from fiat.methods.util import AREA_METHODS 10 | 11 | MANDATORY_COLUMNS = ["ground_flht", "ground_elevtn"] 12 | MANDATORY_ENTRIES = ["hazard.elevation_reference"] 13 | NEW_COLUMNS = ["inun_depth"] 14 | 15 | 16 | def calculate_hazard( 17 | hazard: list, 18 | reference: str, 19 | ground_flht: float, 20 | ground_elevtn: float = 0, 21 | method: str = "mean", 22 | ) -> float: 23 | """Calculate the hazard value for flood hazard. 24 | 25 | Parameters 26 | ---------- 27 | hazard : list 28 | Raw hazard values. 29 | reference : str 30 | Reference, either 'dem' or 'datum'. 31 | ground_flht : float 32 | The height of the floor of an object (.e.g the door elevation). 33 | ground_elevtn : float, optional 34 | Ground height in reference to e.g. the ocean. 35 | (Needed when 'reference' is 'datum') 36 | method : str, optional 37 | Chose 'max' or 'mean' for either the maximum value or the average, 38 | by default 'mean'. 39 | 40 | Returns 41 | ------- 42 | float 43 | A representative hazard value. 44 | """ 45 | _ge = 0 46 | if reference.lower() == "datum" and not math.isnan(ground_elevtn): 47 | # The hazard data is referenced to a Datum 48 | # (e.g., for flooding this is the water elevation). 49 | _ge = ground_elevtn 50 | 51 | # Remove the negative hazard values to 0. 
52 | raw_l = len(hazard) 53 | hazard = [n - _ge for n in hazard if (n - _ge) > 0.0001] 54 | 55 | if not hazard: 56 | return math.nan, math.nan 57 | 58 | redf = 1 59 | 60 | if method.lower() == "mean": 61 | redf = len(hazard) / raw_l 62 | 63 | if len(hazard) > 1: 64 | hazard = AREA_METHODS[method.lower()](hazard) 65 | else: 66 | hazard = hazard[0] 67 | 68 | # Subtract the Ground Floor Height from the hazard value 69 | hazard -= ground_flht 70 | 71 | return hazard, redf 72 | 73 | 74 | def calculate_damage( 75 | hazard_value: float | int, 76 | red_fact: float | int, 77 | ft: ogr.Feature | list, 78 | type_dict: dict, 79 | vuln: Table, 80 | vul_min: float | int, 81 | vul_max: float | int, 82 | vul_round: int, 83 | ) -> tuple: 84 | """Calculate the damage corresponding with the hazard value. 85 | 86 | Parameters 87 | ---------- 88 | hazard_value : float | int 89 | The representative hazard value. 90 | red_fact : float | int 91 | The reduction factor. How much to compensate for the lack of touching the grid 92 | by an object (geometry). 93 | ft : ogr.Feature | list 94 | A feature or feature info (whichever has to contain the exposure data). 95 | See docs on running FIAT with an without csv. 96 | type_dict : dict 97 | The exposure types and corresponding column id's. 98 | vuln : Table 99 | Vulnerability data. 100 | vul_min : float | int 101 | Minimum value of the index of the vulnerability data. 102 | vul_max : float | int 103 | Maximum value of the index of the vulnerability data. 104 | vul_round : int 105 | Significant decimals to be used. 106 | 107 | Returns 108 | ------- 109 | tuple 110 | Damage values. 
111 | """ 112 | # unpack type_dict 113 | fn = type_dict["fn"] 114 | maxv = type_dict["max"] 115 | 116 | # Define outgoing list of values 117 | out = [0] * (len(fn) + 1) 118 | 119 | # Calculate the damage per catagory, and in total (_td) 120 | total = 0 121 | idx = 0 122 | for key, col in fn.items(): 123 | if isnan(hazard_value) or ft[col] is None or ft[col] == "nan": 124 | val = "nan" 125 | else: 126 | hazard_value = max(min(vul_max, hazard_value), vul_min) 127 | f = vuln[round(hazard_value, vul_round), ft[col]] 128 | val = f * ft[maxv[key]] * red_fact 129 | val = round(val, 2) 130 | total += val 131 | out[idx] = val 132 | idx += 1 133 | 134 | out[-1] = round(total, 2) 135 | 136 | return out 137 | -------------------------------------------------------------------------------- /test/test_checks.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | from fiat import Configurations, GeomModel, open_grid 5 | from fiat.check import ( 6 | check_config_entries, 7 | check_exp_derived_types, 8 | check_grid_exact, 9 | check_hazard_rp, 10 | check_hazard_subsets, 11 | check_internal_srs, 12 | ) 13 | from fiat.error import FIATDataError 14 | from fiat.util import MANDATORY_MODEL_ENTRIES, discover_exp_columns 15 | 16 | 17 | def test_check_config_entries(settings_files): 18 | settings = settings_files["missing_hazard"] 19 | 20 | try: 21 | cfg = Configurations.from_file(settings) 22 | check_config_entries(cfg.keys(), MANDATORY_MODEL_ENTRIES) 23 | except FIATDataError: 24 | t, v, tb = sys.exc_info() 25 | assert v.msg.startswith("Missing mandatory entries") 26 | assert v.msg.endswith("['hazard.file']") 27 | finally: 28 | assert v 29 | 30 | 31 | def test_check_exp_columns(configs): 32 | cfg = configs["geom_event"] 33 | cfg.set( 34 | "exposure.csv.file", 35 | Path(Path.cwd(), ".testdata", "exposure", "spatial_missing.csv"), 36 | ) 37 | 38 | try: 39 | model = GeomModel(cfg) 40 | model.get_exposure_meta() 
41 | except FIATDataError: 42 | t, v, tb = sys.exc_info() 43 | assert v.msg == "Missing mandatory exposure columns: ['object_id']" 44 | finally: 45 | assert v 46 | 47 | 48 | def test_check_exp_derived_types(geom_partial_data): 49 | found, found_idx, missing = discover_exp_columns( 50 | geom_partial_data._columns, type="damage" 51 | ) 52 | assert missing == ["_content"] 53 | check_exp_derived_types("damage", found, missing) 54 | 55 | found = [] 56 | try: 57 | check_exp_derived_types("damage", found, missing) 58 | except FIATDataError: 59 | t, v, tb = sys.exc_info() 60 | assert v.msg.startswith("For type: 'damage' no matching") 61 | finally: 62 | assert v 63 | 64 | 65 | def test_check_exp_index_col(configs): 66 | cfg = configs["geom_event"] 67 | cfg.set("exposure.geom.settings.index", "faulty") 68 | 69 | try: 70 | _ = GeomModel(cfg) 71 | except FIATDataError: 72 | t, v, tb = sys.exc_info() 73 | assert v.msg.startswith("Index column ('faulty') not found") 74 | finally: 75 | assert v 76 | 77 | 78 | def test_check_grid_exact(configs): 79 | exact = configs["grid_event"] 80 | equal = check_grid_exact( 81 | open_grid(exact.get("hazard.file")), 82 | open_grid(exact.get("exposure.grid.file")), 83 | ) 84 | assert equal == True 85 | 86 | unequal = configs["grid_unequal"] 87 | equal = check_grid_exact( 88 | open_grid(unequal.get("hazard.file")), 89 | open_grid(unequal.get("exposure.grid.file")), 90 | ) 91 | assert equal == False 92 | assert unequal.get("hazard.file").exists() 93 | 94 | 95 | def test_check_hazard_rp(): 96 | rp_bands = ["a", "b", "c", "d"] 97 | rp_cfg = [1, 2, 5, 10] 98 | 99 | out = check_hazard_rp(rp_bands, rp_cfg, "") 100 | assert out == [1.0, 2.0, 5.0, 10.0] 101 | 102 | rp_cfg.remove(10) 103 | try: 104 | _ = check_hazard_rp(rp_bands, rp_cfg, Path("file.ext")) 105 | except FIATDataError: 106 | t, v, tb = sys.exc_info() 107 | assert v.msg.startswith( 108 | "'file.ext': cannot determine the return periods \ 109 | for the risk calculation" 110 | ) 111 | finally: 
112 | assert v 113 | 114 | 115 | def test_check_hazard_subsets(grid_event_data, grid_risk_data): 116 | assert grid_event_data.subset_dict is None 117 | check_hazard_subsets(grid_event_data.subset_dict, "") 118 | 119 | try: 120 | assert grid_risk_data.subset_dict is not None 121 | check_hazard_subsets(grid_risk_data.subset_dict, Path("file.ext")) 122 | except FIATDataError: 123 | t, v, tb = sys.exc_info() 124 | assert v.msg.startswith("'file.ext': cannot read this file as there") 125 | assert v 126 | 127 | 128 | def test_check_internal_srs(): 129 | try: 130 | check_internal_srs(None, "file") 131 | except FIATDataError: 132 | t, v, tb = sys.exc_info() 133 | assert v.msg.startswith("Coordinate reference system is unknown for 'file'") 134 | finally: 135 | assert v 136 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | from fiat.cfg import Configurations 7 | from fiat.cli.main import args_parser 8 | from fiat.fio import open_csv, open_geom, open_grid 9 | from fiat.log import LogItem 10 | from fiat.models import GeomModel, GridModel 11 | 12 | _GEOM_FILES = [ 13 | "hazard.file", 14 | "exposure.geom.file1", 15 | "exposure.csv.file", 16 | "vulnerability.file", 17 | ] 18 | _MODELS = [ 19 | "geom_event", 20 | "geom_event_2g", 21 | "geom_event_missing", 22 | "geom_event_outside", 23 | "geom_risk", 24 | "geom_risk_2g", 25 | "grid_event", 26 | "grid_risk", 27 | "grid_unequal", 28 | "missing_hazard", 29 | "missing_models", 30 | ] 31 | _PATH = Path.cwd() 32 | 33 | 34 | @pytest.fixture 35 | def cli_parser(): 36 | return args_parser() 37 | 38 | 39 | @pytest.fixture 40 | def settings_files(): 41 | _files = {} 42 | for _m in _MODELS: 43 | p = Path(_PATH, ".testdata", f"{_m}.toml") 44 | p_name = p.stem 45 | _files[p_name] = p 46 | return _files 47 | 48 | 49 | @pytest.fixture 50 | 
def configs(settings_files): 51 | _cfgs = {} 52 | for key, item in settings_files.items(): 53 | if not key.startswith("missing"): 54 | _cfgs[key] = Configurations.from_file(item) 55 | return _cfgs 56 | 57 | 58 | ## Models 59 | @pytest.fixture 60 | def geom_tmp_model(tmp_path, configs): 61 | cfg = configs["geom_event"] 62 | settings_file = Path(tmp_path, "settings.toml") 63 | shutil.copy2(cfg.filepath, settings_file) 64 | for file in _GEOM_FILES: 65 | path = cfg.get(file) 66 | new_path = Path(tmp_path, path.parent.name) 67 | new_path.mkdir(parents=True, exist_ok=True) 68 | shutil.copy2(path, Path(new_path, path.name)) 69 | assert settings_file.is_file() 70 | return settings_file 71 | 72 | 73 | @pytest.fixture 74 | def geom_risk(configs): 75 | model = GeomModel(configs["geom_risk"]) 76 | return model 77 | 78 | 79 | @pytest.fixture 80 | def grid_risk(configs): 81 | model = GridModel(configs["grid_risk"]) 82 | return model 83 | 84 | 85 | ## Data 86 | @pytest.fixture 87 | def geom_data(): 88 | d = open_geom(Path(_PATH, ".testdata", "exposure", "spatial.geojson")) 89 | return d 90 | 91 | 92 | ## Data 93 | @pytest.fixture(scope="session") 94 | def geom_outside_data(): 95 | d = open_geom(Path(_PATH, ".testdata", "exposure", "spatial_outside.geojson")) 96 | return d 97 | 98 | 99 | @pytest.fixture(scope="session") 100 | def geom_partial_data(): 101 | d = open_csv(Path(_PATH, ".testdata", "exposure", "spatial_partial.csv"), lazy=True) 102 | return d 103 | 104 | 105 | @pytest.fixture 106 | def grid_event_data(): 107 | d = open_grid(Path(_PATH, ".testdata", "hazard", "event_map.nc")) 108 | return d 109 | 110 | 111 | @pytest.fixture(scope="session") 112 | def grid_event_highres_data(): 113 | d = open_grid(Path(_PATH, ".testdata", "hazard", "event_map_highres.nc")) 114 | return d 115 | 116 | 117 | @pytest.fixture(scope="session") 118 | def grid_exp_data(): 119 | d = open_grid(Path(_PATH, ".testdata", "exposure", "spatial.nc")) 120 | return d 121 | 122 | 123 | 
@pytest.fixture(scope="session") 124 | def grid_risk_data(): 125 | d = open_grid(Path(_PATH, ".testdata", "hazard", "risk_map.nc")) 126 | return d 127 | 128 | 129 | @pytest.fixture(scope="session") 130 | def vul_path(): 131 | path = Path(_PATH, ".testdata", "vulnerability", "vulnerability_curves.csv") 132 | assert path.exists() 133 | return path 134 | 135 | 136 | @pytest.fixture(scope="session") 137 | def vul_raw_data(vul_path): 138 | with open(vul_path, mode="rb") as f: 139 | data = f.read() 140 | return data 141 | 142 | 143 | @pytest.fixture(scope="session") 144 | def vul_data(vul_path): 145 | d = open_csv(vul_path) 146 | return d 147 | 148 | 149 | @pytest.fixture(scope="session") 150 | def vul_data_win(): 151 | d = open_csv( 152 | Path(_PATH, ".testdata", "vulnerability", "vulnerability_curves_win.csv"), 153 | ) 154 | return d 155 | 156 | 157 | @pytest.fixture 158 | def log1(): 159 | obj = LogItem(level=2, msg="Hello!") 160 | return obj 161 | 162 | 163 | @pytest.fixture 164 | def log2(): 165 | obj = LogItem(level=2, msg="Good Bye!") 166 | return obj 167 | -------------------------------------------------------------------------------- /test/test_gis.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from numpy import mean 4 | 5 | from fiat.gis import geom, grid, overlay 6 | from fiat.util import get_srs_repr 7 | 8 | 9 | def test_get_srs_repr(geom_data): 10 | out = get_srs_repr(geom_data.srs) 11 | assert out == "EPSG:4326" 12 | 13 | try: 14 | out = get_srs_repr(None) 15 | except ValueError: 16 | t, v, tb = sys.exc_info() 17 | assert v.args[0].endswith("'srs' can not be 'None'.") 18 | finally: 19 | assert v 20 | 21 | 22 | def test_clip(geom_data, grid_event_data): 23 | ft = geom_data[3] 24 | hazard = overlay.clip( 25 | ft, 26 | grid_event_data[1], 27 | grid_event_data.geotransform, 28 | ) 29 | ft = None 30 | 31 | assert len(hazard) == 6 32 | assert int(round(mean(hazard) * 100, 0)) == 170 33 | 34 | 35 | def 
test_clip_outside(geom_outside_data, grid_event_data): 36 | ft = geom_outside_data[0] 37 | hazard = overlay.clip( 38 | ft, 39 | grid_event_data[1], 40 | grid_event_data.geotransform, 41 | ) 42 | ft = None 43 | 44 | assert len(hazard) == 0 45 | 46 | ft = geom_outside_data[1] 47 | hazard = overlay.clip( 48 | ft, 49 | grid_event_data[1], 50 | grid_event_data.geotransform, 51 | ) 52 | ft = None 53 | 54 | assert len(hazard) == 2 55 | assert int(round(mean(hazard) * 100, 0)) == 270 56 | 57 | 58 | def test_clip_weighted(geom_data, grid_event_data): 59 | ft = geom_data[3] 60 | _, weights = overlay.clip_weighted( 61 | ft, 62 | grid_event_data[1], 63 | grid_event_data.geotransform, 64 | upscale=10, 65 | ) 66 | assert int(weights[0, 0] * 100) == 90 67 | 68 | _, weights = overlay.clip_weighted( 69 | ft, 70 | grid_event_data[1], 71 | grid_event_data.geotransform, 72 | upscale=100, 73 | ) 74 | assert int(weights[0, 0] * 100) == 81 75 | 76 | 77 | def test_pin(geom_data, grid_event_data): 78 | for ft in geom_data: 79 | XY = geom.point_in_geom(ft) 80 | 81 | hazard = overlay.pin( 82 | XY, 83 | grid_event_data[1], 84 | grid_event_data.geotransform, 85 | ) 86 | 87 | assert int(round(hazard[0] * 100, 0)) == 160 88 | 89 | 90 | def test_pin_outside(geom_outside_data, grid_event_data): 91 | ft = geom_outside_data[0] 92 | XY = geom.point_in_geom(ft) 93 | hazard = overlay.pin( 94 | XY, 95 | grid_event_data[1], 96 | grid_event_data.geotransform, 97 | ) 98 | ft = None 99 | 100 | assert len(hazard) == 0 101 | 102 | ft = geom_outside_data[2] 103 | XY = geom.point_in_geom(ft) 104 | hazard = overlay.pin( 105 | XY, 106 | grid_event_data[1], 107 | grid_event_data.geotransform, 108 | ) 109 | ft = None 110 | 111 | assert len(hazard) == 1 112 | assert int(round(hazard[0] * 100, 0)) == 200 113 | 114 | 115 | def test_geom_reproject(tmp_path, geom_data): 116 | dst_crs = "EPSG:3857" 117 | new_gm = geom.reproject( 118 | geom_data, 119 | dst_crs, 120 | out_dir=str(tmp_path), 121 | ) 122 | 123 | assert 
new_gm.srs.GetAuthorityCode(None) == "3857" 124 | 125 | 126 | def test_geom_reproject_single(geom_data): 127 | ft = geom_data[1] 128 | geometry = ft.GetGeometryRef() 129 | 130 | vertices = geometry.GetGeometryRef(0).GetPoints() 131 | assert 4.39 < vertices[0][0] < 4.4 132 | 133 | geom.reproject_feature( 134 | geometry, 135 | src_crs="EPSG:4326", 136 | dst_crs="EPSG:28992", 137 | ) 138 | 139 | vertices = geometry.GetGeometryRef(0).GetPoints() 140 | assert 80000 < vertices[0][0] < 90000 141 | 142 | 143 | def test_grid_reproject(tmp_path, grid_event_data): 144 | dst_crs = "EPSG:3857" 145 | new_gr = grid.reproject( 146 | grid_event_data, 147 | dst_crs, 148 | out_dir=str(tmp_path), 149 | ) 150 | 151 | assert new_gr.srs.GetAuthorityCode(None) == "3857" 152 | 153 | 154 | def test_grid_reproject_gtf(tmp_path, grid_event_data, grid_event_highres_data): 155 | assert grid_event_highres_data.shape == (100, 100) 156 | new_gr = grid.reproject( 157 | grid_event_highres_data, 158 | get_srs_repr(grid_event_data.srs), 159 | dst_gtf=grid_event_data.geotransform, 160 | dst_width=10, 161 | dst_height=10, 162 | out_dir=str(tmp_path), 163 | ) 164 | 165 | assert new_gr.shape == (10, 10) 166 | -------------------------------------------------------------------------------- /make_env.py: -------------------------------------------------------------------------------- 1 | """A simple script to generate enviroment.yml files from pyproject.toml.""" 2 | 3 | import argparse 4 | import fnmatch 5 | import platform 6 | import re 7 | from pathlib import Path 8 | from sys import version_info 9 | from typing import List 10 | 11 | if version_info.minor >= 11: 12 | from tomllib import load 13 | else: 14 | from tomli import load 15 | 16 | _FILE_DIR = Path(__file__).parent 17 | 18 | 19 | # our quick and dirty implementation of recursive depedencies 20 | def _parse_profile(profile_str: str, opt_deps: dict, project_name: str) -> List[str]: 21 | if profile_str is None or profile_str == "": 22 | return [] 23 
| 24 | pat = re.compile(r"\s*" + project_name + r"\[(.*)\]\s*") 25 | parsed = [] 26 | queue = [f"{project_name}[{x.strip()}]" for x in profile_str.split(",")] 27 | while len(queue) > 0: 28 | dep = queue.pop(0) 29 | if dep == "": 30 | continue 31 | m = pat.match(dep) 32 | if m: 33 | # if we match the patern, all list elts have to be dependenciy groups 34 | dep_groups = [d.strip() for d in m.groups(0)[0].split(",")] 35 | unknown_dep_groups = set(dep_groups) - set(opt_deps.keys()) 36 | if len(unknown_dep_groups) > 0: 37 | raise RuntimeError(f"unknown dependency group(s): {unknown_dep_groups}") 38 | queue.extend(dep_groups) 39 | continue 40 | 41 | if dep in opt_deps: 42 | queue.extend([x.strip() for x in opt_deps[dep]]) 43 | else: 44 | parsed.append(dep) 45 | 46 | return parsed 47 | 48 | 49 | parser = argparse.ArgumentParser() 50 | 51 | parser.add_argument("profile", default="dev", nargs="?") 52 | parser.add_argument("--output", "-o", default="environment.yml") 53 | parser.add_argument("--channels", "-c", default=None) 54 | parser.add_argument("--name", "-n", default=None) 55 | parser.add_argument("--py-version", "-p", default=None) 56 | args = parser.parse_args() 57 | 58 | # 59 | with open(Path(_FILE_DIR, "pyproject.toml"), "rb") as f: 60 | toml = load(f) 61 | deps = toml["project"]["dependencies"] 62 | opt_deps = toml["project"]["optional-dependencies"] 63 | project_name = toml["project"]["name"] 64 | # specific conda_install settings 65 | install_config = toml["tool"].get("make_env", {}) 66 | conda_only = install_config.get("conda_only", []) 67 | deps_not_in_conda = install_config.get("deps_not_in_conda", []) 68 | channels = install_config.get("channels", ["conda-forge"]) 69 | if args.channels is not None: 70 | channels.extend(args.channels.split(",")) 71 | channels = list(set(channels)) 72 | 73 | # parse environment name 74 | name = args.name 75 | if name is None: 76 | name = project_name.split("_")[1] 77 | if args.profile: 78 | name += f"_{args.profile}" 79 | 
print(f"Environment name: {name}") 80 | 81 | # parse dependencies groups and flavours 82 | # "min" equals no optional dependencies 83 | deps_to_install = deps.copy() 84 | if args.profile not in ["", "min"]: 85 | extra_deps = _parse_profile(args.profile, opt_deps, project_name) 86 | deps_to_install.extend(extra_deps) 87 | 88 | conda_deps = [] 89 | pip_deps = [] 90 | for dep in deps_to_install: 91 | if dep in deps_not_in_conda: 92 | pip_deps.append(dep) 93 | else: 94 | conda_deps.append(dep) 95 | if args.py_version is not None: 96 | conda_deps.append(f"python=={args.py_version}") 97 | 98 | pip_deps = sorted(list(set(pip_deps))) 99 | 100 | # Make an exception for the build environment 101 | if args.profile == "build": 102 | if platform.system().lower() == "windows": 103 | py = fnmatch.filter(conda_deps, "python*") 104 | gd = fnmatch.filter(conda_deps, "gdal*") 105 | np = fnmatch.filter(conda_deps, "numpy*") 106 | conda_deps.remove(*gd) 107 | conda_deps.remove(*np) 108 | if py: 109 | conda_deps.remove(*py) 110 | py = ["python==3.12.*"] 111 | pip_deps += conda_deps 112 | conda_deps = [] 113 | if py: 114 | conda_deps += py 115 | pip_deps.append( 116 | "https://github.com/cgohlke/geospatial-wheels/releases/download/v2024.2.18/GDAL-3.8.4-cp312-cp312-win_amd64.whl", 117 | ) 118 | pip_deps.append("numpy<2.0.0") 119 | 120 | for item in conda_only: 121 | im = fnmatch.filter(pip_deps, item) 122 | pip_deps.remove(*im) 123 | conda_deps.append(*im) 124 | 125 | pip_deps = sorted(list(set(pip_deps))) 126 | pip_deps.append("-e .") 127 | 128 | # add pip as a conda dependency if we have pip deps 129 | if len(pip_deps) > 0: 130 | conda_deps.append("pip") 131 | 132 | # the list(set()) is to remove duplicates 133 | conda_deps_to_install_string = "\n- ".join(sorted(list(set(conda_deps)))) 134 | channels_string = "\n- ".join(set(channels)) 135 | 136 | # create environment.yml 137 | env_spec = f"""name: {name} 138 | 139 | channels: 140 | - {channels_string} 141 | 142 | dependencies: 143 | - 
{conda_deps_to_install_string} 144 | """ 145 | if len(pip_deps) > 0: 146 | pip_deps_to_install_string = "\n - ".join(pip_deps) 147 | env_spec += f"""- pip: 148 | - {pip_deps_to_install_string} 149 | """ 150 | 151 | with open(Path(_FILE_DIR, args.output), "w") as out: 152 | out.write(env_spec) 153 | -------------------------------------------------------------------------------- /docs/user_guide/settings/index.qmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Settings" 3 | --- 4 | The user must set the model settings in the `settings.toml` configuration file. 5 | 6 | Besides the necessary/ required setttings one can set: 7 | 8 | - input in regard to the computational side of FIAT (e.g. chunking, number of threads etc), 9 | - see this [page](computation.qmd) 10 | - optional/ additional input that is not necessary or more data specific, 11 | - see this [page](optional.qmd) 12 | 13 | ### Basic input 14 | This section pertains to all input that is vital for running a FIAT model. 15 | 16 | These inputs/ entries are listed in the table down below with more detailed information per entry underneath the table. 17 | 18 | ::: {.callout-note} 19 | File paths in the settings can be relative to the settings.toml file or absolute. 
20 | ::: 21 | 22 | | Entry | Type | Required | Default | 23 | |:-------------------------------|---------|----------|-----------------| 24 | | **[model]** | | | | 25 | | [model](#type) | string | No | geom | 26 | | **[output]** | | | | 27 | | [path](#output) | string | No | output | 28 | | **[output.csv]** | | | | 29 | | [name[n]](#output.csv) | string | No | - | 30 | | **[output.geom]** | | | | 31 | | [name[n]](#output.geom) | string | No | spatial[n].gpkg | 32 | | **[output.grid]** | | | | 33 | | [name](#output.grid) | string | No | ead.nc | 34 | | **[hazard]** | | | | 35 | | [file](#hazard) | string | Yes | | 36 | | [elevation_reference](#hazard) | string | Yes | | 37 | | **[exposure.csv]** | | | | 38 | | [file](#exposure.csv) | string | No | - | 39 | | **[exposure.geom]** | | | | 40 | | [file[n]](#exposure.geom) | string | Yes | | 41 | | **[exposure.grid]** | | | | 42 | | [file](#exposure.grid) | string | Yes | | 43 | | **[vulnerability]** | | | | 44 | | [file](#vulnerability) | string | Yes | | 45 | : Most basic settings file input {#tbl-toml .hover} 46 | 47 | #### [model] 48 | 49 | - `type`: The type of model. Choice between 'geom' and 'grid'. 50 | 51 | #### [output] 52 | 53 | - `path`: The path to the output folder in the working directory. 54 | 55 | #### [output.csv] 56 | 57 | - `name[n]`: The path to the output CSV file(s) that will be created. These are linked to the input geometry files. 58 | 59 | #### [output.geom] 60 | 61 | - `name[n]`: This sets the name and location of the output vector file that contains the geometry, location and the damages per asset. 62 | 63 | ::: {.callout-warning} 64 | If provided, the suffix is mandatory. The suffix should match the suffix of the input geometry file for which it is set. 65 | ::: 66 | 67 | #### [output.grid] 68 | 69 | - `name`: This sets the name and location of the output raster file that contains damages per grid cell. 70 | 71 | #### [hazard] 72 | 73 | - `file`: The file path to the hazard file. 
74 | 75 | - `elevation_reference`: This indicates the elevation reference of the flood map. In case of a flood-depth map this should be "DEM" while in case of a flood-elevation map this should be "datum". 76 | 77 | #### [exposure.csv] 78 | 79 | - `file`: The path to the exposure CSV file (recommended to be within the [exposure folder](../data/index.qmd)) that contains the [required information](../data/exposure.qmd) per asset. There can only be one exposure CSV file. 80 | 81 | #### [exposure.geom] 82 | 83 | - `file[n]`: The path to the exposure vector file (recommended to be within the [exposure folder](../data/index.qmd)) with the assets' geometry and object_id. The user can provide multiple vector files. Therefore the '[n]' suffix, as the user can create mulitple entries for vector files (e.g. `file1`, `file2` etc.). 84 | 85 | ::: {.callout-warning} 86 | The suffix is mandatory. So if only one file is provided, name it `file1`. 87 | ::: 88 | ::: {.callout-note} 89 | Only required when running the geometry based model. 90 | ::: 91 | 92 | #### [exposure.grid] 93 | 94 | - `file`: The path to the exposure raster file (recommended to be within the [exposure folder](../data/index.qmd)). 95 | 96 | ::: {.callout-note} 97 | Only required when running the raster based model. 98 | ::: 99 | 100 | #### [vulnerability] 101 | 102 | - `file`: The path to the vulnerability curves CSV file within the [vulnerability folder](../data/index.qmd) that contains the [damage curves](../data/vulnerability.qmd). Only one vulnerability curves file is allowed. 
103 | 104 | ### Example 105 | 106 | An example of settings file for running a geometry model is given below: 107 | 108 | ```toml 109 | [output] 110 | path = "output" 111 | 112 | [output.csv] 113 | name = "output.csv" 114 | 115 | [output.geom] 116 | name1 = "spatial.gpkg" 117 | 118 | [hazard] 119 | file = "hazard/SL_10yr_reprojected.tif" 120 | elevation_reference = "DEM" 121 | risk = false 122 | 123 | [exposure.geom] 124 | file1 = "./exposure/buildings.gpkg" 125 | 126 | [exposure.csv] 127 | file = "./exposure/exposure.csv" 128 | 129 | [vulnerability] 130 | file = "./vulnerability/vulnerability_curves.csv" 131 | ``` 132 | -------------------------------------------------------------------------------- /res/fiat.svg: -------------------------------------------------------------------------------- 1 | 2 | 15 | 17 | 36 | 40 | 47 | 54 | 61 | 68 | 75 | 82 | 89 | 96 | 103 | 110 | 115 | 125 | 133 | 141 | 147 | 155 | 156 | -------------------------------------------------------------------------------- /docs/_static/fiat.svg: -------------------------------------------------------------------------------- 1 | 2 | 15 | 17 | 36 | 40 | 47 | 54 | 61 | 68 | 75 | 82 | 89 | 96 | 103 | 110 | 115 | 125 | 133 | 141 | 147 | 155 | 156 | -------------------------------------------------------------------------------- /docs/_static/version.js: -------------------------------------------------------------------------------- 1 | function checkPathExists(url) { 2 | return new Promise((resolve, reject) => { 3 | var xhr = new XMLHttpRequest(); 4 | xhr.open('HEAD', url, true); 5 | xhr.onreadystatechange = function() { 6 | if (xhr.readyState === 4) { 7 | if (xhr.status === 200) { 8 | resolve(true); 9 | } else if (xhr.status === 404) { 10 | resolve(false); 11 | } else { 12 | reject(new Error(xhr.statusText)); 13 | } 14 | } 15 | }; 16 | xhr.onerror = function() { 17 | reject(new Error('Network Error')); 18 | }; 19 | xhr.send(); 20 | }); 21 | } 22 | 23 | window.onload = function() { 24 | // 
Assuming you have a ul element in your HTML like this: 25 | //
    26 | 27 | // Fetch the JSON data 28 | fetch("https://raw.githubusercontent.com/Deltares/Delft-FIAT/gh-pages/switcher.json") 29 | .then(response => response.json()) 30 | .then(data => { 31 | console.log('Data loaded:', data); // Log the loaded data 32 | 33 | const dropdown = document.querySelector('#nav-menu-version').nextElementSibling; 34 | console.log('Dropdown element:', dropdown); // Log the dropdown element 35 | 36 | // Clear all existing dropdown items 37 | dropdown.innerHTML = ''; 38 | 39 | data.forEach(item => { 40 | console.log('Adding item:', item); // Log the item being added 41 | 42 | // Create a new li element 43 | const li = document.createElement('li'); 44 | 45 | // Create a new a element 46 | const a = document.createElement('a'); 47 | a.className = 'dropdown-item'; 48 | a.href = item.url; // Use the 'url' property as the href 49 | a.textContent = item.name; // Use the 'name' property as the text 50 | 51 | // Add the a element to the li 52 | li.appendChild(a); 53 | 54 | // Add the li to the dropdown 55 | dropdown.appendChild(li); 56 | }); 57 | 58 | console.log('Dropdown after adding items:', dropdown); // Log the dropdown after adding items 59 | 60 | // Get all dropdown items within the specific dropdown menu 61 | var dropdownMenu = document.querySelector('#nav-menu-version').nextElementSibling; 62 | 63 | var dropdownItems = dropdownMenu.querySelectorAll('.dropdown-item'); 64 | 65 | // Get the current page in chunks 66 | var currentPagePath = window.location.pathname.split('/'); 67 | 68 | for (var i = 0; i < dropdownItems.length; i++) { 69 | // Get textcontent 70 | var textContent = dropdownItems[i].textContent; 71 | 72 | // Get the index of the current version 73 | var index = currentPagePath.indexOf(textContent); 74 | 75 | if (index !== -1) { 76 | // Remove the active-item class from all items 77 | for (var j = 0; j < dropdownItems.length; j++) { 78 | dropdownItems[j].classList.remove('active-item'); 79 | } 80 | 81 | 
dropdownItems[i].classList.add('active-item'); 82 | break 83 | } 84 | } 85 | 86 | console.log('current page path', currentPagePath); 87 | 88 | // Loop through each dropdown item 89 | for (var i = 0; i < dropdownItems.length; i++) { 90 | // Add click event listener to each item 91 | dropdownItems[i].addEventListener('click', function(event) { 92 | // Prevent default action 93 | event.preventDefault(); 94 | 95 | // Get the clicked item's text 96 | var itemText = this.textContent; 97 | // var itemHref = this.getAttribute('href') 98 | 99 | // Loop through each dropdown item again to find a match in the current page's path 100 | for (var j = 0; j < dropdownItems.length; j++) { 101 | // Get the dropdown item's text 102 | var dropdownText = dropdownItems[j].textContent; 103 | console.log('Dropdown item:', dropdownText); 104 | 105 | // Find the index of the dropdownText in the current page's path 106 | var index = currentPagePath.indexOf(dropdownText); 107 | 108 | // If the dropdownText is found in the current page's path 109 | if (index !== -1) { 110 | // Construct the new URL relative to the dropdownText and append the itemText 111 | addElements = currentPagePath.slice(index + 1, ) 112 | relativePath = '../'.repeat(addElements.length) 113 | var newUrl = relativePath + itemText + '/' + addElements.join('/') 114 | console.log('Clicked item:', newUrl); 115 | 116 | // Redirect to the new URL 117 | checkPathExists(newUrl) 118 | .then(exists => { 119 | if (exists) { 120 | window.location.href = newUrl; 121 | } else { 122 | console.log('Path does not exist, referring to home page'); 123 | window.location.href = relativePath + itemText + '/'; 124 | } 125 | }) 126 | 127 | // Exit the loop 128 | break; 129 | } 130 | } 131 | }); 132 | } 133 | 134 | }) 135 | .catch(error => console.error('Error:', error)); // Log any errors 136 | } 137 | --------------------------------------------------------------------------------