├── fiat_toolbox
│   ├── equity
│   │   ├── __init__.py
│   │   ├── fiat_functions.py
│   │   └── equity.py
│   ├── infographics
│   │   ├── __init__.py
│   │   ├── infographics_interface.py
│   │   ├── infographics_factory.py
│   │   └── risk_infographics.py
│   ├── metrics_writer
│   │   ├── __init__.py
│   │   ├── fiat_metrics_interface.py
│   │   ├── fiat_write_return_period_threshold.py
│   │   ├── fiat_read_metrics_file.py
│   │   └── fiat_write_metrics_file.py
│   ├── spatial_output
│   │   ├── __init__.py
│   │   └── aggregation_areas.py
│   ├── well_being
│   │   └── __init__.py
│   ├── __init__.py
│   └── utils.py
├── ruff.toml
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── feature_request.yaml
│   │   ├── documentation_improvement.yaml
│   │   └── bug_report.yaml
│   └── workflows
│       ├── lint.yml
│       ├── tests.yml
│       └── publish-to-pypi.yml
├── sonar-project.properties
├── tests
│   ├── metrics_writer
│   │   ├── data
│   │   │   ├── test_metrics_no_aggregation.csv
│   │   │   ├── test_metrics_subbasin.csv
│   │   │   └── test_metrics_taxuse.csv
│   │   ├── test_fiat_read_metrics_file.py
│   │   └── test_fiat_write_return_period_threshold.py
│   ├── infographics
│   │   ├── data
│   │   │   ├── risk
│   │   │   │   ├── test_scenario_metrics.csv
│   │   │   │   └── config_risk_charts.toml
│   │   │   └── single_event
│   │   │       ├── config_people.toml
│   │   │       ├── config_roads.toml
│   │   │       ├── test_scenario_metrics.csv
│   │   │       └── config_charts.toml
│   │   └── test_risk_infographics.py
│   ├── spatial_output
│   │   ├── data
│   │   │   ├── aggr_lvl_1.geojson
│   │   │   ├── current_extreme12ft_comb_test_metrics_aggr_lvl_1.csv
│   │   │   ├── aggr_lvl_2.geojson
│   │   │   └── current_extreme12ft_comb_test_metrics_aggr_lvl_2.csv
│   │   ├── test_aggregation_areas_output.py
│   │   └── test_points_to_footprints.py
│   ├── equity
│   │   ├── test_equity.py
│   │   └── data
│   │       └── population_income_data.csv
│   └── well_being
│       ├── test_methods.py
│       └── test_household.py
├── .vscode
│   ├── extensions.json
│   └── settings.json
├── envs
│   └── fiat-toolbox-dev.yml
├── .pre-commit-config.yaml
├── LICENSE
├── README.md
├── pyproject.toml
├── .gitignore
└── examples
    ├── metrics
    │   ├── test_read_metrics.ipynb
    │   └── test_write_metrics.ipynb
    └── equity
        └── run_equity.ipynb
/fiat_toolbox/equity/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/fiat_toolbox/infographics/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/fiat_toolbox/metrics_writer/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/fiat_toolbox/spatial_output/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ruff.toml:
--------------------------------------------------------------------------------
1 | [lint]
2 | select = ["E", "F", "NPY", "PD", "C4", "I"]
3 | ignore = ["E501"]
4 |
--------------------------------------------------------------------------------
/fiat_toolbox/well_being/__init__.py:
--------------------------------------------------------------------------------
1 | from .household import Household
2 |
3 | __all__ = ["Household"]
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | blank_issues_enabled: false
4 | contact_links:
5 | - name: Ask a question
6 | url: https://github.com/Deltares/hydromt/discussions
7 | about: Ask questions and discuss with other community members
8 |
--------------------------------------------------------------------------------
/sonar-project.properties:
--------------------------------------------------------------------------------
1 | sonar.projectKey=deltares_fiat-toolbox
2 | sonar.organization=deltares
3 | sonar.python.version=3.10, 3.11, 3.12, 3.13
4 | sonar.exclusions=examples/**/*, environment/**/*, tests/**/*
5 | sonar.sources=fiat_toolbox
6 | sonar.sourceEncoding=UTF-8
7 |
--------------------------------------------------------------------------------
/tests/metrics_writer/data/test_metrics_no_aggregation.csv:
--------------------------------------------------------------------------------
1 | ,Description,Show In Metrics Map,Show In Metrics Table,Long Name,Value
2 | Total Damage Sum,Total of the damage event,True,True,Total Damage Sum,1200
3 | Single Family Damage Sum,Total of the damage event for only single families,True,True,Single Family Damage Sum,300
4 |
--------------------------------------------------------------------------------
/tests/metrics_writer/data/test_metrics_subbasin.csv:
--------------------------------------------------------------------------------
1 | ,Total Damage Sum,Single Family Damage Sum
2 | Description,Total of the damage event,Total of the damage event for only single families
3 | Show In Metrics Map,True,True
4 | Show In Metrics Table,True,True
5 | Long Name,Total Damage Sum,Single Family Damage Sum
6 | BAYLURE,900,0
7 | OAKFOREST,300,300
8 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | types: [opened, synchronize, reopened]
8 |
9 | jobs:
10 | pre-commit:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v5
14 | - uses: actions/setup-python@v5
15 | - uses: pre-commit/action@v3.0.1
16 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": [
3 | "ms-python.python",
4 | "ms-python.vscode-pylance",
5 | "charliermarsh.ruff",
6 | "ms-toolsai.jupyter",
7 | "github.copilot",
8 | "tamasfe.even-better-toml",
9 | "aaron-bond.better-comments",
10 | "sonarsource.sonarlint-vscode",
11 | ]
12 | }
13 |
--------------------------------------------------------------------------------
/tests/metrics_writer/data/test_metrics_taxuse.csv:
--------------------------------------------------------------------------------
1 | ,Total Damage Sum,Single Family Damage Sum
2 | Description,Total of the damage event,Total of the damage event for only single families
3 | Show In Metrics Map,True,True
4 | Show In Metrics Table,True,True
5 | Long Name,Total Damage Sum,Single Family Damage Sum
6 | VACANT COMMERCIAL,900,0
7 | VACANT RESIDENTIAL,300,0
8 | SINGLE FAMILY,0,300
9 |
--------------------------------------------------------------------------------
/envs/fiat-toolbox-dev.yml:
--------------------------------------------------------------------------------
1 | name: fiat_toolbox
2 |
3 | channels:
4 | - conda-forge
5 |
6 | dependencies:
7 | - black
8 | - numpy < 2.0
9 | - pandas == 2.2.2
10 | - geopandas == 1.0.1
11 | - tomli
12 | - pytest # tests
13 | - pytest-cov # tests
14 | - pytest-benchmark # tests
15 | - python==3.10
16 | - plotly
17 | - parse
18 | # - gdal
19 | - pydantic
20 | - seaborn
21 | - notebook
22 | - scipy==1.15.2
23 | - pip
24 | - pip:
25 | - duckdb==1.2.1
26 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "[python]": {
3 | "editor.formatOnSave": true,
4 | "editor.defaultFormatter": "charliermarsh.ruff",
5 | "editor.codeActionsOnSave": {
6 | "source.fixAll": "explicit"
7 | }
8 | },
9 | "sonarlint.connectedMode.project": {
10 | "connectionId": "deltares",
11 | "projectKey": "deltares_fiat-toolbox"
12 | },
13 | "python.testing.pytestArgs": [
14 | "tests"
15 | ],
16 | "python.testing.unittestEnabled": false,
17 | "python.testing.pytestEnabled": true
18 | }
19 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.6.0
4 | hooks:
5 | - id: check-yaml
6 | - id: end-of-file-fixer
7 | - id: check-case-conflict
8 | - id: trailing-whitespace
9 | - id: check-toml
10 | - id: check-merge-conflict
11 | - id: check-added-large-files
14 |
15 | - repo: https://github.com/astral-sh/ruff-pre-commit
16 | rev: v0.13.1
17 | hooks:
18 | - id: ruff-check
19 | args: [ --fix ]
20 | - id: ruff-format
21 |
22 | - repo: https://github.com/crate-ci/typos
23 | rev: v1.36.2
24 | hooks:
25 | - id: typos
26 |
--------------------------------------------------------------------------------
/tests/infographics/data/risk/test_scenario_metrics.csv:
--------------------------------------------------------------------------------
1 | ,ExpectedAnnualDamages,FloodedHomes,ImpactedHomes2Y,ImpactedHomes5Y,ImpactedHomes10Y,ImpactedHomes25Y,ImpactedHomes50Y,ImpactedHomes100Y
2 | Description,Expected annual damages,Homes flooded (Inundation Depth > 0.5) in 30 years,Homes impacted (Inundation Depth > 0.2) in 2 years,Homes impacted (Inundation Depth > 0.2) in 5 years,Homes impacted (Inundation Depth > 0.2) in 10 years,Homes impacted (Inundation Depth > 0.2) in 25 years,Homes impacted (Inundation Depth > 0.2) in 50 years,Homes impacted (Inundation Depth > 0.2) in 100 years
3 | Show In Metrics Table,True,True,True,True,True,True,True,True
4 | Long Name,Expected Annual Damages,Flooded homes,Impacted Homes 2 Years,Impacted Homes 5 Years,Impacted Homes 10 Years,Impacted Homes 25 Years,Impacted Homes 50 Years,Impacted Homes 100 Years
5 | Value,166119191.2,1122.0,128.0,1122.0,1122.0,1122.0,1122.0,1122.0
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Deltares
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/tests/spatial_output/data/aggr_lvl_1.geojson:
--------------------------------------------------------------------------------
1 | {
2 | "type": "FeatureCollection",
3 | "name": "aggr_lvl_1",
4 | "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },
5 | "features": [
6 | { "type": "Feature", "properties": { "id": 1, "name": "name1" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.945014118305835, 32.775077769633178 ], [ -79.935822279928857, 32.776007518802345 ], [ -79.935616255965229, 32.774148020464011 ], [ -79.935822279928857, 32.774095193806673 ], [ -79.93540494933589, 32.770719570402711 ], [ -79.935542298644961, 32.769525687946853 ], [ -79.942489004085047, 32.772240978134079 ], [ -79.943841366512927, 32.773466556584339 ], [ -79.945014118305835, 32.775077769633178 ] ] ] ] } },
7 | { "type": "Feature", "properties": { "id": 2, "name": "name2" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.935758887940068, 32.775996953470894 ], [ -79.926936836164458, 32.776884441314181 ], [ -79.927296057434361, 32.773038660659907 ], [ -79.928923118480398, 32.769192880005626 ], [ -79.935463058658968, 32.769604927932875 ], [ -79.935346840012826, 32.770724853068458 ], [ -79.935769453271533, 32.774000105823475 ], [ -79.935526450647771, 32.774137455132553 ], [ -79.935758887940068, 32.775996953470894 ] ] ] ] } }
8 | ]
9 | }
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Delft-FIAT Toolbox
2 | ------------------
3 | This toolbox contains post-processing modules for Delft-FIAT output.
4 |
5 | Installation
6 | ============
7 | The Delft-FIAT Toolbox uses [uv](https://docs.astral.sh/uv/) to build and manage Python environments.
8 | If you do not have `uv` installed, you can install it using `pip install uv`.
9 |
10 | - Install with: `uv sync`
11 |
12 | - Run the tests with: `uv run pytest`
13 |
14 | - Run the linter with: `uv run pre-commit run --all-files`
15 |
16 | Modules:
17 |
18 | metrics_writer
19 | ==============
20 | This module contains functions to write out custom aggregated metrics from Delft-FIAT output for the whole model and/or for different aggregation levels.
21 |
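A minimal usage sketch, assuming a concrete writer class in `fiat_write_metrics_file.py` that implements the `IMetricsFileWriter` interface; the class name `MetricsFileWriter` and all file paths below are illustrative, not confirmed by this dump:

```python
from pathlib import Path

import pandas as pd

# Hypothetical concrete implementation of IMetricsFileWriter; the class name
# and the file paths are illustrative only.
from fiat_toolbox.metrics_writer.fiat_write_metrics_file import MetricsFileWriter

df_results = pd.read_csv("fiat_output.csv")  # Delft-FIAT results table
writer = MetricsFileWriter(config_file="metrics_config.toml")
writer.parse_metrics_to_file(
    df_results=df_results,
    metrics_path=Path("metrics.csv"),
    write_aggregate=None,  # None for no aggregation label, "all" for every aggregation level
)
```
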
22 | infographics
23 | ============
24 | This module contains functions to write customized infographics in HTML format using metrics files.
25 |
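A minimal sketch using the factory from `fiat_toolbox/infographics/infographics_factory.py`; the scenario name and paths are placeholders:

```python
from fiat_toolbox.infographics.infographics_factory import InforgraphicFactory

# Scenario name and paths below are placeholders.
parser = InforgraphicFactory.create_infographic_file_writer(
    infographic_mode="risk",  # or "single_event"
    scenario_name="my_scenario",
    metrics_full_path="output/my_scenario_metrics.csv",
    config_base_path="config/infographics",
    output_base_path="output/infographics",
)
html_path = parser.write_infographics_to_file()  # returns the path to the written HTML file
```
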
26 | spatial_output
27 | ==============
28 | This module contains functions to aggregate point output from Delft-FIAT to building footprints, as well as methods to join aggregated metrics to spatial files.
29 |
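A minimal sketch mirroring `tests/spatial_output/test_aggregation_areas_output.py`; the input file names are placeholders:

```python
from pathlib import Path

import geopandas as gpd
import pandas as pd

from fiat_toolbox.spatial_output.aggregation_areas import AggregationAreas

# Metrics per aggregation area and the matching area polygons (placeholder paths)
metrics = pd.read_csv("current_extreme12ft_comb_test_metrics_aggr_lvl_1.csv")
areas = gpd.read_file("aggr_lvl_1.geojson")

# Join the metrics to the polygons and write them as a spatial file
AggregationAreas.write_spatial_file(metrics, areas, Path("aggregation_areas_aggr_lvl_1.gpkg"))
```
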
30 | equity
31 | ======
32 | This module contains functions to calculate equity weights and equity weighted risk metrics based on socio-economic inputs at an aggregation level.
33 |
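A minimal sketch mirroring `tests/equity/test_equity.py`; the file names and column labels are the ones used in the test data:

```python
from fiat_toolbox.equity.equity import Equity

# Arguments follow the order used in the tests: census data, aggregated Delft-FIAT
# damages, aggregation label, per-capita-income column and total-population column.
equity = Equity(
    "population_income_data.csv",
    "aggregated_damage_fiat.csv",
    "Census_Bg",
    "PerCapitaIncomeBG",
    "TotalPopulationBG",
)

df_equity = equity.equity_calculation(1.2, "aggregated_ewced.csv")  # gamma, output csv
ranking = equity.rank_ewced()  # adds a rank_diff_EWCEAD column
sri = equity.calculate_resilience_index()  # adds an SRI column
```
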
34 | well_being
35 | ==========
36 | This module contains functions to estimate household-level well-being impacts.
37 |
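A minimal sketch mirroring `tests/well_being/test_methods.py`; all numeric inputs are illustrative values taken from that test:

```python
import numpy as np

from fiat_toolbox.well_being import methods

# Illustrative inputs copied from the test case for opt_lambda.
result = methods.opt_lambda(
    v=0.2,
    k_str=100000,
    c0=20000,
    pi=0.1,
    eta=1.5,
    l_min=0.3,
    l_max=1.0,
    t_max=2.0,
    times=np.linspace(0, 2, 10),
    method="trapezoid",
    cmin=1000,
    eps_rel=0.01,
    savings=1000,
    insurance=500,
    support=200,
)
# The result is expected to contain the optimal recovery rate and the associated loss.
print(result["l_opt"], result["loss_opt"])
```
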
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | description: Suggest an idea/enhancement for this project
4 | labels: [enhancement]
5 |
6 | body:
7 | - type: dropdown
8 | id: checks
9 | attributes:
10 | description: What kind of feature request is this?
11 | label: Kind of request
12 | options:
13 | - Adding new functionality
14 | - Changing existing functionality
15 | - Removing existing functionality
16 | - type: textarea
17 | id: description
18 | attributes:
19 | description: >
20 | Please provide a clear and concise description of the feature you're requesting
21 | label: Enhancement Description
22 | validations:
23 | required: true
24 | - type: textarea
25 | id: task-list
26 | attributes:
27 | description: >
28 | Please provide a list of tasks that need to be completed before this issue can be closed.
29 | label: Task list
30 | validations:
31 | required: true
32 | - type: textarea
33 | id: use-case
34 | attributes:
35 | description: >
36 | Please describe a situation in which this feature would be useful to you, with code or cli examples if possible
37 | label: Use case
38 | - type: textarea
39 | id: context
40 | attributes:
41 | description: >
42 | Please add any other context about the enhancement here
43 | label: Additional Context
44 |
--------------------------------------------------------------------------------
/fiat_toolbox/infographics/infographics_interface.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from abc import ABC, abstractmethod
3 | from pathlib import Path
4 | from typing import Union
5 |
6 |
7 | class IInfographicsParser(ABC):
8 | """Interface for creating the infographic"""
9 |
10 | logger: logging.Logger
11 |
12 | @abstractmethod
13 | def __init__(
14 | self,
15 | scenario_name: str,
16 | metrics_full_path: Union[Path, str],
17 | config_base_path: Union[Path, str],
18 | output_base_path: Union[Path, str],
19 | logger: logging.Logger = logging.getLogger(__name__),
20 | ) -> None: ...
21 |
22 | @abstractmethod
23 | def get_infographics(self) -> str:
24 | """Get the infographic for a scenario
25 |
26 | Returns
27 | -------
28 | str
29 | The infographic for the scenario as a string in html format
30 | """
31 | pass
32 |
33 | @abstractmethod
34 | def write_infographics_to_file() -> str:
35 | """Write the infographic for a scenario to file
36 |
37 | Returns
38 | -------
39 | str
40 | The path to the infographic file
41 | """
42 | pass
43 |
44 | @abstractmethod
45 | def get_infographics_html() -> str:
46 | """Get the path to the infographic html file
47 |
48 | Returns
49 | -------
50 | str
51 | The path to the infographic html file
52 | """
53 | pass
54 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation_improvement.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Documentation Improvement
3 | description: Report wrong or missing documentation
4 | labels: [documentation]
5 |
6 | body:
7 | - type: checkboxes
8 | attributes:
9 | label: HydroMT version checks
10 | options:
11 | - label: >
12 | I have checked that the issue still exists on the latest versions of the docs
13 | on `main` [here](https://github.com/Deltares/hydromt)
14 | required: true
15 | - type: dropdown
16 | id: kind
17 | attributes:
18 | description: What kind of documentation issue is this?
19 | label: Kind of issue
20 | options:
21 | - Docs are wrong
22 | - Docs are unclear
23 | - Docs are missing
24 | validations:
25 | required: true
26 | - type: textarea
27 | id: location
28 | attributes:
29 | description: >
30 | If the docs are wrong or unclear please provide the URL of the documentation in question
31 | label: Location of the documentation
32 | - type: textarea
33 | id: problem
34 | attributes:
35 | description: >
36 | Please provide a description of the documentation problem
37 | label: Documentation problem
38 | validations:
39 | required: true
40 | - type: textarea
41 | id: suggested-fix
42 | attributes:
43 | description: >
44 | Please explain your suggested fix and why it's better than the existing documentation
45 | label: Suggested fix for documentation
46 |
--------------------------------------------------------------------------------
/fiat_toolbox/infographics/infographics_factory.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from typing import Union
3 |
4 | from fiat_toolbox.infographics.infographics import InfographicsParser
5 | from fiat_toolbox.infographics.infographics_interface import IInfographicsParser
6 | from fiat_toolbox.infographics.risk_infographics import RiskInfographicsParser
7 |
8 |
9 | class InforgraphicFactory:
10 | @staticmethod
11 | def create_infographic_file_writer(
12 | infographic_mode: str,
13 | scenario_name: str,
14 | metrics_full_path: Union[Path, str],
15 | config_base_path: Union[Path, str],
16 | output_base_path: Union[Path, str],
17 | ) -> IInfographicsParser:
18 | """
19 | Create a infographic file writer.
20 |
21 | Parameters
22 | ----------
23 | infographic_mode : str
24 | The mode of the infographic file writer to create.
25 | config_file : Path
26 | The path to the infographic file.
27 |
28 | Returns
29 | -------
30 | IInfographicsFileWriter
31 | A infographic file writer.
32 | """
33 | if infographic_mode == "single_event":
34 | return InfographicsParser(
35 | scenario_name, metrics_full_path, config_base_path, output_base_path
36 | )
37 | elif infographic_mode == "risk":
38 | return RiskInfographicsParser(
39 | scenario_name, metrics_full_path, config_base_path, output_base_path
40 | )
41 | else:
42 | raise ValueError(f"Infographic_mode {infographic_mode} not supported")
43 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling>=1.24.2"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "fiat_toolbox"
7 | description = "A collection of modules for post-processing Delft-FIAT output."
8 | readme = {file = "README.md", content-type = "text/markdown"}
9 | license = { file = "LICENSE" }
10 | authors = [
11 | {name = "Panos Athanasiou", email = "Panos.Athanasiou@deltares.nl"},
12 | {name = "Luuk Blom", email = "Luuk.Blom@deltares.nl"},
13 | {name = "Sarah Rautenbach", email = "sarah.rautenbach@deltares.nl"},
14 | {name = "Daley Adrichem", email = "Daley.Adrichem@deltares.nl"},
15 | ]
16 | classifiers = [
17 | "Intended Audience :: Science/Research",
18 |     "License :: OSI Approved :: MIT License",
19 | "Topic :: Scientific/Engineering :: Hydrology",
20 | ]
21 | requires-python = ">=3.10"
22 | dynamic = ["version"]
23 | dependencies = [
24 | "numpy < 2.0",
25 | "pandas",
26 | "tomli",
27 | "toml",
28 | "plotly",
29 | "parse",
30 | "geopandas",
31 | "duckdb>=1.0, <1.3",
32 | "validators",
33 | "pydantic",
34 | "pillow",
35 | "matplotlib",
37 | "scipy",
38 | "seaborn"
39 | ]
40 |
41 | [dependency-groups]
42 | dev = [
43 | {include-group = "lint"},
44 | {include-group = "test"}
45 | ]
46 | test = ["pytest", "pytest-cov"]
47 | lint = ["pre-commit", "ruff"]
48 |
49 | [project.optional-dependencies]
50 | test = ["pytest", "pytest-cov"]
51 | lint = ["pre-commit", "ruff"]
52 |
53 | [project.urls]
54 | Source = "https://github.com/Deltares/fiat_toolbox"
55 |
56 | [tool.pytest.ini_options]
57 | addopts = "--cov fiat_toolbox --cov-report xml"
58 | testpaths = ["tests"]
59 |
60 | [tool.hatch.version]
61 | path = "fiat_toolbox/__init__.py"
62 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Visual Studio
2 | .vs/
3 |
4 | # C++ compilation
5 | *.manifest
6 | !.build/gui/*.manifest
7 | src/bin/
8 | bin/
9 |
10 | # Byte-compiled / optimized / DLL files
11 | __pycache__/
12 | *.py[cod]
13 | *$py.class
14 |
15 | # Local documentation and temporary files (calculation etc.)
16 | docs/
17 | tmp/
18 | whl/
19 |
20 | # Data folder
21 | # data/
22 |
23 | # C extensions
24 | *.so
25 |
26 | # Distribution / packaging
27 | .Python
28 | build/
29 | develop-eggs/
30 | dist/
31 | downloads/
32 | eggs/
33 | .eggs/
34 | lib/
35 | lib64/
36 | parts/
37 | sdist/
38 | var/
39 | wheels/
40 | share/python-wheels/
41 | *.egg-info/
42 | *.whl
43 | .installed.cfg
44 | *.egg
45 | MANIFEST
46 |
47 | # Installer logs
48 | pip-log.txt
49 | pip-delete-this-directory.txt
50 |
51 | # Unit test / coverage reports
52 | htmlcov/
53 | .tox/
54 | .nox/
55 | .coverage
56 | .coverage.*
57 | .cache
58 | nosetests.xml
59 | coverage.xml
60 | *.cover
61 | *.py,cover
62 | .hypothesis/
63 | .pytest_cache/
64 | .testdata/output/
65 | .testdata/*.log
66 | cover/
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | .docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 | .ipynb
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
87 | __pypackages__/
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 | environment.yml
98 |
99 | # Rope project settings
100 | .ropeproject
101 |
102 | # mkdocs documentation
103 | /site
104 |
105 | # pytype static type analyzer
106 | .pytype/
107 |
108 | # Cython debug symbols
109 | cython_debug/
110 |
--------------------------------------------------------------------------------
/tests/infographics/data/single_event/config_people.toml:
--------------------------------------------------------------------------------
1 | [Charts]
2 | [Charts.Flooded]
3 | Name = "Flooded"
4 | Image = "https://openclipart.org/image/800px/217511"
5 | [Charts.Displaced]
6 | Name = "Displaced"
7 | Image = "https://openclipart.org/image/800px/229840"
8 |
9 | [Categories]
10 | [Categories.LowVulnerability]
11 | Name = "LowVulnerability"
12 | Color = "#D5DEE1"
13 | [Categories.HighVulnerability]
14 | Name = "HighVulnerability"
15 | Color = "#88A2AA"
16 |
17 | [Slices]
18 | [Slices.Flooded_Low_Vulnerability_People]
19 | Name = "Flooded low vulnerability people"
20 | Query = "FloodedLowVulnerability"
21 | Chart = "Flooded"
22 | Category = "LowVulnerability"
23 | [Slices.Flooded_High_Vulnerability_People]
24 | Name = "Flooded high vulnerability people"
25 | Query = "FloodedHighVulnerability"
26 | Chart = "Flooded"
27 | Category = "HighVulnerability"
28 | [Slices.Displaced_Low_Vulnerability_People]
29 | Name = "Displaced low vulnerability people"
30 | Query = "DisplacedLowVulnerability"
31 | Chart = "Displaced"
32 | Category = "LowVulnerability"
33 | [Slices.Displaced_High_Vulnerability_People]
34 | Name = "Displaced high vulnerability people"
35 | Query = "DisplacedHighVulnerability"
36 | Chart = "Displaced"
37 | Category = "HighVulnerability"
38 |
39 | [Other]
40 | [Other.Plot]
41 | image_scale = 0.15
42 | numbers_font = 20
43 | height = 400
44 | width = 700
45 | [Other.Title]
46 | text = "People"
47 | font = 30
48 | [Other.Subtitle]
49 | font = 25
50 | [Other.Legend]
51 | font = 20
52 | [Other.Info]
53 | text="Hi im the info for the people chart"
54 | image="https://openclipart.org/image/800px/302413"
55 | scale=0.1
56 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | description: Report incorrect behavior
4 | labels: [bug]
5 |
6 | body:
7 | - type: checkboxes
8 | id: checks
9 | attributes:
10 | label: HydroMT version checks
11 | options:
12 | - label: I have checked that this issue has not already been reported.
13 | required: true
14 | - label: I have checked that this bug exists on the latest version of HydroMT.
15 | required: true
16 | - type: textarea
17 | id: example
18 | attributes:
19 | description: >
20 |         Please provide a minimal, copy-pastable example or a link to a public repository that reproduces the behaviour. If providing a copy-pastable example,
21 |         you may assume you are in a clean, up-to-date version of hydromt with a Python environment active. In the case of a repository, ensure the repository
22 |         has a README.md which includes instructions to reproduce the behaviour.
23 | label: Reproducible Example
24 | validations:
25 | required: true
26 | - type: textarea
27 | id: current-behaviour
28 | attributes:
29 | description: >
30 | Please provide a description of the incorrect behaviour shown in the reproducible example
31 | label: Current behaviour
32 | validations:
33 | required: true
34 | - type: textarea
35 | id: expected-behaviour
36 | attributes:
37 | description: >
38 | Please provide a description of what you think the behaviour should be
39 | label: Desired behaviour
40 | validations:
41 | required: true
42 | - type: textarea
43 | id: task-list
44 | attributes:
45 | description: >
46 | Please provide a list of tasks that need to be completed before this issue can be closed.
47 | label: Task list
48 | validations:
49 | required: true
50 | - type: textarea
51 | id: additional
52 | attributes:
53 | description: >
54 | Please add any other context about the bug here
55 | label: Additional context
56 | validations:
57 | required: false
58 |
--------------------------------------------------------------------------------
/tests/infographics/data/single_event/config_roads.toml:
--------------------------------------------------------------------------------
1 | [Charts]
2 | [Charts.Flooded_roads]
3 | Name = "Flooded Roads"
4 |
5 | [Categories]
6 | [Categories.Slightly_Flooded]
7 | Name = "Slightly Flooded"
8 | Color = "#D5DEE1"
9 | Image = "https://openclipart.org/image/800px/201733"
10 | [Categories.Minor_Flooded]
11 | Name = "Minor Flooded"
12 | Color = "#D5DEE1"
13 | Image = "https://openclipart.org/image/800px/190006"
14 | [Categories.Major_Flooded]
15 | Name = "Major Flooded"
16 | Color = "#D5DEE1"
17 | Image = "{image_path}/truck.png"
18 | [Categories.Fully_Flooded]
19 | Name = "Fully Flooded"
20 | Color = "#D5DEE1"
21 | Image = "https://openclipart.org/image/800px/314219"
22 |
23 | [Slices]
24 | [Slices.Slightly_Flooded]
25 | Name = "Slightly Flooded"
26 | Query = "SlightlyFloodedRoads"
27 | Chart = "Flooded Roads"
28 | Category = "Slightly Flooded"
29 | [Slices.Minor_Flooded]
30 | Name = "Minor Flooded"
31 | Query = "MinorFloodedRoads"
32 | Chart = "Flooded Roads"
33 | Category = "Minor Flooded"
34 | [Slices.Major_Flooded]
35 | Name = "Major Flooded"
36 | Query = "MajorFloodedRoads"
37 | Chart = "Flooded Roads"
38 | Category = "Major Flooded"
39 | [Slices.Fully_Flooded]
40 | Name = "Fully Flooded"
41 | Query = "FullyFloodedRoads"
42 | Chart = "Flooded Roads"
43 | Category = "Fully Flooded"
44 |
45 | [Other]
46 | [Other.Plot]
47 | image_scale = 0.1
48 | numbers_font = 20
49 | height = 400
50 | width = 700
51 | [Other.Title]
52 | text = "Interrupted roads"
53 | font = 30
54 | [Other.Subtitle]
55 | font = 25
56 | [Other.Info]
57 | text="""Hi im the info for the roads chart
58 | I'm a very long string
59 | mostly
60 | containing
61 | multiple
62 | lines"""
63 | image="https://openclipart.org/image/800px/302413"
64 | scale=0.1
65 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 | types: [opened, synchronize, reopened]
11 |
12 | jobs:
13 | test:
14 | name: Test on ${{ matrix.os }} with Python ${{ matrix.python-version }}
15 | runs-on: "${{ matrix.os }}"
16 | defaults:
17 | run:
18 | shell: bash -l {0}
19 | strategy:
20 | matrix:
21 | python-version: ["3.10", "3.11", "3.12", "3.13"]
22 | os: [ubuntu-latest, macos-latest, windows-latest]
23 | include:
24 | - python-version: "3.12"
25 | os: ubuntu-latest
26 | coverage: true # Mark only this combination for coverage to prevent error: https://sonarsource.atlassian.net/browse/SQSCANGHA-83 & https://community.sonarsource.com/t/my-first-analyses-has-failed-when-added-project-analysis-id-38152260-0ab1-4a18-846f-1f2371079b5f/134798/27
27 | steps:
28 | - uses: actions/checkout@v5
29 | with:
30 | fetch-depth: 0
31 |
32 | - name: Install uv
33 | uses: astral-sh/setup-uv@v6
34 | with:
35 | python-version: ${{ matrix.python-version }}
36 |
37 | - name: Install test dependencies
38 | run: |
39 | uv sync --locked --all-extras
40 |
41 | - name: Test with pytest
42 | run: |
43 | uv run pytest
44 |
45 | - name: Upload coverage report
46 | if: matrix.coverage == true
47 | uses: actions/upload-artifact@v4
48 | with:
49 | if-no-files-found: error
50 | name: coverage-report
51 | path: coverage.xml
52 |
53 | sonar_scan:
54 | name: SonarQube Scan
55 | runs-on: ubuntu-latest
56 | needs: test
57 | steps:
58 | - uses: actions/checkout@v5
59 | with:
60 | fetch-depth: 0
61 |
62 | - name: Download coverage report
63 | uses: actions/download-artifact@v4
64 | with:
65 | name: coverage-report
66 |
67 | - name: SonarQube Scan
68 | uses: SonarSource/sonarqube-scan-action@v5
69 | env:
70 | SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
71 |
--------------------------------------------------------------------------------
/tests/spatial_output/data/current_extreme12ft_comb_test_metrics_aggr_lvl_1.csv:
--------------------------------------------------------------------------------
1 | ,TotalDamageEvent,ResidentialAffectedCount,ResidentialMinorCount,ResidentialMajorCount,ResidentialDestroyedCount,CommercialAffectedCount,CommercialMinorCount,CommercialMajorCount,CommercialDestroyedCount,PublicAffectedCount,PublicMinorCount,PublicMajorCount,PublicDestroyedCount,IndustrialAffectedCount,IndustrialMinorCount,IndustrialMajorCount,IndustrialDestroyedCount,FloodedLowVulnerability,FloodedHighVulnerability,DisplacedLowVulnerability,DisplacedHighVulnerability
2 | Description,Total Damage of the Event on buldings,Number of affected damaged residential buildings,Number of minor damaged residential buildings,Number of major damaged residential buildings,Number of destroyed residential buildings,Number of affected damaged commercial buildings,Number of minor damaged commercial buildings,Number of major damaged commercial buildings,Number of destroyed commercial buildings,Number of affected damaged Public buildings,Number of minor damaged Public buildings,Number of major damaged Public buildings,Number of destroyed Public buildings,Number of affected damaged Industrial buildings,Number of minor damaged Industrial buildings,Number of major damaged Industrial buildings,Number of destroyed Industrial buildings,Number of flooded people with low vulnerability,Number of flooded people with high vulnerability,Number of displaced people with low vulnerability,Number of displaced people with low vulnerability
3 | Show In Metrics Table,True,True,True,True,True,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False
4 | Long Name,Total Damage of the Event,Affected Residential Buildings,Minor Damaged Residential Buildings,Major Damaged Residential Buildings,Destroyed Residential Buildings,Affected Commercial Buildings,Minor Damaged Commercial Buildings,Major Damaged Commercial Buildings,Destroyed Commercial Buildings,Affected Public Buildings,Minor Damaged Public Buildings,Major Damaged Public Buildings,Destroyed Public Buildings,Affected Industrial Buildings,Minor Damaged Industrial Buildings,Major Damaged Industrial Buildings,Destroyed Industrial Buildings,Flooded Low Vulnerability,Flooded High Vulnerability,Displaced Low Vulnerability,Displaced High Vulnerability
5 | ,39580.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0
6 | name1,8456800.0,0.0,220.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,220.0,220.0,0.0,0.0
7 | name2,38039340.0,0.0,563.0,0.0,0.0,0.0,403.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,563.0,563.0,0.0,0.0
8 |
--------------------------------------------------------------------------------
/fiat_toolbox/equity/fiat_functions.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | # Instead of having fiat core as a dependency for this one function that doesn't use any other functions from fiat core,
4 | # the function was copied here (26/9/2024, 661e0f2b2d6396346140316412c5957bc10eb03b) from https://github.com/Deltares/Delft-FIAT/blob/master/src/fiat/models/calc.py
5 |
6 |
7 | def calc_rp_coef(
8 | rp: list | tuple,
9 | ):
10 | """
11 | Calculates coefficients used to compute the EAD as a linear function of
12 | the known damages.
13 |
14 | Parameters
15 | ----------
16 | rp : list or tuple of int
17 | Return periods T1 … Tn for which damages are known.
18 |
19 | Returns
20 | -------
21 | list of float
22 | Coefficients a1, …, an used to compute the EAD as a linear function of the known damages.
23 |
24 | Notes
25 | -----
26 |     The expected annual damage (EAD) is defined as the integral of D(f) over all frequencies of exceedance,
27 |     in which D(f) is the damage, D, as a function of the frequency of exceedance, f.
28 |     In order to compute this EAD, the function D(f) needs to be known for the entire range of frequencies.
29 |     Instead, D(f) is only given for the n frequencies corresponding to the return periods in `rp`. So, in order to compute the integral, some assumptions need to be made for the function D(f):
30 | (i) For f > f1 the damage is assumed to be equal to 0.
31 | (ii) For f < fn, the damage is assumed to be equal to Dn.
32 | (iii) For all other frequencies, the damage is estimated from log-linear interpolation
33 | between the known damages and frequencies.
34 | """
35 | # Step 1: Compute frequencies associated with T-values.
36 | _rp = sorted(rp)
37 | idxs = [_rp.index(n) for n in rp]
38 | rp_u = sorted(rp)
39 | rp_l = len(rp_u)
40 |
41 | f = [1 / n for n in rp_u]
42 | lf = [math.log(1 / n) for n in rp_u]
43 |
44 | if rp_l == 1:
45 | return f
46 |
47 | # Step 2:
48 | c = [(1 / (lf[idx] - lf[idx + 1])) for idx in range(rp_l - 1)]
49 |
50 | # Step 3:
51 | G = [(f[idx] * lf[idx] - f[idx]) for idx in range(rp_l)]
52 |
53 | # Step 4:
54 | a = [
55 | (
56 | (1 + c[idx] * lf[idx + 1]) * (f[idx] - f[idx + 1])
57 | + c[idx] * (G[idx + 1] - G[idx])
58 | )
59 | for idx in range(rp_l - 1)
60 | ]
61 | b = [
62 | (c[idx] * (G[idx] - G[idx + 1] + lf[idx + 1] * (f[idx + 1] - f[idx])))
63 | for idx in range(rp_l - 1)
64 | ]
65 |
66 | # Step 5:
67 | alpha = [
68 | (
69 | b[0]
70 | if idx == 0
71 | else f[idx] + a[idx - 1]
72 | if idx == rp_l - 1
73 | else a[idx - 1] + b[idx]
74 | )
75 | for idx in range(rp_l)
76 | ]
77 |
78 | return [alpha[idx] for idx in idxs]
79 |
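# --- Editor's sketch, not part of the original module -----------------------------
# The coefficients returned by calc_rp_coef can be combined with the known damages
# to obtain the expected annual damage (EAD) as a linear function of those damages.
# The return periods and damages below are illustrative values only.
if __name__ == "__main__":
    rps = [2, 10, 100]  # return periods (years) for which damages are known
    damages = [100.0, 500.0, 2000.0]  # known damages for those return periods
    coefs = calc_rp_coef(rps)  # one coefficient per return period
    ead = sum(a * d for a, d in zip(coefs, damages))
    print(f"EAD = {ead:.2f}")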
--------------------------------------------------------------------------------
/tests/spatial_output/data/aggr_lvl_2.geojson:
--------------------------------------------------------------------------------
1 | {
2 | "type": "FeatureCollection",
3 | "name": "aggr_lvl_2",
4 | "crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },
5 | "features": [
6 | { "type": "Feature", "properties": { "id": 1, "name": "name1" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.945542384879232, 32.774972116318509 ], [ -79.941210598977435, 32.77546868689749 ], [ -79.94090420436487, 32.77350353524448 ], [ -79.94316518529898, 32.773218271294851 ], [ -79.945542384879232, 32.774972116318509 ] ] ] ] } },
7 | { "type": "Feature", "properties": { "id": 2, "name": "name2" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.943250707944472, 32.773150147424822 ], [ -79.938083260862967, 32.773809929857052 ], [ -79.937861388902149, 32.77221456480541 ], [ -79.938928487380394, 32.770777679725789 ], [ -79.94255239607385, 32.772489263423573 ], [ -79.943250707944472, 32.773150147424822 ] ] ] ] } },
8 | { "type": "Feature", "properties": { "id": 3, "name": "name3" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.941157772320096, 32.775479252228962 ], [ -79.935822279928871, 32.776018084133817 ], [ -79.935632103962448, 32.774116324469617 ], [ -79.94088307370194, 32.773492969913015 ], [ -79.941157772320096, 32.775479252228962 ] ] ] ] } },
9 | { "type": "Feature", "properties": { "id": 4, "name": "name4" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.938051564868573, 32.773820495188517 ], [ -79.934290306866032, 32.774274804441632 ], [ -79.93375147496117, 32.771411599613863 ], [ -79.933793736287043, 32.770640330416711 ], [ -79.932029325931921, 32.770545242433499 ], [ -79.931849715296977, 32.769457013292318 ], [ -79.936128674541436, 32.769668319921678 ], [ -79.938875660723056, 32.770672026411113 ], [ -79.937861388902149, 32.772225130136881 ], [ -79.938051564868573, 32.773820495188517 ] ] ] ] } },
10 | { "type": "Feature", "properties": { "id": 5, "name": "name5" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.935780018602998, 32.776007518802352 ], [ -79.927031924147656, 32.776895006645645 ], [ -79.92720096945115, 32.775130596290523 ], [ -79.935605690633778, 32.774126889801082 ], [ -79.935780018602998, 32.776007518802352 ] ] ] ] } },
11 | { "type": "Feature", "properties": { "id": 6, "name": "name6" }, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -79.934237480208694, 32.774274804441632 ], [ -79.927190404119671, 32.77515172695346 ], [ -79.927200969451135, 32.773070356654301 ], [ -79.928891422485989, 32.769214010668563 ], [ -79.931744061982286, 32.769457013292318 ], [ -79.931987064606048, 32.770566373096436 ], [ -79.933740909629705, 32.770661461079648 ], [ -79.933709213635296, 32.771432730276793 ], [ -79.934237480208694, 32.774274804441632 ] ] ] ] } }
12 | ]
13 | }
14 |
--------------------------------------------------------------------------------
/tests/infographics/data/single_event/test_scenario_metrics.csv:
--------------------------------------------------------------------------------
1 | ,Description,Show In Metrics Table,Long Name,Value
2 | TotalDamageEvent,Total building damage,True,Total building damage,28498776790.659584
3 | ResidentialMinorCount,Number of residences with minor damage,True,Minor Damage Residential Buildings,11689.0
4 | ResidentialMajorCount,Number of residences with major damage,True,Major Damaged Residential Buildings,47374.0
5 | ResidentialDestroyedCount,Number of destroyed residences,True,Destroyed Residential Buildings,9799.0
6 | CommercialMinorCount,Number of commercial buildings with minor damage,False,Minor Damaged Commercial Buildings,959.0
7 | CommercialMajorCount,Number of commercial buildings with major damage,False,Major Damaged Commercial Buildings,2475.0
8 | CommercialDestroyedCount,Number of destroyed commercial buildings,False,Destroyed Commercial Buildings,1708.0
9 | HealthMinorCount,Number of minor damaged health facilities,False,Minor Damaged Health Buildings,67.0
10 | HealthMajorCount,Number of major damaged health facilities,False,Major Damaged Health Buildings,162.0
11 | HealthDestroyedCount,Number of destroyed health facilities,False,Destroyed Health Buildings,42.0
12 | SchoolsMinorCount,Number of minor damaged Schools facilities,False,Minor Damaged Schools Buildings,0.0
13 | SchoolsMajorCount,Number of major damaged Schools facilities,False,Major Damaged Schools Buildings,0.0
14 | SchoolsDestroyedCount,Number of destroyed Schools facilities,False,Destroyed Schools Buildings,0.0
15 | EmergencyMinorCount,Number of minor damaged emergency response buildings,False,Minor Damaged Emergency Buildings,2.0
16 | EmergencyMajorCount,Number of major damaged emergency response buildings,False,Major Damaged Emergency Buildings,4.0
17 | EmergencyDestroyedCount,Number of destroyed emergency response buildings,False,Destroyed Emergency Buildings,2.0
18 | FloodedLowVulnerability,Number of flooded people with low vulnerability,False,Flooded Low Vulnerability,62502.0
19 | FloodedHighVulnerability,Number of flooded people with high vulnerability,False,Flooded High Vulnerability,3569.0
20 | DisplacedLowVulnerability,Number of displaced people with low vulnerability,False,Displaced Low Vulnerability,9633.0
21 | DisplacedHighVulnerability,Number of displaced people with high vulnerability,False,Displaced High Vulnerability,164.0
22 | SlightlyFloodedRoads,Total length of the roads with an indundation depth between 2 and 4,True,Slightly flooded roads,654.45045
23 | MinorFloodedRoads,Total length of the roads with an indundation depth between 6 and 12,True,Minor flooded roads,265.248648
24 | MajorFloodedRoads,Total length of the roads with an indundation depth between 12 and 24,True,Major flooded roads,164.453645
25 | FullyFloodedRoads,Total length of the roads with an indundation depth over 24,True,Fully flooded roads,18.65446
26 |
--------------------------------------------------------------------------------
/tests/spatial_output/data/current_extreme12ft_comb_test_metrics_aggr_lvl_2.csv:
--------------------------------------------------------------------------------
1 | ,TotalDamageEvent,ResidentialAffectedCount,ResidentialMinorCount,ResidentialMajorCount,ResidentialDestroyedCount,CommercialAffectedCount,CommercialMinorCount,CommercialMajorCount,CommercialDestroyedCount,PublicAffectedCount,PublicMinorCount,PublicMajorCount,PublicDestroyedCount,IndustrialAffectedCount,IndustrialMinorCount,IndustrialMajorCount,IndustrialDestroyedCount,FloodedLowVulnerability,FloodedHighVulnerability,DisplacedLowVulnerability,DisplacedHighVulnerability
2 | Description,Total Damage of the Event on buldings,Number of affected damaged residential buildings,Number of minor damaged residential buildings,Number of major damaged residential buildings,Number of destroyed residential buildings,Number of affected damaged commercial buildings,Number of minor damaged commercial buildings,Number of major damaged commercial buildings,Number of destroyed commercial buildings,Number of affected damaged Public buildings,Number of minor damaged Public buildings,Number of major damaged Public buildings,Number of destroyed Public buildings,Number of affected damaged Industrial buildings,Number of minor damaged Industrial buildings,Number of major damaged Industrial buildings,Number of destroyed Industrial buildings,Number of flooded people with low vulnerability,Number of flooded people with high vulnerability,Number of displaced people with low vulnerability,Number of displaced people with low vulnerability
3 | Show In Metrics Table,True,True,True,True,True,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False,False
4 | Long Name,Total Damage of the Event,Affected Residential Buildings,Minor Damaged Residential Buildings,Major Damaged Residential Buildings,Destroyed Residential Buildings,Affected Commercial Buildings,Minor Damaged Commercial Buildings,Major Damaged Commercial Buildings,Destroyed Commercial Buildings,Affected Public Buildings,Minor Damaged Public Buildings,Major Damaged Public Buildings,Destroyed Public Buildings,Affected Industrial Buildings,Minor Damaged Industrial Buildings,Major Damaged Industrial Buildings,Destroyed Industrial Buildings,Flooded Low Vulnerability,Flooded High Vulnerability,Displaced Low Vulnerability,Displaced High Vulnerability
5 | name1,2460160.0,0.0,64.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,64.0,64.0,0.0,0.0
6 | name2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
7 | name3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
8 | name4,12442160.0,0.0,220.0,0.0,0.0,0.0,100.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,220.0,220.0,0.0,0.0
9 | name5,14600640.0,0.0,372.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,372.0,372.0,0.0,0.0
10 | name6,17032760.0,0.0,128.0,0.0,0.0,0.0,303.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,128.0,128.0,0.0,0.0
11 |
--------------------------------------------------------------------------------
/tests/equity/test_equity.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pytest
4 |
5 | from fiat_toolbox.equity.equity import Equity
6 |
7 | DATASET = Path(__file__).parent / "data"
8 |
9 | _cases = {
10 | "fiat_output": {
11 | "census_data": "population_income_data.csv",
12 | "fiat_data": "aggregated_damage_fiat.csv",
13 | "aggregation_label": "Census_Bg",
14 | "percapitaincome_label": "PerCapitaIncomeBG",
15 | "totalpopulation_label": "TotalPopulationBG",
16 | "gamma": 1.2,
17 | "output_file_equity": "aggregated_ewced1.csv",
18 | "damage_column_pattern": "TotalDamageRP{rp}",
19 | "ead_column": "ExpectedAnnualDamages",
20 | },
21 | "general_output": {
22 | "census_data": "population_income_data.csv",
23 | "fiat_data": "aggregated_damage_gen.csv",
24 | "aggregation_label": "Census_Bg",
25 | "percapitaincome_label": "PerCapitaIncomeBG",
26 | "totalpopulation_label": "TotalPopulationBG",
27 | "gamma": 1.2,
28 | "output_file_equity": "aggregated_ewced2.csv",
29 | },
30 | }
31 |
32 |
33 | @pytest.mark.parametrize("case", list(_cases.keys()))
34 | def test_equity(case):
35 | census_data = DATASET.joinpath(_cases[case]["census_data"])
36 | fiat_data = DATASET.joinpath(_cases[case]["fiat_data"])
37 | aggregation_label = _cases[case]["aggregation_label"]
38 | percapitaincome_label = _cases[case]["percapitaincome_label"]
39 | totalpopulation_label = _cases[case]["totalpopulation_label"]
40 | gamma = _cases[case]["gamma"]
41 | output_file_equity = DATASET.joinpath(_cases[case]["output_file_equity"])
42 |
43 | if "damage_column_pattern" in _cases[case].keys():
44 | equity = Equity(
45 | census_data,
46 | fiat_data,
47 | aggregation_label,
48 | percapitaincome_label,
49 | totalpopulation_label,
50 | damage_column_pattern=_cases[case]["damage_column_pattern"],
51 | )
52 | else:
53 | # Use default
54 | equity = Equity(
55 | census_data,
56 | fiat_data,
57 | aggregation_label,
58 | percapitaincome_label,
59 | totalpopulation_label,
60 | )
61 |
62 | df_equity = equity.equity_calculation(
63 | gamma,
64 | output_file_equity,
65 | )
66 | assert "EWCEAD" in df_equity.columns
67 | if "ead_column" in _cases[case].keys():
68 | ranking = equity.rank_ewced(ead_column=_cases[case]["ead_column"])
69 | else:
70 | ranking = equity.rank_ewced()
71 | assert "rank_diff_EWCEAD" in ranking.columns
72 | if "ead_column" in _cases[case].keys():
73 | sri = equity.calculate_resilience_index(ead_column=_cases[case]["ead_column"])
74 | else:
75 | sri = equity.calculate_resilience_index()
76 | assert "SRI" in sri.columns
77 |
78 | # Delete file
79 | output_file_equity.unlink()
80 |
--------------------------------------------------------------------------------
/fiat_toolbox/metrics_writer/fiat_metrics_interface.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from abc import ABC, abstractmethod
3 | from pathlib import Path
4 | from typing import Union
5 |
6 | import pandas as pd
7 |
8 |
9 | class IMetricsFileWriter(ABC):
10 | """Interface for writing metrics to a file."""
11 |
12 | logger: logging.Logger
13 |
14 | @abstractmethod
15 | def __init__(
16 | self,
17 | config_file: Union[str, Path],
18 | logger: logging.Logger = logging.getLogger(__name__),
19 | ): ...
20 |
21 | @abstractmethod
22 | def parse_metrics_to_file(
23 | self,
24 | df_results: pd.DataFrame,
25 | metrics_path: Path,
26 | write_aggregate: str = None,
27 | overwrite: bool = False,
28 | ) -> None:
29 | """
30 | Parse a metrics file and write the metrics to a file.
31 |
32 | Parameters
33 | ----------
34 | df_results : pd.DataFrame
35 | The results dataframe.
36 | metrics_path : Path
37 | The path to where to store the metrics file.
38 | write_aggregate : str
39 | The name of the aggregation label to write to the metrics file
40 | (None for no aggregation label, 'all' for all possible ones).
41 | overwrite : bool
42 | Whether to overwrite the existing metrics file if it already exists.
43 | """
44 | pass
45 |
46 |
47 | class IMetricsFileReader(ABC):
48 | """Interface for reading metrics from a file."""
49 |
50 | logger: logging.Logger
51 |
52 | @abstractmethod
53 | def __init__(
54 | self,
55 | metrics_file_path: Union[str, Path],
56 | logger: logging.Logger = logging.getLogger(__name__),
57 | ): ...
58 |
59 | @abstractmethod
60 | def read_metrics_from_file(self, **kwargs) -> pd.DataFrame:
61 | """
62 | Reads metrics from a file.
63 |
64 | Parameters
65 | ----------
66 | **kwargs
67 | Keyword arguments.
68 |
69 | Returns
70 | -------
71 | pd.DataFrame
72 | The metrics read from the file.
73 |
74 | Raises
75 | ------
76 | KeyError
77 | If the metric is not found in the file.
78 | """
79 |
80 | pass
81 |
82 | @abstractmethod
83 | def read_aggregated_metric_from_file(self, metric: str) -> pd.DataFrame:
84 | """
85 | Reads metrics from a file. These metrics are aggregated metrics.
86 |
87 | Parameters
88 | ----------
89 | metric : str
90 | The metric to read from the file.
91 |
92 | Returns
93 | -------
94 | pd.DataFrame
95 | The metrics read from the file.
96 |
97 | Raises
98 | ------
99 | KeyError
100 | If the metric is not found in the file.
101 | """
102 |
103 | pass
104 |
--------------------------------------------------------------------------------
/tests/well_being/test_methods.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from fiat_toolbox.well_being import methods
4 |
5 |
6 | def test_utility_basic():
7 | assert methods.utility(10, 1.5) < 10
8 | arr = np.array([10, 20, 30])
9 | result = methods.utility(arr, 1.5)
10 | assert isinstance(result, np.ndarray)
11 | assert result.shape == arr.shape
12 |
13 |
14 | def test_inverse_utility():
15 | u = methods.utility(10, 1.5)
16 | c = methods.inverse_utility(u, 1.5)
17 | assert np.isclose(c, 10)
18 |
19 |
20 | def test_recovery_time_and_rate():
21 | rate = 0.5
22 | rebuilt_per = 90
23 | t = methods.recovery_time(rate, rebuilt_per)
24 | r = methods.recovery_rate(t, rebuilt_per)
25 | assert np.isclose(r, rate)
26 |
27 |
28 | def test_reconstruction_cost_t():
29 | t = np.linspace(0, 1, 5)
30 | rec_rate, v, k_str = 0.5, 0.2, 100000
31 | cost = methods.reconstruction_cost_t(t, rec_rate, v, k_str)
32 | assert cost.shape == t.shape
33 | assert np.all(cost >= 0)
34 |
35 |
36 | def test_income_loss_t():
37 | t = np.linspace(0, 1, 5)
38 | rec_rate, v, k_str, pi = 0.5, 0.2, 100000, 0.1
39 | loss = methods.income_loss_t(t, rec_rate, v, k_str, pi)
40 | assert loss.shape == t.shape
41 | assert np.all(loss >= 0)
42 |
43 |
44 | def test_consumption_loss_t():
45 | t = np.linspace(0, 1, 5)
46 | rec_rate, v, k_str, pi = 0.5, 0.2, 100000, 0.1
47 | cl = methods.consumption_loss_t(
48 | t, rec_rate, v, k_str, pi, savings=1000, insurance=500, support=200
49 | )
50 | assert cl.shape == t.shape
51 | assert np.all(cl >= 0)
52 |
53 |
54 | def test_consumption_t():
55 | t = np.linspace(0, 1, 5)
56 | rec_rate, v, k_str, pi, c0 = 0.5, 0.2, 100000, 0.1, 15000
57 | ct = methods.consumption_t(
58 | t,
59 | rec_rate,
60 | v,
61 | k_str,
62 | pi,
63 | c0,
64 | cmin=1000,
65 | savings=1000,
66 | insurance=500,
67 | support=200,
68 | )
69 | assert ct.shape == t.shape
70 |
71 |
72 | def test_utility_loss_t():
73 | t = np.linspace(0, 1, 5)
74 | rec_rate, v, k_str, pi, c0, eta = 0.5, 0.2, 100000, 0.1, 15000, 1.5
75 | ul = methods.utility_loss_t(
76 | t,
77 | rec_rate,
78 | v,
79 | k_str,
80 | pi,
81 | c0,
82 | eta,
83 | cmin=1000,
84 | savings=1000,
85 | insurance=500,
86 | support=200,
87 | )
88 | assert ul.shape == t.shape
89 |
90 |
91 | def test_wellbeing_loss():
92 | du = 100
93 | c_avg = 15000
94 | eta = 1.5
95 | wl = methods.wellbeing_loss(du, c_avg, eta)
96 | assert isinstance(wl, float)
97 |
98 |
99 | def test_equity_weight():
100 | c = 10000
101 | c_avg = 15000
102 | eta = 1.5
103 | ew = methods.equity_weight(c, c_avg, eta)
104 | assert isinstance(ew, float)
105 |
106 |
107 | def test_opt_lambda_runs():
108 | result = methods.opt_lambda(
109 | v=0.2,
110 | k_str=100000,
111 | c0=20000,
112 | pi=0.1,
113 | eta=1.5,
114 | l_min=0.3,
115 | l_max=1.0,
116 | t_max=2.0,
117 | times=np.linspace(0, 2, 10),
118 | method="trapezoid",
119 | cmin=1000,
120 | eps_rel=0.01,
121 | savings=1000,
122 | insurance=500,
123 | support=200,
124 | )
125 | assert "l_opt" in result
126 | assert "loss_opt" in result
127 |
--------------------------------------------------------------------------------
/tests/spatial_output/test_aggregation_areas_output.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import geopandas as gpd
4 | import pandas as pd
5 | import pytest
6 |
7 | from fiat_toolbox.spatial_output.aggregation_areas import AggregationAreas
8 |
9 | file_path = Path(__file__).parent.resolve()
10 |
11 |
12 | def test_write_aggr_areas():
13 | # Aggegation levels to test
14 | names = ["aggr_lvl_1", "aggr_lvl_2"]
15 | for name in names:
16 | # Get metrics file
17 | metrics_path = (
18 | file_path / "data" / f"current_extreme12ft_comb_test_metrics_{name}.csv"
19 | )
20 |
21 | # Get areas file
22 | aggr_areas_path = file_path / "data" / f"{name}.geojson"
23 |
24 | # Define output name
25 | outpath = file_path / f"aggregation_areas_{name}.gpkg"
26 |
27 | # Read files
28 | metrics = pd.read_csv(metrics_path)
29 | aggr_areas = gpd.read_file(aggr_areas_path)
30 |
31 | # Write output
32 | AggregationAreas.write_spatial_file(metrics, aggr_areas, outpath)
33 |
34 | # Assert
35 | assert outpath.exists()
36 |
37 | out = gpd.read_file(outpath)
38 | assert isinstance(out, gpd.GeoDataFrame)
39 | assert sorted(out["name"]) == sorted(aggr_areas["name"])
40 | index_name = metrics.columns[0]
41 | metrics = metrics.set_index(index_name)
42 | values0 = pd.to_numeric(
43 | metrics.loc[sorted(aggr_areas["name"]), "TotalDamageEvent"]
44 | ).tolist()
45 | out = out.set_index("name")
46 | values1 = out.loc[sorted(aggr_areas["name"]), "TotalDamageEvent"].tolist()
47 | assert values0 == values1
48 |
49 | # Delete created files
50 | for file in list(outpath.parent.glob(outpath.stem + ".*")):
51 | file.unlink()
52 |
53 |
54 | def test_error_handling():
55 | name = "aggr_lvl_2"
56 |
57 | # Get metrics file
58 | metrics_path = (
59 | file_path / "data" / f"current_extreme12ft_comb_test_metrics_{name}.csv"
60 | )
61 |
62 | # Get areas file
63 | aggr_areas_path = file_path / "data" / f"{name}.geojson"
64 |
65 | # Define output name
66 | outpath = file_path / f"aggregation_areas_{name}.gpkg"
67 |
68 | # Read files
69 | metrics = pd.read_csv(metrics_path)
70 | aggr_areas = gpd.read_file(aggr_areas_path)
71 |
72 | # Assert error when unknown file format is given
73 | with pytest.raises(ValueError):
74 | AggregationAreas.write_spatial_file(
75 | metrics, aggr_areas, outpath, file_format="matlab"
76 | )
77 |
78 | outpath = file_path / f"aggregation_areas_{name}.shp"
79 |
80 | # Assert error when there is a file_format and extension mismatch
81 | with pytest.raises(ValueError):
82 | AggregationAreas.write_spatial_file(
83 | metrics, aggr_areas, outpath, file_format="geopackage"
84 | )
85 |
86 | AggregationAreas.write_spatial_file(
87 | metrics, aggr_areas, outpath, file_format="shapefile"
88 | )
89 |
90 | assert outpath.exists()
91 | # Delete created files
92 | for file in list(outpath.parent.glob(outpath.stem + ".*")):
93 | file.unlink()
94 |
95 | outpath = file_path / f"aggregation_areas_{name}.geojson"
96 | AggregationAreas.write_spatial_file(
97 | metrics, aggr_areas, outpath, file_format="GeoJSON"
98 | )
99 |
100 | assert outpath.exists()
101 | # Delete created files
102 | for file in list(outpath.parent.glob(outpath.stem + ".*")):
103 | file.unlink()
104 |
--------------------------------------------------------------------------------
/tests/spatial_output/test_points_to_footprints.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import geopandas as gpd
4 | import pandas as pd
5 | import pytest
6 |
7 | from fiat_toolbox import get_fiat_columns
8 | from fiat_toolbox.spatial_output.footprints import Footprints
9 |
10 | file_path = Path(__file__).parent.resolve()
11 |
12 | _FIAT_VERSION = "0.1.0rc2"
13 | _FIAT_COLUMNS = get_fiat_columns(fiat_version=_FIAT_VERSION)
14 |
15 |
16 | def test_write_footprints_event():
17 | # Get footprints file
18 | footprints_path = file_path / "data" / "building_footprints.geojson"
19 | # Get fiat results file
20 | results_path = file_path / "data" / "output_event.csv"
21 |
22 | footprints = gpd.read_file(footprints_path)
23 | results = pd.read_csv(results_path)
24 |
25 | # Define output name
26 | outpath = file_path / "building_footprints_event.gpkg"
27 |
28 | # Aggregate results
29 | footprints = Footprints(footprints, field_name="BF_FID", fiat_version=_FIAT_VERSION)
30 | footprints.aggregate(results)
31 | footprints.calc_normalized_damages()
32 | footprints.write(outpath)
33 |
34 | out = footprints.aggregated_results
35 |
36 | out_example = out[_FIAT_COLUMNS.total_damage][
37 | out[_FIAT_COLUMNS.object_id] == "1393_1394"
38 | ].to_numpy()[0]
39 | in_example = (
40 | results[_FIAT_COLUMNS.total_damage][
41 | results[_FIAT_COLUMNS.object_id] == 1393
42 | ].to_numpy()[0]
43 | + results[_FIAT_COLUMNS.total_damage][
44 | results[_FIAT_COLUMNS.object_id] == 1394
45 | ].to_numpy()[0]
46 | )
47 | assert out_example == in_example
48 | # Delete created files
49 | outpath.unlink()
50 |
51 |
52 | def test_write_footprints_risk():
53 | # Get footprints file
54 | footprints_path = file_path / "data" / "building_footprints.geojson"
55 | # Get fiat results file
56 | results_path = file_path / "data" / "output_risk.csv"
57 |
58 | footprints = gpd.read_file(footprints_path)
59 | results = pd.read_csv(results_path)
60 |
61 | # Define output name
62 | outpath = file_path / "building_footprints_risk.gpkg"
63 |
64 | # Aggregate results
65 | footprints = Footprints(footprints, field_name="BF_FID", fiat_version=_FIAT_VERSION)
66 | footprints.aggregate(results)
67 | footprints.calc_normalized_damages()
68 | footprints.write(outpath)
69 |
70 | out = footprints.aggregated_results
71 |
72 | out_example = out[_FIAT_COLUMNS.risk_ead][
73 | out[_FIAT_COLUMNS.object_id] == "1393_1394"
74 | ].to_numpy()[0]
75 | in_example = (
76 | results[_FIAT_COLUMNS.risk_ead][
77 | results[_FIAT_COLUMNS.object_id] == 1393
78 | ].to_numpy()[0]
79 | + results[_FIAT_COLUMNS.risk_ead][
80 | results[_FIAT_COLUMNS.object_id] == 1394
81 | ].to_numpy()[0]
82 | )
83 | assert out_example == round(in_example)
84 | # Delete created files
85 | outpath.unlink()
86 |
87 |
88 | def test_error_handling():
89 | # Get footprints file
90 | footprints_path = file_path / "data" / "building_footprints.geojson"
91 | # Get fiat results file
92 | results_path = file_path / "data" / "output_risk.csv"
93 |
94 | footprints = gpd.read_file(footprints_path)
95 | results = pd.read_csv(results_path)
96 | del results[_FIAT_COLUMNS.risk_ead]
97 |
98 | with pytest.raises(ValueError):
99 | footprints = Footprints(
100 | footprints, field_name="BF_FID", fiat_version=_FIAT_VERSION
101 | )
102 | footprints.aggregate(results)
103 |
--------------------------------------------------------------------------------
/examples/metrics/test_read_metrics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "attachments": {},
5 | "cell_type": "markdown",
6 | "metadata": {},
7 | "source": [
8 | "## Metrics reader examples\n",
9 |         "In this notebook, you'll find some example code showing how to use the metrics reader. This class reads the metrics file created by the metrics writer and returns the metrics it contains."
10 | ]
11 | },
12 | {
13 | "attachments": {},
14 | "cell_type": "markdown",
15 | "metadata": {},
16 | "source": [
17 |         "**Step 1**: Add the fiat_toolbox folder to the Python path. If the package is already installed in the environment, this step can be skipped. The latter can easily be done by running `pip install -e .`"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import sys\n",
27 | "\n",
28 | "sys.path.append(\"../\")"
29 | ]
30 | },
31 | {
32 | "attachments": {},
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "**Step 2**: Load the MetricsFileReader class"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "from fiat_toolbox.metrics_writer.fiat_read_metrics_file import MetricsFileReader"
46 | ]
47 | },
48 | {
49 | "attachments": {},
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "**Step 3**: Initialize the metrics reader with the metrics path"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {},
60 | "outputs": [],
61 | "source": [
62 | "metrics_path = \"../tests/metrics_writer/data/test_metrics_no_aggregation.csv\"\n",
63 | "metrics_file_reader = MetricsFileReader(metrics_path)"
64 | ]
65 | },
66 | {
67 | "attachments": {},
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "**Step 4.1**: Run the metrics reader function `read_metrics_from_file` with the dataset that contains metrics calculated over the total area (without aggregations)"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "execution_count": null,
77 | "metadata": {},
78 | "outputs": [],
79 | "source": [
80 | "metrics_file_reader.read_metrics_from_file()"
81 | ]
82 | },
83 | {
84 | "attachments": {},
85 | "cell_type": "markdown",
86 | "metadata": {},
87 | "source": [
88 | "**Step 4.2**: Run the metrics reader function `read_aggregated_metric_from_file` with the dataset that contains metrics calculated over the aggregation areas"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "metrics_path = \"../tests/metrics_writer/data/test_metrics_subbasin.csv\"\n",
98 | "metrics_file_reader = MetricsFileReader(metrics_path)\n",
99 | "metrics_file_reader.read_aggregated_metric_from_file(\"Total Damage Sum\")"
100 | ]
101 | }
102 | ],
103 | "metadata": {
104 | "kernelspec": {
105 | "display_name": "fiat_toolbox",
106 | "language": "python",
107 | "name": "python3"
108 | },
109 | "language_info": {
110 | "codemirror_mode": {
111 | "name": "ipython",
112 | "version": 3
113 | },
114 | "file_extension": ".py",
115 | "mimetype": "text/x-python",
116 | "name": "python",
117 | "nbconvert_exporter": "python",
118 | "pygments_lexer": "ipython3",
119 | "version": "3.11.11"
120 | },
121 | "orig_nbformat": 4
122 | },
123 | "nbformat": 4,
124 | "nbformat_minor": 2
125 | }
126 |
--------------------------------------------------------------------------------
/examples/metrics/test_write_metrics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "attachments": {},
5 | "cell_type": "markdown",
6 | "metadata": {},
7 | "source": [
8 | "## Metrics writer examples\n",
9 |         "In this notebook, you'll find some example code showing how to use the metrics writer. This class takes Delft-FIAT results and a metrics configuration and writes the computed metrics to a file."
10 | ]
11 | },
12 | {
13 | "attachments": {},
14 | "cell_type": "markdown",
15 | "metadata": {},
16 | "source": [
17 |         "**Step 1**: Add the fiat_toolbox folder to the Python path. If the package is already installed in the environment, this step can be skipped. The latter can easily be done by running `pip install -e .`"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": null,
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import sys\n",
27 | "\n",
28 | "sys.path.append(\"../\")"
29 | ]
30 | },
31 | {
32 | "attachments": {},
33 | "cell_type": "markdown",
34 | "metadata": {},
35 | "source": [
36 | "**Step 2**: Load the MetricsFileWriter class"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": null,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "import pandas as pd\n",
46 | "\n",
47 | "from fiat_toolbox.metrics_writer.fiat_write_metrics_file import MetricsFileWriter"
48 | ]
49 | },
50 | {
51 | "attachments": {},
52 | "cell_type": "markdown",
53 | "metadata": {},
54 | "source": [
55 | "**Step 3**: Initialize the metrics writer with the config path"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "config_path = \"../tests/metrics_writer/config/test_metrics_config.toml\"\n",
65 | "writer = MetricsFileWriter(config_path)"
66 | ]
67 | },
68 | {
69 | "attachments": {},
70 | "cell_type": "markdown",
71 | "metadata": {},
72 | "source": [
73 |         "**Step 4**: Create a pandas dataframe from the results data. This path normally points to the Delft-FIAT output file"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "metrics_path = \"../tests/metrics_writer/data/#PLACE_YOUR_FILE_PATH_HERE#\"\n",
83 | "df_results = pd.read_csv(metrics_path, delimiter=\";\").head(10)"
84 | ]
85 | },
86 | {
87 | "cell_type": "code",
88 | "execution_count": null,
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "df_results.keys()"
93 | ]
94 | },
95 | {
96 | "attachments": {},
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "**Step 5**: Parse the metrics and write them to the specified output path"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "output_path = \"../tests/metrics_writer/data/temp_metrics.csv\"\n",
110 | "writer.parse_metrics_to_file(df_results, output_path, None)"
111 | ]
112 | }
113 | ],
114 | "metadata": {
115 | "kernelspec": {
116 | "display_name": "fiat_toolbox",
117 | "language": "python",
118 | "name": "python3"
119 | },
120 | "language_info": {
121 | "codemirror_mode": {
122 | "name": "ipython",
123 | "version": 3
124 | },
125 | "file_extension": ".py",
126 | "mimetype": "text/x-python",
127 | "name": "python",
128 | "nbconvert_exporter": "python",
129 | "pygments_lexer": "ipython3",
130 | "version": "3.11.11"
131 | },
132 | "orig_nbformat": 4
133 | },
134 | "nbformat": 4,
135 | "nbformat_minor": 2
136 | }
137 |
--------------------------------------------------------------------------------
/examples/equity/run_equity.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import seaborn as sns\n",
10 | "\n",
11 | "from fiat_toolbox.equity.equity import Equity"
12 | ]
13 | },
14 | {
15 | "cell_type": "markdown",
16 | "metadata": {},
17 | "source": [
18 |         "Example use case for the US, where a FIAT calculation has already been run (aggregated_damage.csv) and census data at the block group level containing total population and per capita income are available (population_income_data.csv)"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "# Input data\n",
28 | "fiat_data = \"../tests/equity/data/aggregated_damage_gen.csv\"\n",
29 | "census_data = \"../tests/equity/data/population_income_data.csv\"\n",
30 | "aggregation_label = \"Census_Bg\"\n",
31 | "percapitaincome_label = \"PerCapitaIncomeBG\"\n",
32 | "totalpopulation_label = \"TotalPopulationBG\""
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "metadata": {},
38 | "source": [
39 | "We can then create an Equity object which will take care of merging the data"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "equity = Equity(\n",
49 | " census_data,\n",
50 | " fiat_data,\n",
51 | " aggregation_label,\n",
52 | " percapitaincome_label,\n",
53 | " totalpopulation_label,\n",
54 | ")\n",
55 | "equity.df"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 |         "We can then run the equity calculation for a given gamma value (the elasticity)"
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": null,
68 | "metadata": {},
69 | "outputs": [],
70 | "source": [
71 | "gamma = 1.2 # elasticity\n",
72 | "df_equity = equity.equity_calculation(gamma)\n",
73 | "df_equity"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 |         "We can then check how the ranking differs between the standard EAD calculation and the equity-weighted approach."
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": null,
86 | "metadata": {},
87 | "outputs": [],
88 | "source": [
89 | "ranking = equity.rank_ewced()\n",
90 | "ranking"
91 | ]
92 | },
93 | {
94 | "cell_type": "code",
95 | "execution_count": null,
96 | "metadata": {},
97 | "outputs": [],
98 | "source": [
99 | "sns.scatterplot(equity.df, x=\"EW\", y=\"rank_diff_EWCEAD\")"
100 | ]
101 | },
102 | {
103 | "cell_type": "markdown",
104 | "metadata": {},
105 | "source": [
106 | "Or calculate a simple socioeconomic resilience index"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": null,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "sri = equity.calculate_resilience_index()\n",
116 | "sri"
117 | ]
118 | }
119 | ],
120 | "metadata": {
121 | "kernelspec": {
122 | "display_name": "fiat_toolbox",
123 | "language": "python",
124 | "name": "python3"
125 | },
126 | "language_info": {
127 | "codemirror_mode": {
128 | "name": "ipython",
129 | "version": 3
130 | },
131 | "file_extension": ".py",
132 | "mimetype": "text/x-python",
133 | "name": "python",
134 | "nbconvert_exporter": "python",
135 | "pygments_lexer": "ipython3",
136 | "version": "3.11.11"
137 | },
138 | "orig_nbformat": 4
139 | },
140 | "nbformat": 4,
141 | "nbformat_minor": 2
142 | }
143 |
--------------------------------------------------------------------------------
/tests/well_being/test_household.py:
--------------------------------------------------------------------------------
1 | from fiat_toolbox.well_being.household import Household, LossType
2 |
3 |
4 | def test_household_initialization():
5 | hh = Household(
6 | v=0.2,
7 | k_str=100000,
8 | c0=20000,
9 | c_avg=18000,
10 | rec_rate=0.5,
11 | pi=0.1,
12 | eta=1.5,
13 | rho=0.05,
14 | t_max=5,
15 | dt=0.1,
16 | currency="€",
17 | cmin=1000,
18 | recovery_per=90.0,
19 | savings=5000,
20 | insurance=2000,
21 | support=1000,
22 | )
23 | assert hh.v == 0.2
24 | assert hh.k_str == 100000
25 | assert hh.c0 == 20000
26 | assert hh.currency == "€"
27 | assert hh.savings == 5000
28 | assert hh.insurance == 2000
29 | assert hh.support == 1000
30 |
31 |
32 | def test_calc_loss_reconstruction():
33 | hh = Household(0.1, 50000, 15000, 14000, rec_rate=0.7)
34 | loss = hh.calc_loss(LossType.RECONSTRUCTION)
35 | assert loss > 0
36 |
37 |
38 | def test_calc_loss_income():
39 | hh = Household(0.1, 50000, 15000, 14000, rec_rate=0.7)
40 | loss = hh.calc_loss(LossType.INCOME)
41 | assert loss > 0
42 |
43 |
44 | def test_calc_loss_utility():
45 | hh = Household(0.1, 50000, 15000, 14000, rec_rate=0.7)
46 | loss = hh.calc_loss(LossType.UTILITY)
47 |
48 | assert loss > 0
49 |
50 |
51 | def test_get_losses():
52 | hh = Household(0.1, 50000, 15000, 14000, rec_rate=0.7)
53 | losses = hh.get_losses()
54 | assert "Wellbeing Loss" in losses
55 | assert "Asset Loss" in losses
56 | assert "Equity Weighted Loss" in losses
57 | for lt in LossType:
58 | assert lt in losses
59 |
60 |
61 | def test_opt_lambda_runs():
62 | hh = Household(0.1, 50000, 15000, 14000)
63 | hh.opt_lambda(no_steps=10)
64 | # No assertion, just check it runs without error
65 |
66 |
67 | def test_repr():
68 | hh = Household(0.1, 50000, 15000, 14000)
69 | s = repr(hh)
70 | assert "Household(" in s
71 | assert "v = 0.1" in s
72 |
73 |
74 | def test_plot_loss_all_types():
75 | import matplotlib
76 |
77 | matplotlib.use("Agg")
78 | import matplotlib.pyplot as plt
79 |
80 | hh = Household(0.1, 50000, 15000, 14000, rec_rate=0.7)
81 | for lt in LossType:
82 | hh.calc_loss(lt)
83 | # Test with no ax provided
84 | fig = hh.plot_loss(lt)
85 | assert fig is None or hasattr(fig, "savefig")
86 | # Test with ax provided
87 | fig2, ax2 = plt.subplots()
88 | result = hh.plot_loss(lt, ax=ax2)
89 | assert result is None
90 |
91 |
92 | def test_plot_consumption():
93 | import matplotlib
94 |
95 | matplotlib.use("Agg")
96 | import matplotlib.pyplot as plt
97 |
98 | hh = Household(0.1, 50000, 15000, 14000, rec_rate=0.7)
99 | hh.get_losses()
100 | # Test with no ax, plot_cmin False
101 | fig = hh.plot_consumption()
102 | assert fig is None or hasattr(fig, "savefig")
103 | # Test with no ax, plot_cmin True
104 | fig2 = hh.plot_consumption(plot_cmin=True)
105 | assert fig2 is None or hasattr(fig2, "savefig")
106 | # Test with ax provided
107 | fig3, ax3 = plt.subplots()
108 | result = hh.plot_consumption(ax=ax3)
109 | assert result is None
110 | # Test with ax and plot_cmin True
111 | fig4, ax4 = plt.subplots()
112 | result2 = hh.plot_consumption(ax=ax4, plot_cmin=True)
113 | assert result2 is None
114 |
115 |
116 | def test_plot_opt_lambda():
117 | import matplotlib
118 |
119 | matplotlib.use("Agg")
120 | hh = Household(0.1, 50000, 15000, 14000, rec_rate=0.7)
121 | hh.opt_lambda(no_steps=10)
122 | # Test with default x_type ("rate")
123 | fig = hh.plot_opt_lambda()
124 | assert fig is None or hasattr(fig, "savefig")
125 | # Test with x_type="time"
126 | fig2 = hh.plot_opt_lambda(x_type="time")
127 | assert fig2 is None or hasattr(fig2, "savefig")
128 |
--------------------------------------------------------------------------------
/tests/infographics/data/single_event/config_charts.toml:
--------------------------------------------------------------------------------
1 | [Charts]
2 | [Charts.Homes]
3 | Name = "Homes"
4 | Image = "https://openclipart.org/image/800px/217511"
5 | [Charts.Businesses]
6 | Name = "Businesses"
7 | Image = "https://openclipart.org/image/800px/229840"
8 | [Charts.Health]
9 | Name = "Health facilities"
10 | Image = "https://openclipart.org/image/800px/190967"
11 | [Charts.Schools]
12 | Name = "Schools"
13 | Image = "https://openclipart.org/image/800px/190006"
14 | [Charts.Emergency]
15 | Name = "Emergency facilities"
16 | Image = "https://openclipart.org/image/800px/190006"
17 |
18 |
19 | [Categories]
20 | [Categories.Minor]
21 | Name = "Minor"
22 | Color = "#ffa500"
23 | [Categories.Major]
24 | Name = "Major"
25 | Color = "#ff0000"
26 | [Categories.Destroyed]
27 | Name = "Destroyed"
28 | Color = "#000000"
29 |
30 | [Slices]
31 | [Slices.Minor_Homes]
32 | Name = "Minor Homes"
33 | Query = "ResidentialMinorCount"
34 | Chart = "Homes"
35 | Category = "Minor"
36 | [Slices.Major_Homes]
37 | Name = "Major Homes"
38 | Query = "ResidentialMajorCount"
39 | Chart = "Homes"
40 | Category = "Major"
41 | [Slices.Destroyed_Homes]
42 | Name = "Destroyed Homes"
43 | Query = "ResidentialDestroyedCount"
44 | Chart = "Homes"
45 | Category = "Destroyed"
46 | [Slices.Minor_Businesses]
47 | Name = "Minor Businesses"
48 | Query = "CommercialMinorCount"
49 | Chart = "Businesses"
50 | Category = "Minor"
51 | [Slices.Major_Businesses]
52 | Name = "Major Businesses"
53 | Query = "CommercialMajorCount"
54 | Chart = "Businesses"
55 | Category = "Major"
56 | [Slices.Destroyed_Businesses]
57 | Name = "Destroyed Businesses"
58 | Query = "CommercialDestroyedCount"
59 | Chart = "Businesses"
60 | Category = "Destroyed"
61 | [Slices.Minor_Health]
62 | Name = "Minor Health"
63 | Query = "HealthMinorCount"
64 | Chart = "Health facilities"
65 | Category = "Minor"
66 | [Slices.Major_Health]
67 | Name = "Major Health"
68 | Query = "HealthMajorCount"
69 | Chart = "Health facilities"
70 | Category = "Major"
71 | [Slices.Destroyed_Health]
72 | Name = "Destroyed Health"
73 | Query = "HealthDestroyedCount"
74 | Chart = "Health facilities"
75 | Category = "Destroyed"
76 | [Slices.Minor_Schools]
77 | Name = "Minor Schools"
78 | Query = "SchoolsMinorCount"
79 | Chart = "Schools"
80 | Category = "Minor"
81 | [Slices.Major_Schools]
82 | Name = "Major Schools"
83 | Query = "SchoolsMajorCount"
84 | Chart = "Schools"
85 | Category = "Major"
86 | [Slices.Destroyed_Schools]
87 | Name = "Destroyed Schools"
88 | Query = "SchoolsDestroyedCount"
89 | Chart = "Schools"
90 | Category = "Destroyed"
91 | [Slices.Minor_Emergency]
92 | Name = "Minor Emergency"
93 | Query = "EmergencyMinorCount"
94 | Chart = "Emergency facilities"
95 | Category = "Minor"
96 | [Slices.Major_Emergency]
97 | Name = "Major Emergency"
98 | Query = "EmergencyMajorCount"
99 | Chart = "Emergency facilities"
100 | Category = "Major"
101 | [Slices.Destroyed_Emergency]
102 | Name = "Destroyed Emergency"
103 | Query = "EmergencyDestroyedCount"
104 | Chart = "Emergency facilities"
105 | Category = "Destroyed"
106 |
107 | [Other]
108 | [Other.Plot]
109 | image_scale = 0.15
110 | numbers_font = 20
111 | height = 400
112 | width = 1400
113 | [Other.Title]
114 | text = "Building damage"
115 | font = 30
116 | [Other.Subtitle]
117 | font = 25
118 | [Other.Legend]
119 | font = 20
120 | [Other.Info]
121 | text="Hi im the info for the charts plot"
122 | image="https://openclipart.org/image/800px/302413"
123 | scale=0.1
124 |
--------------------------------------------------------------------------------
/tests/infographics/data/risk/config_risk_charts.toml:
--------------------------------------------------------------------------------
1 | [Charts]
2 | [Charts.2Y]
3 | Name = "2Y"
4 | Image = "https://openclipart.org/image/800px/217511"
5 | [Charts.5Y]
6 | Name = "5Y"
7 | Image = "https://openclipart.org/image/800px/217511"
8 | [Charts.10Y]
9 | Name = "10Y"
10 | Image = "https://openclipart.org/image/800px/217511"
11 | [Charts.25Y]
12 | Name = "25Y"
13 | Image = "https://openclipart.org/image/800px/217511"
14 | [Charts.50Y]
15 | Name = "50Y"
16 | Image = "https://openclipart.org/image/800px/217511"
17 | [Charts.100Y]
18 | Name = "100Y"
19 | Image = "https://openclipart.org/image/800px/217511"
20 |
21 | [Categories]
22 | [Categories.LowVulnerability]
23 | Name = "LowVulnerability"
24 | Color = "#89CFF0"
25 | [Categories.HighVulnerability]
26 | Name = "HighVulnerability"
27 | Color = "#F0FFFF"
28 |
29 | [Slices]
30 | [Slices.Low_Vulnerability_2Y]
31 | Name = "2Y Low Vulnerability"
32 | Query = "ImpactedHomes2Y"
33 | Chart = "2Y"
34 | Category = "LowVulnerability"
35 | [Slices.High_Vulnerability_2Y]
36 | Name = "2Y High Vulnerability"
37 | Query = "ImpactedHomes2Y"
38 | Chart = "2Y"
39 | Category = "HighVulnerability"
40 | [Slices.Low_Vulnerability_5Y]
41 | Name = "5Y Low Vulnerability"
42 | Query = "ImpactedHomes5Y"
43 | Chart = "5Y"
44 | Category = "LowVulnerability"
45 | [Slices.High_Vulnerability_5Y]
46 | Name = "5Y High Vulnerability"
47 | Query = "ImpactedHomes5Y"
48 | Chart = "5Y"
49 | Category = "HighVulnerability"
50 | [Slices.Low_Vulnerability_10Y]
51 | Name = "10Y Low Vulnerability"
52 | Query = "ImpactedHomes10Y"
53 | Chart = "10Y"
54 | Category = "LowVulnerability"
55 | [Slices.High_Vulnerability_10Y]
56 | Name = "10Y High Vulnerability"
57 | Query = "ImpactedHomes10Y"
58 | Chart = "10Y"
59 | Category = "HighVulnerability"
60 | [Slices.Low_Vulnerability_25Y]
61 | Name = "25Y Low Vulnerability"
62 | Query = "ImpactedHomes25Y"
63 | Chart = "25Y"
64 | Category = "LowVulnerability"
65 | [Slices.High_Vulnerability_25Y]
66 | Name = "25Y High Vulnerability"
67 | Query = "ImpactedHomes25Y"
68 | Chart = "25Y"
69 | Category = "HighVulnerability"
70 | [Slices.Low_Vulnerability_50Y]
71 | Name = "50Y Low Vulnerability"
72 | Query = "ImpactedHomes50Y"
73 | Chart = "50Y"
74 | Category = "LowVulnerability"
75 | [Slices.High_Vulnerability_50Y]
76 | Name = "50Y High Vulnerability"
77 | Query = "ImpactedHomes50Y"
78 | Chart = "50Y"
79 | Category = "HighVulnerability"
80 | [Slices.Low_Vulnerability_100Y]
81 | Name = "100Y Low Vulnerability"
82 | Query = "ImpactedHomes100Y"
83 | Chart = "100Y"
84 | Category = "LowVulnerability"
85 | [Slices.High_Vulnerability_100Y]
86 | Name = "100Y High Vulnerability"
87 | Query = "ImpactedHomes100Y"
88 | Chart = "100Y"
89 | Category = "HighVulnerability"
90 |
91 | [Other]
92 | [Other.Expected_Damages]
93 | title = "Expected annual damages"
94 | image = "{image_path}/money.png"
95 | image_scale = 1.3
96 | title_font_size = 25
97 | numbers_font_size = 20
98 | height = 300
99 |
100 | [Other.Flooded]
101 | title = "Number of homes with a high chance of being flooded in a 30-year period"
102 | image = "{image_path}/house.png"
103 | image_scale = 0.7
104 | title_font_size = 25
105 | numbers_font_size = 20
106 | height = 300
107 |
108 | [Other.Return_Periods]
109 | title = "Building damages"
110 | font_size = 25
111 | image_scale = 0.2
112 | numbers_font = 15
113 | subtitle_font = 22
114 | legend_font = 20
115 | plot_height = 300
116 |
117 | [Other.Info]
118 | text="Hi im the info for the charts plot"
119 | image="https://openclipart.org/image/800px/302413"
120 | scale=0.1
121 |
--------------------------------------------------------------------------------
/fiat_toolbox/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.1.23"
2 |
3 | from packaging import version
4 | from pydantic import BaseModel
5 |
6 |
7 | class FiatColumns(BaseModel):
8 | """
9 | Model defining the FIAT column types and their naming format.
10 | All attributes are strings that can be:
11 | - static: with a standard name, e.g. 'object_id'
12 | - dynamic: with wildcard parts, e.g. 'max_damage_{name}' or 'damage_{name}_{years}y'
13 | """
14 |
15 | object_id: str
16 | object_name: str
17 | primary_object_type: str
18 | secondary_object_type: str
19 | extraction_method: str
20 | ground_floor_height: str
21 | ground_elevation: str
22 | damage_function: str
23 | max_potential_damage: str
24 | aggregation_label: str
25 | inundation_depth: str
26 | inundation_depth_rp: str
27 | reduction_factor: str
28 | reduction_factor_rp: str
29 | damage: str
30 | damage_rp: str
31 | total_damage: str
32 | total_damage_rp: str
33 | risk_ead: str
34 | segment_length: str # TODO should this be here since it is not a FIAT attribute?
35 |
36 |
37 | def get_fiat_columns(fiat_version: str = "0.2") -> FiatColumns:
38 | """
39 | Returns the column mappings for different versions of FIAT.
40 | Parameters:
41 |         fiat_version (str): The version of FIAT. Default is "0.2".
42 | Returns:
43 | FiatColumns: An instance of FiatColumns with the appropriate column mappings for the specified version.
44 | Raises:
45 | ValueError: If the specified version is not supported.
46 | Supported Versions:
47 | - "0.2" and greater: Uses a specific set of column names.
48 | - "0.1.0rc2": Uses a different set of column names.
49 | """
50 | fiat_version = version.parse(fiat_version)
51 | # Columns for versions > 0.1
52 | if fiat_version > version.parse("0.1"):
53 | fiat_columns = FiatColumns(
54 | object_id="object_id",
55 | object_name="object_name",
56 | primary_object_type="primary_object_type",
57 | secondary_object_type="secondary_object_type",
58 | extraction_method="extract_method",
59 | ground_floor_height="ground_flht",
60 | ground_elevation="ground_elevtn",
61 | damage_function="fn_damage_{name}",
62 | max_potential_damage="max_damage_{name}",
63 | aggregation_label="aggregation_label:{name}",
64 | inundation_depth="inun_depth",
65 | inundation_depth_rp="inun_depth_{years}y",
66 | reduction_factor="red_fact",
67 | reduction_factor_rp="red_fact_{years}y",
68 | damage="damage_{name}",
69 | damage_rp="damage_{name}_{years}y",
70 | total_damage="total_damage",
71 | total_damage_rp="total_damage_{years}y",
72 | risk_ead="ead_damage",
73 | segment_length="segment_length",
74 | )
75 | # Columns for version 0.1.0rc2
76 | elif fiat_version == version.parse("0.1.0rc2"):
77 | fiat_columns = FiatColumns(
78 | object_id="Object ID",
79 | object_name="Object Name",
80 | primary_object_type="Primary Object Type",
81 | secondary_object_type="Secondary Object Type",
82 | extraction_method="Extraction Method",
83 | ground_floor_height="Ground Floor Height",
84 | ground_elevation="Ground Elevation",
85 | damage_function="Damage Function: {name}",
86 | max_potential_damage="Max Potential Damage: {name}",
87 | aggregation_label="Aggregation Label: {name}",
88 | inundation_depth="Inundation Depth",
89 | inundation_depth_rp="Inundation Depth ({years}Y)",
90 | reduction_factor="Reduction Factor",
91 | reduction_factor_rp="Reduction Factor ({years}Y)",
92 | damage="Damage: {name}",
93 | damage_rp="Damage: {name} ({years}Y)",
94 | total_damage="Total Damage",
95 | total_damage_rp="Total Damage ({years}Y)",
96 | risk_ead="Risk (EAD)",
97 | segment_length="Segment Length",
98 | )
99 | else:
100 | raise ValueError(f"Unsupported version: {fiat_version}")
101 |
102 | return fiat_columns
103 |
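
A minimal usage sketch of the column mapping above; the object type name "structure" and the 100-year return period are illustrative values, not part of the module:

    from fiat_toolbox import get_fiat_columns

    cols = get_fiat_columns(fiat_version="0.2")
    print(cols.total_damage)  # static column -> "total_damage"
    print(cols.damage_rp.format(name="structure", years=100))  # dynamic column -> "damage_structure_100y"

    legacy = get_fiat_columns(fiat_version="0.1.0rc2")
    print(legacy.total_damage)  # -> "Total Damage"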
--------------------------------------------------------------------------------
/fiat_toolbox/metrics_writer/fiat_write_return_period_threshold.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import re
3 |
4 | import numpy as np
5 | import pandas as pd
6 |
7 |
8 | class ExceedanceProbabilityCalculator:
9 | def __init__(
10 | self, column_prefix, logger: logging.Logger = logging.getLogger(__name__)
11 | ):
12 | self.column_prefix = column_prefix
13 | self.logger = logger
14 |
15 | def append_probability(
16 | self, df: pd.DataFrame, threshold: float, T: float
17 | ) -> pd.DataFrame:
18 | """Append exceedance probability to dataframe.
19 |
20 | Parameters
21 | ----------
22 | df : pandas.DataFrame
23 | Dataframe containing the data.
24 | threshold : float
25 | Threshold value.
26 | T : float
27 | Time horizon.
28 |
29 | Returns
30 | -------
31 | pandas.DataFrame
32 | Dataframe containing the data and the exceedance probability.
33 | """
34 |
35 | # Initialize result dataframe
36 | result = df.copy()
37 |
38 | # Calculate exceedance probability
39 | result["Exceedance Probability"] = self.calculate(df, threshold, T)
40 |
41 | return result
42 |
43 | def calculate(self, df: pd.DataFrame, threshold: float, T: float) -> pd.DataFrame:
44 | """Calculate exceedance probability.
45 |
46 | Parameters
47 | ----------
48 | df : pandas.DataFrame
49 | Dataframe containing the data.
50 | threshold : float
51 | Threshold value.
52 | T : float
53 | Time horizon.
54 |
55 | Returns
56 | -------
57 | pandas.DataFrame
58 | Dataframe containing the exceedance probability.
59 | """
60 |
61 | # Extract return periods from column names
62 | return_periods = [
63 | re.findall(r"\d+", col)
64 | for col in df.columns
65 | if col.startswith(self.column_prefix)
66 | ]
67 | return_periods = [float(rp[0]) for rp in return_periods]
68 |
69 | # Calculate exceedance probability
70 | return self._calculate(df, return_periods, threshold, T).to_frame()
71 |
72 | def append_to_file(
73 | self, input_file: str, output_file: str, threshold: float, T: float
74 | ) -> None:
75 | """Append exceedance probability to file.
76 |
77 | Parameters
78 | ----------
79 | input_file : str
80 | Path to input file.
81 | output_file : str
82 | Path to output file.
83 | threshold : float
84 | Threshold value.
85 | T : float
86 | Time horizon.
87 | """
88 |
89 | # Read data from file
90 | df = pd.read_csv(input_file, index_col=0)
91 |
92 | # Append exceedance probability
93 | result = self.append_probability(df, threshold, T)
94 |
95 | # Write data to file
96 | result.to_csv(output_file)
97 |
98 | def _calculate(
99 | self, df: pd.DataFrame, return_periods: list, threshold: float, T: float
100 | ) -> pd.Series:
101 | """Calculate exceedance probability.
102 |
103 | Parameters
104 | ----------
105 | df : pandas.DataFrame
106 | Dataframe containing the data.
107 | return_periods : list
108 | List of return periods.
109 | threshold : float
110 | Threshold value.
111 | T : float
112 | Time horizon.
113 |
114 | Returns
115 | -------
116 | pandas.Series
117 | Series containing the exceedance probability.
118 | """
119 |
120 | # Convert all non-numerical values to nan
121 | df = df.apply(lambda x: pd.to_numeric(x, errors="coerce"))
122 |
123 | # Extract values for the selected columns
124 | values = df.filter(like=self.column_prefix).to_numpy()
125 |
126 | # Create a mask where True indicates a NaN value
127 | nan_mask = np.isnan(values)
128 |
129 | # Check if there are any NaN values after the first non-NaN value in each row
130 | invalid_rows = np.any(np.diff(nan_mask.astype(int), axis=1) == 1, axis=1)
131 |
132 | # Add the check if all elements in a row are NaN
133 | invalid_rows = invalid_rows | np.all(nan_mask, axis=1)
134 |
135 | # Custom interpolation function
136 | def custom_interp(x, xp, fp):
137 | if x > xp[-1]:
138 | return np.nan
139 | elif x < xp[0]:
140 | return fp[0]
141 | else:
142 | return np.interp(x, xp, fp)
143 |
144 | # Interpolate to find the return period for which the threshold is first exceeded
145 | RP = np.array([custom_interp(threshold, row, return_periods) for row in values])
146 |
147 | # Calculate exceedance probability
148 | mask = ~invalid_rows
149 | result = np.full(len(df), np.nan)
150 | result[mask] = np.round((1 - np.exp(-T / RP[mask])) * 100, 1)
151 |
152 | return pd.Series(result, name="Exceedance Probability", index=df.index)
153 |
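
A minimal usage sketch of the calculator above. The column names, damage values, threshold, and time horizon are invented for illustration; columns sharing the prefix must contain the return period as digits in their names, and row values are assumed to increase with return period. The reported value follows 1 - exp(-T/RP), the probability (in percent) that the interpolated return period RP is exceeded at least once within the horizon T:

    import pandas as pd

    from fiat_toolbox.metrics_writer.fiat_write_return_period_threshold import (
        ExceedanceProbabilityCalculator,
    )

    # Hypothetical damages per return period for three objects (illustrative values).
    df = pd.DataFrame(
        {
            "TotalDamage2Y": [0.0, 100.0, 500.0],
            "TotalDamage10Y": [200.0, 800.0, 1500.0],
            "TotalDamage100Y": [900.0, 2000.0, 5000.0],
        }
    )

    calc = ExceedanceProbabilityCalculator(column_prefix="TotalDamage")
    # Probability (%) that damages exceed 750 within a 30-year horizon, per row.
    result = calc.append_probability(df, threshold=750.0, T=30)
    print(result["Exceedance Probability"])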
--------------------------------------------------------------------------------
/fiat_toolbox/spatial_output/aggregation_areas.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from pathlib import Path
3 | from typing import Optional, Union
4 |
5 | import geopandas as gpd
6 | import pandas as pd
7 |
8 | _FORMATS = ["geopackage", "shapefile", "GeoJSON"]
9 |
10 |
11 | class IAggregationAreas(ABC):
12 | """Interface for writing an aggregation areas spatial file."""
13 |
14 | @abstractmethod
15 | def write_spatial_file(
16 | df_metrics: pd.DataFrame,
17 | gdf_aggr_areas: gpd.GeoDataFrame,
18 | out_path: Union[str, Path],
19 | id_name: Optional[str] = "name",
20 | file_format: Optional[str] = "geopackage",
21 | ) -> None:
22 |         """Saves a geospatial file where the aggregation areas are joined with metrics from a metrics table.
23 |
24 | Parameters
25 | ----------
26 | df_metrics : pd.DataFrame
27 | dataframe containing the metrics
28 | gdf_aggr_areas : gpd.GeoDataFrame
29 | geodataframe with the aggregation areas (with an identifier column provided with argument "id_name")
30 | out_path : Union[str, Path]
31 | path where the geospatial file should be saved
32 | id_name : Optional[str], optional
33 |             name of the identifier column in gdf_aggr_areas to be used for the join, by default "name"
34 | file_format : Optional[str], optional
35 | file format of the output geospatial file, by default "geopackage"
36 |
37 | Raises
38 | ------
39 | ValueError
40 | If the given file format is not implemented.
41 | """
42 | pass
43 |
44 |
45 | class AggregationAreas(IAggregationAreas):
46 | """Write an aggregation areas spatial file."""
47 |
48 | @staticmethod
49 | def _check_extension(out_path, ext):
50 | out_path = Path(out_path)
51 | if out_path.suffix != ext:
52 | raise ValueError(
53 |                 f"File extension given: '{out_path.suffix}' does not match the file format specified: {ext}."
54 | )
55 |
56 | @staticmethod
57 | def write_spatial_file(
58 | df_metrics: pd.DataFrame,
59 | gdf_aggr_areas: gpd.GeoDataFrame,
60 | out_path: Union[str, Path],
61 | id_name: Optional[str] = "name",
62 | file_format: Optional[str] = "geopackage",
63 | ) -> None:
64 |         """Saves a geospatial file where the aggregation areas are joined with metrics from a metrics table.
65 |
66 | Parameters
67 | ----------
68 | df_metrics : pd.DataFrame
69 | dataframe containing the metrics
70 | gdf_aggr_areas : gpd.GeoDataFrame
71 | geodataframe with the aggregation areas (with an identifier column provided with argument "id_name")
72 | out_path : Union[str, Path]
73 | path where the geospatial file should be saved
74 | id_name : Optional[str], optional
75 |             name of the identifier column in gdf_aggr_areas to be used for the join, by default "name"
76 | file_format : Optional[str], optional
77 | file format of the output geospatial file, by default "geopackage"
78 |
79 | Raises
80 | ------
81 | ValueError
82 | If the given file format is not implemented.
83 | """
84 | # Get index as the first column
85 | index_name = df_metrics.columns[0]
86 | df_metrics = df_metrics.set_index(index_name)
87 |
88 |         # Only keep metrics that are flagged to be shown in the metrics map
89 | if "Show In Metrics Map" in df_metrics.index:
90 | metrics_to_keep = (
91 | df_metrics.loc["Show In Metrics Map", :]
92 | .map(lambda x: True if x == "True" else False)
93 | .astype(bool)
94 | )
95 | else:
96 | metrics_to_keep = df_metrics.columns # keep all columns if not present
97 |
98 | df = df_metrics.loc[:, metrics_to_keep]
99 |
100 | # Drop rows containing other variables
101 | # Drop specific rows if they exist in the index
102 | rows_to_drop = [
103 | "Description",
104 | "Show In Metrics Table",
105 | "Show In Metrics Map",
106 | "Long Name",
107 | ]
108 | rows_present = [row for row in rows_to_drop if row in df.index]
109 | if rows_present:
110 | df = df.drop(rows_present)
111 | df = df.apply(pd.to_numeric)
112 |
113 | # Joins based on provided column name
114 | joined = gdf_aggr_areas.join(df, on=id_name)
115 |
116 | # Save file
117 | out_path = Path(out_path).resolve()
118 | out_path.parent.mkdir(parents=True, exist_ok=True)
119 | if file_format == "geopackage":
120 | AggregationAreas._check_extension(out_path, ".gpkg")
121 | joined.to_file(out_path, driver="GPKG")
122 | elif file_format == "shapefile":
123 | AggregationAreas._check_extension(out_path, ".shp")
124 | joined.to_file(out_path)
125 | elif file_format == "GeoJSON":
126 | AggregationAreas._check_extension(out_path, ".geojson")
127 | joined.to_file(out_path, driver="GeoJSON")
128 | else:
129 | raise ValueError(
130 | f"File format specified: {file_format} not in implemented formats: {(*_FORMATS,)}."
131 | )
132 |
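
A minimal usage sketch of the writer above, assuming a metrics table whose first column holds the area names together with optional metadata rows such as "Show In Metrics Map" and "Long Name"; the area names, geometries, and damage values are invented for illustration:

    import geopandas as gpd
    import pandas as pd
    from shapely.geometry import Point

    from fiat_toolbox.spatial_output.aggregation_areas import AggregationAreas

    # Hypothetical metrics: metadata rows are dropped, remaining rows are joined per area.
    metrics = pd.DataFrame(
        {
            "": ["Long Name", "Show In Metrics Map", "area_1", "area_2"],
            "TotalDamageEvent": ["Total damage", "True", 1000, 2500],
        }
    )
    areas = gpd.GeoDataFrame(
        {"name": ["area_1", "area_2"]},
        geometry=[Point(0, 0).buffer(1), Point(3, 0).buffer(1)],
        crs="EPSG:4326",
    )
    # Joins the kept metric columns onto the areas by "name" and writes a GeoPackage.
    AggregationAreas.write_spatial_file(metrics, areas, "aggregation_areas.gpkg")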
--------------------------------------------------------------------------------
/tests/metrics_writer/test_fiat_read_metrics_file.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch
3 |
4 | import pandas as pd
5 |
6 | from fiat_toolbox.metrics_writer.fiat_read_metrics_file import MetricsFileReader
7 |
8 |
9 | class TestReadMetricsFile(unittest.TestCase):
10 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.pd.read_csv")
11 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.os.path.exists")
12 | def test_read_metrics_file(self, mock_path_exists, mock_read_csv):
13 | # Arrange
14 | metrics_file_path = "metrics_file.csv"
15 | mock_path_exists.return_value = True
16 | mock_read_csv.return_value = pd.DataFrame(
17 | {
18 | "": ["Description", 1, 2, 3, 4, 5],
19 | "Total Damage Event": ["Total of the events", 100, 200, 300, 400, 500],
20 | "Other metric": ["Just another metric", 0, 0, 0, 0, 0],
21 | },
22 | columns=["", "Total Damage Event", "Other metric"],
23 | ).set_index("")
24 |
25 | # Act
26 | read_class = MetricsFileReader(metrics_file_path)
27 | df_results = read_class.read_aggregated_metric_from_file(
28 | metric="Total Damage Event"
29 | ).to_dict()
30 |
31 | # Assert
32 | df_expected = {
33 | 1: 100,
34 | 2: 200,
35 | 3: 300,
36 | 4: 400,
37 | 5: 500,
38 | }
39 |
40 | self.assertEqual(df_results, df_expected)
41 |
42 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.os.path.exists")
43 | def test_read_metrics_file_no_file(self, mock_path_exists):
44 | # Arrange
45 | metrics_file_path = "metrics_file.csv"
46 | mock_path_exists.return_value = False
47 |
48 | # Act & Assert
49 | with self.assertRaises(FileNotFoundError) as context:
50 | MetricsFileReader(metrics_file_path)
51 | self.assertTrue("The file does not exist." in str(context.exception))
52 |
53 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.os.path.exists")
54 | def test_read_metrics_file_not_csv(self, mock_path_exists):
55 | # Arrange
56 | metrics_file_path = "metrics_file.txt"
57 | mock_path_exists.return_value = True
58 |
59 | # Act & Assert
60 | with self.assertRaises(ValueError) as context:
61 | MetricsFileReader(metrics_file_path)
62 | self.assertTrue("The file must be a csv file." in str(context.exception))
63 |
64 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.os.path.exists")
65 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.pd.read_csv")
66 | def test_read_metrics_file_no_metric(self, mock_read_csv, mock_path_exists):
67 | # Arrange
68 | metrics_file_path = "metrics_file.csv"
69 | mock_path_exists.return_value = True
70 | mock_read_csv.return_value = pd.DataFrame(
71 | {
72 | "": ["Description", 1, 2, 3, 4, 5],
73 | "Total Damage Event": ["Total of the events", 100, 200, 300, 400, 500],
74 | "Other metric": ["Put", "whatever", "you", "want", "to", "hai"],
75 | },
76 | columns=["", "Total Damage Event", "Other metric"],
77 | ).set_index("")
78 |
79 | # Act
80 | read_class = MetricsFileReader(metrics_file_path)
81 |
82 | # Assert
83 | with self.assertRaises(KeyError) as context:
84 | read_class.read_aggregated_metric_from_file(metric="Bullocks metric name")
85 | self.assertTrue(
86 | "The metric Bullocks metric name is not found in the file."
87 | in str(context.exception)
88 | )
89 |
90 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.pd.read_csv")
91 | @patch("fiat_toolbox.metrics_writer.fiat_read_metrics_file.os.path.exists")
92 | def test_read_metrics_file_no_aggregates(self, mock_path_exists, mock_read_csv):
93 | # Arrange
94 | mock_path_exists.return_value = True
95 | mock_read_csv.return_value = (
96 | pd.DataFrame(
97 | {
98 | "": ["Name1", "Name2", "Name3", "Name4", "Name5"],
99 | "Long Name": [
100 | "Long Name1",
101 | "Long Name2",
102 | "Long Name3",
103 | "Long Name4",
104 | "Long Name5",
105 | ],
106 | "Show In Metrics Table": [
107 | True,
108 | True,
109 | True,
110 | True,
111 | True,
112 | ],
113 | "Description": [
114 | "Description1",
115 | "Description2",
116 | "Description3",
117 | "Description4",
118 | "Description5",
119 | ],
120 | "Value": [1, 2, 3, 4, 5],
121 | }
122 | )
123 | .set_index("")
124 | .transpose()
125 | )
126 |
127 | metrics_file_path = "metrics_file.csv"
128 |
129 | # Act
130 | read_class = MetricsFileReader(metrics_file_path)
131 | df_results = read_class.read_metrics_from_file().to_dict()["Value"]
132 |
133 | # Assert
134 | df_expected = {
135 | "Name1": 1,
136 | "Name2": 2,
137 | "Name3": 3,
138 | "Name4": 4,
139 | "Name5": 5,
140 | }
141 | self.assertEqual(df_results, df_expected)
142 |
--------------------------------------------------------------------------------
/.github/workflows/publish-to-pypi.yml:
--------------------------------------------------------------------------------
1 | name: release
2 |
3 | # This workflow will build a Python package and publish it to PyPI when a new tag is created.
4 |
5 | # Usage:
6 | # - Create a new tag with the version number in the format v*.*.* where * are integers: `git tag v1.2.3`
7 | # - Push the tag to the repository: `git push origin tag v1.2.3`
8 |
9 | # Result:
10 | # - Check if the new version is greater than the latest version on PyPI
11 | # - Install dependencies
12 | # - Build the package
13 | # - Publish it to PyPI
14 | # - Create a GitHub release with the tag name and the release notes
15 |
16 | # Checklist for setting this workflow up for a new project:
17 |
18 | # 1. In github settings:
19 | # - Create an environment called `release`
20 | #    - Set up the permissions (https://github.com///settings/environments)
21 |
22 | # 2. On PyPi:
23 | # - Create the project and add a trusted publisher (https://pypi.org/manage/project//settings/publishing/ or https://pypi.org/manage/account/publishing if the project is not on pypi yet)
24 | # - Ensure the publisher is configured to use:
25 | # - the filename of this workflow yml (in this case: publish-to-pypi.yml)
26 |
27 | # 3. In this file:
28 | # - Add the following variables to the `env` section:
29 | # - PACKAGE_NAME: the name of your package on pypi
30 | # - PYTHON_VERSION: the version of Python you want to use
31 | # - In the `setup_and_build` job:
32 | #      - Update the shell commands to install your package
33 |
34 | on:
35 | push:
36 | tags:
37 | - v[0-9]+\.[0-9]+\.[0-9]+
38 | # https://peps.python.org/pep-0440/
39 |
40 | env:
41 | PACKAGE_NAME: "fiat-toolbox"
42 | PYTHON_VERSION: "3.10"
43 |
44 | jobs:
45 |
46 | details:
47 | runs-on: ubuntu-latest
48 | outputs:
49 | new_version: ${{ steps.release.outputs.new_version }}
50 | tag_name: ${{ steps.release.outputs.tag_name }}
51 | steps:
52 | - uses: actions/checkout@v2
53 |
54 | - name: Extract tag and Details
55 | id: release
56 | run: |
57 | if [ "${{ github.ref_type }}" = "tag" ]; then
58 | TAG_NAME=${GITHUB_REF#refs/tags/}
59 | NEW_VERSION=$(echo $TAG_NAME | sed 's/^v//' | awk -F'-' '{print $1}')
60 | echo "new_version=$NEW_VERSION" >> "$GITHUB_OUTPUT"
61 | echo "tag_name=$TAG_NAME" >> "$GITHUB_OUTPUT"
62 | echo "Version is $NEW_VERSION"
63 | echo "Tag name is $TAG_NAME"
64 | else
65 | echo "No tag found"
66 | exit 1
67 | fi
68 |
69 | check_pypi:
70 | needs: details
71 | runs-on: ubuntu-latest
72 | steps:
73 | - name: Fetch information from PyPI
74 | run: |
75 | response=$(curl -s https://pypi.org/pypi/${{ env.PACKAGE_NAME }}/json || echo "{}")
76 | latest_previous_version=$(echo $response | jq --raw-output "select(.releases != null) | .releases | keys_unsorted[]" | sort -V | tail -n 1)
77 | if [ -z "$latest_previous_version" ]; then
78 | echo "Package not found on PyPI."
79 | latest_previous_version="0.0.0"
80 | fi
81 | echo "Latest version on PyPI: $latest_previous_version"
82 | echo "latest_previous_version=$latest_previous_version" >> $GITHUB_ENV
83 |
84 | - name: Compare versions and exit if not newer
85 | run: |
86 | NEW_VERSION=${{ needs.details.outputs.new_version }}
87 | LATEST_VERSION=$latest_previous_version
88 | if [ "$(printf '%s\n' "$LATEST_VERSION" "$NEW_VERSION" | sort -rV | head -n 1)" != "$NEW_VERSION" ] || [ "$NEW_VERSION" == "$LATEST_VERSION" ]; then
89 | echo "The new version $NEW_VERSION is not greater than the latest version $LATEST_VERSION on PyPI."
90 | exit 1
91 | else
92 | echo "The new version $NEW_VERSION is greater than the latest version $LATEST_VERSION on PyPI."
93 | fi
94 |
95 | setup_and_build:
96 | needs: [details, check_pypi]
97 | runs-on: ubuntu-latest
98 | steps:
99 | - uses: actions/checkout@v4
100 |
101 | - uses: conda-incubator/setup-miniconda@v3
102 | with:
103 | auto-update-conda: true
104 | channels: conda-forge
105 |
106 | - name: Create environment
107 | shell: bash -el {0}
108 | run: |
109 | conda create -n publish python=${{ env.PYTHON_VERSION }}
110 |
111 | - name: Install Build tools
112 | shell: bash -el {0}
113 | run: |
114 | conda install -n publish pip setuptools wheel python-build
115 |
116 | - name: Install dependencies
117 | shell: bash -el {0}
118 | run: |
119 | conda install -n publish gdal --channel conda-forge
120 | conda run -n publish pip install .
121 |
122 | - name: Build source and wheel distribution
123 | run: |
124 | conda run -n publish python -s -m build
125 |
126 | - name: Upload artifacts
127 | uses: actions/upload-artifact@v4
128 | with:
129 | name: dist
130 | path: dist/
131 |
132 | pypi_publish:
133 | name: Upload release to PyPI
134 | needs: [setup_and_build, details]
135 | runs-on: ubuntu-latest
136 | environment:
137 | name: release
138 |
139 | permissions:
140 | id-token: write
141 | steps:
142 | - name: Download artifacts
143 | uses: actions/download-artifact@v4
144 | with:
145 | name: dist
146 | path: dist/
147 |
148 | - name: Publish distribution to PyPI
149 | uses: pypa/gh-action-pypi-publish@release/v1
150 |
151 | github_release:
152 | name: Create GitHub Release
153 | needs: [setup_and_build, details]
154 | runs-on: ubuntu-latest
155 | permissions:
156 | contents: write
157 | steps:
158 | - name: Checkout Code
159 | uses: actions/checkout@v3
160 | with:
161 | fetch-depth: 0
162 |
163 | - name: Download artifacts
164 | uses: actions/download-artifact@v4
165 | with:
166 | name: dist
167 | path: dist/
168 |
169 | - name: Create GitHub Release
170 | id: create_release
171 | env:
172 | GH_TOKEN: ${{ github.token }}
173 | run: |
174 | gh release create ${{ needs.details.outputs.tag_name }} dist/* --title ${{ needs.details.outputs.tag_name }} --generate-notes
175 |
--------------------------------------------------------------------------------
/fiat_toolbox/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import shutil
4 | from pathlib import Path
5 |
6 | import geopandas as gpd
7 | import pandas as pd
8 | import toml
9 |
10 | from fiat_toolbox import get_fiat_columns
11 |
12 |
13 | def _compile_pattern(pattern):
14 | """
15 | Compile a pattern with placeholders into a regex pattern.
16 | Args:
17 | pattern (str): The pattern containing placeholders in the format '{var}'.
18 | Returns:
19 | tuple: A tuple containing the compiled regex pattern and a list of placeholders.
20 | """
21 | # Escape special characters in pattern except for '{var}'
22 | escaped_pattern = re.escape(pattern)
23 | # Find all placeholders in the pattern
24 | placeholders = re.findall(r"\\{(.*?)\\}", escaped_pattern)
25 | # Replace placeholders with regex groups
26 | for placeholder in placeholders:
27 | escaped_pattern = escaped_pattern.replace(
28 | f"\\{{{placeholder}\\}}", f"(?P<{placeholder}>.*?)"
29 | )
30 | # Compile the regex pattern
31 | regex = re.compile(f"^{escaped_pattern}$")
32 | return regex, placeholders
33 |
34 |
35 | def matches_pattern(string: str, pattern: str) -> bool:
36 | """
37 | Check if a string matches a pattern with placeholders.
38 | Args:
39 | string (str): The input string to be checked.
40 | pattern (str): The pattern containing placeholders in the format '{var}'.
41 | Returns:
42 | bool: True if the string matches the pattern, False otherwise.
43 | """
44 | regex, _ = _compile_pattern(pattern)
45 | return bool(regex.match(string))
46 |
47 |
48 | def extract_variables(string: str, pattern: str) -> dict:
49 | """
50 | Extract variables from a string based on a pattern with placeholders.
51 |
52 | Args:
53 | string (str): The input string to be processed.
54 | pattern (str): The pattern containing placeholders in the format '{var}'.
55 |
56 | Returns:
57 | dict: A dictionary with the extracted variables and their values.
58 | If the pattern does not match the input string, an empty dictionary is returned.
59 | """
60 | regex, placeholders = _compile_pattern(pattern)
61 |
62 | # Find the match
63 | match = regex.match(string)
64 | if match:
65 | # Extract the captured groups into a dictionary
66 | extracted_vars = {
67 | placeholder: match.group(placeholder) for placeholder in placeholders
68 | }
69 | return extracted_vars
70 | return {}
71 |
72 |
73 | def replace_pattern(string: str, pattern: str, replacement: str) -> str:
74 | """
75 | Replace placeholders in a string based on a pattern with a replacement string.
76 | Args:
77 | string (str): The input string to be processed.
78 | pattern (str): The pattern containing placeholders in the format '{var}'.
79 | replacement (str): The replacement string where placeholders will be replaced with corresponding values from the input string.
80 | Returns:
81 | str: The processed string with placeholders replaced by corresponding values from the input string.
82 | If the pattern does not match the input string, the original string is returned.
83 | """
84 | regex, placeholders = _compile_pattern(pattern)
85 |
86 | # Find the match
87 | match = regex.match(string)
88 | if match:
89 | # Replace placeholders in the replacement with the captured groups
90 | for placeholder in placeholders:
91 | replacement = replacement.replace(
92 | f"{{{placeholder}}}", match.group(placeholder)
93 | )
94 | return replacement
95 | return string
96 |
97 |
98 | def convert_fiat(
99 | path_in: os.PathLike,
100 | path_out: os.PathLike,
101 | version_in: str = "0.1.0rc2",
102 | version_out: str = "0.2.1",
103 | ):
104 | """
105 | Converts FIAT data from one version to another by copying the input directory to the output directory,
106 | updating the settings file, and renaming columns in the exposure CSV file according to the specified versions.
107 | Args:
108 | path_in (os.PathLike): The input directory containing the FIAT data to be converted.
109 | path_out (os.PathLike): The output directory where the converted FIAT data will be saved.
110 | version_in (str, optional): The version of the input FIAT data. Defaults to "0.1.0rc2".
111 | version_out (str, optional): The version of the output FIAT data. Defaults to "0.2.1".
112 | Raises:
113 | FileNotFoundError: If the settings file or exposure CSV file is not found in the input directory.
114 | KeyError: If the expected keys are not found in the settings file.
115 | """
116 | path_in, path_out = Path(path_in), Path(path_out)
117 | if path_out.exists():
118 | shutil.rmtree(path_out)
119 | shutil.copytree(path_in, path_out)
120 |
121 | settings_path = path_out.joinpath("settings.toml")
122 |
123 | with open(settings_path, "r") as file:
124 | settings = toml.load(file)
125 |
126 | exposure_csv_path = settings_path.parent.joinpath(
127 | settings["exposure"]["csv"]["file"]
128 | )
129 | exposure_csv = pd.read_csv(exposure_csv_path)
130 |
131 | format_in = get_fiat_columns(fiat_version=version_in)
132 | format_out = get_fiat_columns(fiat_version=version_out)
133 |
134 | name_translation = {}
135 |     for col in exposure_csv.columns:  # iterate through the existing exposure columns
136 | for field in list(format_out.model_fields): # check for each field
137 | fiat_col = getattr(format_in, field)
138 | if matches_pattern(col, fiat_col):
139 | impact_col = getattr(format_out, field)
140 | new_col = replace_pattern(col, fiat_col, impact_col)
141 | name_translation[col] = new_col # save mapping
142 |
143 | # Rename exposure csv
144 | exposure_csv = exposure_csv.rename(columns=name_translation)
145 | exposure_csv.to_csv(exposure_csv_path, index=False)
146 |
147 | # Get geoms
148 | keys = [key for key in settings["exposure"]["geom"] if "file" in key]
149 | geoms_paths = [
150 | settings_path.parent.joinpath(settings["exposure"]["geom"][key]) for key in keys
151 | ]
152 |
153 | # Rename geoms
154 | for geom_path in geoms_paths:
155 | geom = gpd.read_file(geom_path)
156 | geom = geom.rename(columns=name_translation)
157 | geom_path.unlink()
158 | geom.to_file(geom_path)
159 |
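
A minimal usage sketch of the pattern helpers above, using the dynamic column patterns defined in fiat_toolbox/__init__.py; the object name "structure" and the 100-year return period are illustrative values. convert_fiat applies the same match-and-replace to every exposure column when migrating a model between FIAT versions:

    from fiat_toolbox.utils import extract_variables, matches_pattern, replace_pattern

    old_col = "Damage: structure (100Y)"  # FIAT 0.1.0rc2 style column name
    print(matches_pattern(old_col, "Damage: {name} ({years}Y)"))
    # True
    print(extract_variables(old_col, "Damage: {name} ({years}Y)"))
    # {'name': 'structure', 'years': '100'}
    print(replace_pattern(old_col, "Damage: {name} ({years}Y)", "damage_{name}_{years}y"))
    # damage_structure_100y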
--------------------------------------------------------------------------------
/fiat_toolbox/metrics_writer/fiat_read_metrics_file.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from pathlib import Path
4 | from typing import Union
5 |
6 | import pandas as pd
7 |
8 | from fiat_toolbox.metrics_writer.fiat_metrics_interface import IMetricsFileReader
9 |
10 |
11 | class MetricsFileReader(IMetricsFileReader):
12 | """Reads metrics from a file."""
13 |
14 | def __init__(
15 | self,
16 | metrics_file_path: Union[str, Path],
17 | logger: logging.Logger = logging.getLogger(__name__),
18 | ):
19 | """
20 | Initializes a new instance of the MetricsFileReader class.
21 |
22 | Parameters
23 | ----------
24 | metrics_file_path : str
25 | The path to the file containing the metrics.
26 |
27 | Raises
28 | ------
29 | FileNotFoundError
30 | If the file cannot be found.
31 | ValueError
32 | If the file is not a valid metrics file.
33 | """
34 |
35 | # Convert the path to a Path object
36 | if not isinstance(metrics_file_path, Path):
37 | metrics_file_path = Path(metrics_file_path)
38 |
39 | # Check if the file is a csv file
40 | if not metrics_file_path.suffix == ".csv":
41 | raise ValueError("The file must be a csv file.")
42 |
43 | # Check if the file exists
44 | if not os.path.exists(metrics_file_path):
45 | raise FileNotFoundError("The file does not exist.")
46 |
47 | # Set the metrics file path
48 | self.metrics_file_path = metrics_file_path
49 | self.logger = logger
50 |
51 |     def read_aggregated_metric_from_file(self, metric: str) -> pd.Series:
52 | """Reads metrics from a file. These metrics are aggregated metrics.
53 |
54 | Parameters:
55 | ----------
56 | metric: str
57 | The metric to read from the file.
58 |
59 | Returns:
60 | -------
61 |             pd.Series
62 |                 The requested metric column read from the file.
63 |
64 | Raises:
65 | ------
66 | KeyError
67 | If the metric is not found in the file.
68 | """
69 |
70 | # Read the metrics from the file
71 | df_metrics = pd.read_csv(self.metrics_file_path, index_col=0)
72 |
73 |         # Remove the description row
74 | df_metrics = df_metrics.iloc[1:]
75 |
76 | # Check if the metric is in the dataframe
77 | if metric not in df_metrics.columns:
78 | raise KeyError(f"The metric {metric} was not found in the file.")
79 |
80 | # Return the metric
81 | return df_metrics[metric]
82 |
83 | def read_metrics_from_file(self, **kwargs) -> pd.DataFrame:
84 | """
85 | Reads metrics from a file.
86 |
87 | Parameters
88 | ----------
89 | include_long_names : bool
90 | Include the long names of the metrics.
91 | include_metrics_table_selection : bool
92 | Include the metrics table selection.
93 | include_metrics_map_selection : bool
94 | Include the metrics map selection.
95 | include_description : bool
96 | Include the description of the metrics.
97 |
98 | Returns
99 | -------
100 | pd.DataFrame
101 | The metrics read from the file.
102 |
103 | Raises
104 | ------
105 | KeyError
106 | If the metric is not found in the file.
107 | """
108 |
109 | # Set the default values
110 | include_long_names = kwargs.get("include_long_names", False)
111 | include_metrics_table_selection = kwargs.get(
112 | "include_metrics_table_selection", False
113 | )
114 | include_metrics_map_selection = kwargs.get(
115 | "include_metrics_map_selection", False
116 | )
117 | include_description = kwargs.get("include_description", False)
118 |
119 | # Read the metrics from the file
120 | df_metrics = pd.read_csv(self.metrics_file_path, index_col=0)
121 |
122 |         # If "Value" is not a column, the metrics are stored row-wise; transpose the data
123 | if "Value" not in df_metrics.columns:
124 | df_metrics = df_metrics.transpose()
125 |
126 | # If the value is still not one of the columns, the metrics file is aggregated
127 | if "Value" not in df_metrics.columns:
128 | aggregations = set(df_metrics.columns) - {
129 | "Description",
130 | "Long Name",
131 | "Show In Metrics Table",
132 | "Show In Metrics Map",
133 | }
134 |
135 | # Ensure values are interpreted as numbers
136 | for aggregation in aggregations:
137 | df_metrics[aggregation] = pd.to_numeric(df_metrics[aggregation])
138 |
139 |             # Remove the description row
140 | if not include_description:
141 | df_metrics = df_metrics.drop("Description", axis="columns")
142 |
143 | # Remove the long names row
144 | if not include_long_names:
145 | df_metrics = df_metrics.drop("Long Name", axis="columns")
146 |
147 | # Remove the metrics table selection row
148 | if not include_metrics_table_selection:
149 | df_metrics = df_metrics.drop("Show In Metrics Table", axis="columns")
150 |
151 | # Remove the metrics map selection row
152 | if not include_metrics_map_selection:
153 | df_metrics = df_metrics.drop("Show In Metrics Map", axis="columns")
154 |
155 | else:
156 | # Ensure values are interpreted as numbers
157 | df_metrics["Value"] = pd.to_numeric(df_metrics["Value"])
158 |
159 |             # Remove the description row
160 | if not include_description and "Description" in df_metrics.columns:
161 | df_metrics = df_metrics.drop("Description", axis="columns")
162 |
163 | # Remove the long names row
164 | if not include_long_names and "Long Name" in df_metrics.columns:
165 | df_metrics = df_metrics.drop("Long Name", axis="columns")
166 |
167 | # Remove the metrics table selection row
168 | if (
169 | not include_metrics_table_selection
170 | and "Show In Metrics Table" in df_metrics.columns
171 | ):
172 | df_metrics = df_metrics.drop("Show In Metrics Table", axis="columns")
173 |
174 | # Remove the metrics map selection row
175 | if (
176 | not include_metrics_map_selection
177 | and "Show In Metrics Map" in df_metrics.columns
178 | ):
179 | df_metrics = df_metrics.drop("Show In Metrics Map", axis="columns")
180 |
181 | # Return the metric
182 | return df_metrics
183 |
--------------------------------------------------------------------------------
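
Usage sketch (not part of the repository files): reading metrics back with `MetricsFileReader`. The CSV path and the metric name "TotalDamageEvent" are made up for illustration; the keyword arguments are the ones documented in `read_metrics_from_file` above.

    from fiat_toolbox.metrics_writer.fiat_read_metrics_file import MetricsFileReader

    # Hypothetical metrics CSV produced earlier by the metrics writer
    reader = MetricsFileReader("output/current_extreme12ft_metrics.csv")

    # Full metrics table, keeping the long names but dropping the other metadata columns
    df_metrics = reader.read_metrics_from_file(include_long_names=True)

    # A single metric column from an aggregated metrics file
    total_damage = reader.read_aggregated_metric_from_file("TotalDamageEvent")
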
/tests/equity/data/population_income_data.csv:
--------------------------------------------------------------------------------
1 | Census_Bg,TotalPopulationBG,PerCapitaIncomeBG
2 | 000100-block1,613,71098
3 | 000100-block2,745,87167
4 | 000100-block3,623,113826
5 | 000200-block1,583,93651
6 | 000200-block2,678,186091
7 | 000400-block1,1167,83065
8 | 000400-block2,2032,38504
9 | 000500-block1,750,100054
10 | 000500-block2,874,58411
11 | 000600-block1,1274,47878
12 | 000700-block1,1807,22678
13 | 000700-block2,1011,19846
14 | 000900-block1,604,37036
15 | 000900-block2,892,33139
16 | 001000-block1,1114,37277
17 | 001000-block2,1546,29002
18 | 001100-block1,1213,66244
19 | 001100-block2,447,61099
20 | 001100-block3,1268,21052
21 | 001500-block1,964,45102
22 | 001500-block2,601,45494
23 | 001901-block1,1614,61298
24 | 001901-block2,814,56505
25 | 001901-block3,2061,37563
26 | 001902-block1,3243,41646
27 | 001902-block2,1014,29584
28 | 001902-block3,1295,64980
29 | 002002-block1,1765,42901
30 | 002002-block2,1716,46841
31 | 002002-block3,1551,35979
32 | 002002-block4,1173,45509
33 | 002004-block1,418,53311
34 | 002004-block2,369,60617
35 | 002005-block1,1561,49738
36 | 002005-block2,2392,46776
37 | 002005-block3,2724,58035
38 | 002006-block1,811,52441
39 | 002006-block2,1239,40769
40 | 002006-block3,2832,68743
41 | 002007-block1,1420,96194
42 | 002007-block2,2148,60729
43 | 002008-block1,1874,45410
44 | 002008-block2,1324,57124
45 | 002008-block3,1065,24023
46 | 002009-block1,3692,35351
47 | 002009-block2,1622,79318
48 | 002009-block3,240,47746
49 | 002103-block1,758,35265
50 | 002103-block2,1627,34904
51 | 002103-block3,248,54568
52 | 002103-block4,1057,53460
53 | 002104-block1,1311,206708
54 | 002104-block2,653,162170
55 | 002105-block1,1224,90876
56 | 002105-block2,754,119032
57 | 002106-block1,1805,51956
58 | 002106-block2,1357,49867
59 | 002107-block1,4732,47631
60 | 002107-block2,2412,39887
61 | 002107-block3,1432,43462
62 | 002107-block4,1960,43021
63 | 002108-block1,1086,29965
64 | 002108-block2,974,67178
65 | 002200-block1,1087,49088
66 | 002200-block2,1417,52450
67 | 002300-block1,607,39436
68 | 002300-block2,861,47177
69 | 002401-block1,1547,33575
70 | 002402-block1,793,22744
71 | 002402-block2,872,21715
72 | 002402-block3,1079,21361
73 | 002503-block1,862,44248
74 | 002503-block2,1431,20371
75 | 002504-block1,718,24554
76 | 002504-block2,1610,26041
77 | 002504-block3,558,35914
78 | 002504-block4,2016,64846
79 | 002604-block1,931,46604
80 | 002604-block2,1941,40950
81 | 002604-block3,643,56884
82 | 002605-block1,2476,30296
83 | 002605-block2,1363,42807
84 | 002606-block1,909,41738
85 | 002606-block2,1452,63749
86 | 002611-block1,1109,48895
87 | 002611-block2,1735,35379
88 | 002612-block1,1568,49931
89 | 002612-block2,713,39074
90 | 002612-block3,572,36107
91 | 002612-block4,1097,56662
92 | 002612-block5,2346,28580
93 | 002613-block1,1400,37144
94 | 002613-block2,1819,35794
95 | 002613-block3,1172,50488
96 | 002613-block4,1571,36248
97 | 002614-block1,2285,57057
98 | 002614-block2,1007,34454
99 | 002701-block1,1343,23670
100 | 002701-block2,1974,120762
101 | 002702-block1,1834,20440
102 | 002702-block2,978,48622
103 | 002702-block3,1214,17628
104 | 002702-block4,1084,41035
105 | 002801-block1,2220,53113
106 | 002801-block2,1016,46551
107 | 002801-block3,1849,46945
108 | 002802-block1,1088,65268
109 | 002802-block2,1328,48606
110 | 002900-block1,1235,54654
111 | 002900-block2,1292,36772
112 | 003000-block1,2045,66206
113 | 003000-block2,1590,97771
114 | 003104-block1,3120,35209
115 | 003104-block2,2193,14371
116 | 003105-block1,1870,20577
117 | 003105-block2,1889,14241
118 | 003106-block1,4874,31135
119 | 003106-block2,1145,14625
120 | 003106-block3,1980,10840
121 | 003106-block4,951,102974
122 | 003107-block1,990,25961
123 | 003107-block2,367,35614
124 | 003107-block3,4224,39399
125 | 003108-block1,2621,26046
126 | 003108-block2,1710,21903
127 | 003109-block1,823,26242
128 | 003109-block2,2466,45381
129 | 003110-block1,1767,21645
130 | 003110-block2,1261,30096
131 | 003110-block3,2979,20481
132 | 003110-block4,236,33213
133 | 003111-block1,1013,29341
134 | 003111-block2,504,21076
135 | 003111-block3,1646,29430
136 | 003111-block4,922,34872
137 | 003113-block1,2226,38614
138 | 003113-block2,2186,18320
139 | 003115-block1,2656,19724
140 | 003115-block2,2961,19568
141 | 003115-block3,2502,22949
142 | 003116-block1,1506,20462
143 | 003116-block2,1723,37543
144 | 003116-block3,1482,42743
145 | 003117-block1,1255,52562
146 | 003117-block2,2477,29099
147 | 003200-block1,780,16618
148 | 003200-block2,1019,23529
149 | 003300-block1,291,33086
150 | 003300-block2,1676,32853
151 | 003300-block3,608,20177
152 | 003300-block4,1142,25464
153 | 003400-block1,754,17455
154 | 003400-block2,1033,9939
155 | 003400-block3,1043,18883
156 | 003400-block4,2434,23119
157 | 003500-block1,1133,53189
158 | 003500-block2,2334,41862
159 | 003600-block1,780,47058
160 | 003600-block2,1511,33760
161 | 003600-block3,760,51672
162 | 003700-block1,745,28816
163 | 003700-block2,1413,31352
164 | 003700-block3,1111,23172
165 | 003800-block1,1094,23006
166 | 003800-block2,1119,16390
167 | 003800-block3,1896,18475
168 | 003900-block1,1024,23356
169 | 003900-block2,2706,10241
170 | 003900-block3,996,45552
171 | 004000-block1,1558,14772
172 | 004000-block2,969,18712
173 | 004300-block1,227,14114
174 | 004300-block2,442,15343
175 | 004300-block3,745,18897
176 | 004300-block4,1018,35514
177 | 004400-block1,1287,31882
178 | 004400-block2,1165,19191
179 | 004607-block1,1154,54190
180 | 004607-block2,1008,81233
181 | 004607-block3,1894,68327
182 | 004607-block4,1739,34882
183 | 004609-block1,1652,41162
184 | 004609-block2,2984,50146
185 | 004609-block3,2384,49085
186 | 004609-block4,1865,56656
187 | 004610-block1,2611,44300
188 | 004610-block2,2281,54981
189 | 004612-block1,1553,56402
190 | 004612-block2,1519,47712
191 | 004612-block3,1188,47887
192 | 004612-block4,3713,34138
193 | 004613-block1,1764,80745
194 | 004614-block1,2238,48416
195 | 004614-block2,2672,68976
196 | 004614-block3,2278,51932
197 | 004615-block1,1934,82232
198 | 004615-block2,2659,113202
199 | 004615-block3,1007,41560
200 | 004616-block1,3162,66573
201 | 004616-block2,2504,17580
202 | 004616-block3,2728,57054
203 | 004617-block1,2905,51536
204 | 004617-block2,5634,60788
205 | 004618-block1,3321,31251
206 | 004618-block2,3790,48937
207 | 004619-block1,1993,71236
208 | 004619-block2,1698,46415
209 | 004619-block3,1513,49242
210 | 004620-block1,2162,68096
211 | 004620-block2,1056,51018
212 | 004620-block3,1359,58684
213 | 004621-block1,2423,105615
214 | 004621-block2,1188,67157
215 | 004621-block3,927,51371
216 | 004621-block4,1135,58914
217 | 004622-block1,1453,26238
218 | 004622-block2,3878,64292
219 | 004701-block1,902,59958
220 | 004701-block2,327,53821
221 | 004701-block3,2479,51218
222 | 004701-block4,2651,98423
223 | 004702-block1,2031,46717
224 | 004800-block1,1267,89103
225 | 004800-block2,953,111788
226 | 004901-block1,719,113244
227 | 004901-block2,1085,62006
228 | 004902-block1,497,81051
229 | 004902-block2,1203,155027
230 | 004902-block3,814,72263
231 | 005001-block1,609,54027
232 | 005001-block2,794,41828
233 | 005002-block1,1020,31031
234 | 005002-block2,679,17430
235 | 005002-block3,1239,30311
236 | 005002-block4,537,16154
237 | 005100-block1,1153,68322
238 | 005300-block1,1983,49711
239 | 005300-block2,515,35784
240 | 005300-block3,570,9818
241 | 005400-block1,533,18876
242 | 005400-block2,1872,16135
243 | 005500-block1,799,19100
244 | 005500-block2,750,17878
245 | 005601-block1,1730,37856
246 | 005601-block2,2833,52822
247 | 005601-block3,4022,52342
248 | 005602-block1,2412,37971
249 | 005602-block2,1422,37223
250 | 005602-block3,2322,28923
251 | 005701-block1,2426,48933
252 | 005701-block2,1434,44015
253 | 005701-block3,1824,39505
254 | 005702-block1,1789,41372
255 | 005702-block2,2132,29404
256 | 005800-block1,2600,44076
257 | 005800-block2,1289,38879
258 | 005800-block3,3959,52481
259 | 005900-block1,1271,58849
260 | 005900-block2,3464,14507
261 | 005900-block3,1942,53146
262 | 990100-block0,0,
263 |
--------------------------------------------------------------------------------
/tests/metrics_writer/test_fiat_write_return_period_threshold.py:
--------------------------------------------------------------------------------
1 | # Generated by CodiumAI
2 | import unittest
3 | from unittest.mock import patch
4 |
5 | import numpy as np
6 | import pandas as pd
7 |
8 | from fiat_toolbox.metrics_writer.fiat_write_return_period_threshold import (
9 | ExceedanceProbabilityCalculator,
10 | )
11 |
12 |
13 | class TestExceedanceProbabilityCalculator(unittest.TestCase):
14 | # The class should be able to correctly calculate the exceedance probability for a given dataframe, threshold and time horizon.
15 | def test_calculate_exceedance_probability(self):
16 | # Arrange
17 | calculator = ExceedanceProbabilityCalculator("something")
18 | df = pd.DataFrame(
19 | {
20 | "something (2Y)": [0, 0.1, 0.2],
21 | "something (5Y)": [0, 0.2, 0.4],
22 | "something (10Y)": [0, 0.3, 0.6],
23 | "something (25Y)": [0.4, 0.6, 0.8],
24 | "something (50Y)": [0.9, 1.0, 1.1],
25 | }
26 | )
27 | threshold = 0.2
28 | T = 30
29 |
30 | # Act
31 | result = calculator.calculate(df, threshold, T)
32 |
33 | # Assert
34 | expected_result = pd.DataFrame({"Exceedance Probability": [82.0, 99.8, 100.0]})
35 | pd.testing.assert_frame_equal(result, expected_result)
36 |
37 | # The class should be able to correctly append the exceedance probability column to a given dataframe.
38 | def test_append_exceedance_probability(self):
39 | # Arrange
40 | calculator = ExceedanceProbabilityCalculator("something")
41 | df = pd.DataFrame(
42 | {
43 | "something (2Y)": [0, 0.1, 0.2],
44 | "something (5Y)": [0, 0.2, 0.4],
45 | "something (10Y)": [0, 0.3, 0.6],
46 | "something (25Y)": [0.4, 0.6, 0.8],
47 | "something (50Y)": [0.9, 1.0, 1.1],
48 | }
49 | )
50 | threshold = 0.2
51 | T = 30
52 |
53 | # Act
54 | result = calculator.append_probability(df, threshold, T)
55 |
56 | # Assert
57 | expected_result = pd.DataFrame(
58 | {
59 | "something (2Y)": [0, 0.1, 0.2],
60 | "something (5Y)": [0, 0.2, 0.4],
61 | "something (10Y)": [0, 0.3, 0.6],
62 | "something (25Y)": [0.4, 0.6, 0.8],
63 | "something (50Y)": [0.9, 1.0, 1.1],
64 | "Exceedance Probability": [82.0, 99.8, 100.0],
65 | }
66 | )
67 | pd.testing.assert_frame_equal(result, expected_result)
68 |
69 | # The class should be able to add the exceedance probability column to a given file.
70 | @patch("pandas.read_csv")
71 | @patch("pandas.DataFrame.to_csv")
72 | def test_append_exceedance_probability_to_file(self, mock_to_csv, mock_read_csv):
73 | # Arrange
74 | calculator = ExceedanceProbabilityCalculator("something")
75 | input_file = "tests/data/return_period_threshold_input.csv"
76 | output_file = "tests/data/return_period_threshold_output.csv"
77 | threshold = 0.2
78 | T = 30
79 |
80 | mock_read_csv.return_value = pd.DataFrame(
81 | {
82 | "something (2Y)": [0, 0.1, 0.2],
83 | "something (5Y)": [0, 0.2, 0.4],
84 | "something (10Y)": [0, 0.3, 0.6],
85 | "something (25Y)": [0.4, 0.6, 0.8],
86 | "something (50Y)": [0.9, 1.0, 1.1],
87 | }
88 | )
89 |
90 | # Act
91 | calculator.append_to_file(input_file, output_file, threshold, T)
92 |
93 | # Assert
94 | self.assertEqual(
95 | mock_to_csv.call_args[0][0], "tests/data/return_period_threshold_output.csv"
96 | )
97 |
98 | # The class should be able to handle a dataframe with all NaN values and return a dataframe with NaN values in the exceedance probability column.
99 | def test_handle_all_nan_values(self):
100 | # Arrange
101 | calculator = ExceedanceProbabilityCalculator("something")
102 | df = pd.DataFrame(
103 | {
104 | "something (2Y)": [np.nan, np.nan, np.nan],
105 | "something (5Y)": [np.nan, np.nan, np.nan],
106 | "something (10Y)": [np.nan, np.nan, np.nan],
107 | "something (25Y)": [np.nan, np.nan, np.nan],
108 | "something (50Y)": [np.nan, np.nan, np.nan],
109 | }
110 | )
111 | threshold = 0.2
112 | T = 30
113 |
114 | # Act
115 | result = calculator.calculate(df, threshold, T)
116 |
117 | # Assert
118 | expected_result = pd.DataFrame(
119 | {"Exceedance Probability": [np.nan, np.nan, np.nan]}
120 | )
121 | pd.testing.assert_frame_equal(result, expected_result)
122 |
123 | # The class should be able to handle a dataframe with multiple columns and some columns not starting with the specified column prefix.
124 | def test_handle_dataframe_with_multiple_columns(self):
125 | # Arrange
126 | calculator = ExceedanceProbabilityCalculator("something")
127 |
128 | # Create a dataframe with multiple columns and some columns not starting with the specified column prefix
129 | df = pd.DataFrame(
130 | {
131 | "something (2Y)": [0, 0.1, 0.2],
132 | "something (5Y)": [0, 0.2, 0.4],
133 | "something (10Y)": [0, 0.3, 0.6],
134 | "something (25Y)": [0.4, 0.6, 0.8],
135 | "something (50Y)": [0.9, 1.0, 1.1],
136 | "other_column": [
137 | 1,
138 | 2,
139 | 3,
140 | ], # Column not starting with the specified column prefix
141 | }
142 | )
143 |
144 | # Expected result for the test data
145 | expected = pd.DataFrame(
146 | {
147 | "Exceedance Probability": [82.0, 99.8, 100.0],
148 | }
149 | )
150 |
151 | # Act
152 | result = calculator.calculate(df, threshold=0.2, T=30)
153 |
154 | # Assert
155 | pd.testing.assert_frame_equal(result, expected)
156 |
157 | # If the values of the row are all below the threshold, the exceedance probability should be NaN.
158 | # The other way around, if all values of the row are above the threshold, the exceedance probability
159 | # should use the smallest return period.
160 | def test_threshold_out_of_bounds(self):
161 | # Arrange
162 | df = pd.DataFrame(
163 | {
164 | "something (2Y)": [0, 0.1, 1.3],
165 | "something (5Y)": [0, 0.2, 1.5],
166 | "something (10Y)": [0, 0.5, 1.8],
167 | "something (25Y)": [0.4, 0.8, 2.0],
168 | "something (50Y)": [0.9, 1.5, 2.5],
169 | }
170 | )
171 | calculator = ExceedanceProbabilityCalculator("something")
172 |
173 | # Expected result for the test data
174 | expected = pd.DataFrame(
175 | {
176 | "Exceedance Probability": [np.nan, 14.4, 91.8],
177 | }
178 | )
179 |
180 | # Act
181 | result = calculator.calculate(df, threshold=1, T=5)
182 |
183 | # Assert
184 | pd.testing.assert_frame_equal(result, expected)
185 |
186 |     # It shouldn't matter if the return period is out of bounds. The data is not representative as it will contain a lot of NaNs, but that is the responsibility of the user.
187 | def test_return_period_out_of_bounds(self):
188 | # Arrange
189 | df = pd.DataFrame(
190 | {
191 | "something (2Y)": [0, 0, 1.3],
192 | "something (5Y)": [0, 0, 1.5],
193 | "something (10Y)": [0, 0, 1.8],
194 | "something (25Y)": [0.05, 0.1, 2.0],
195 | "something (50Y)": [0.1, 0.3, 2.5],
196 | }
197 | )
198 | calculator = ExceedanceProbabilityCalculator("something")
199 |
200 | # Expected result for the test data
201 | expected = pd.DataFrame(
202 | {
203 | "Exceedance Probability": [np.nan, 93.1, 100],
204 | }
205 | )
206 |
207 | # Act
208 | result = calculator.calculate(df, threshold=0.2, T=100)
209 |
210 | # Assert
211 | pd.testing.assert_frame_equal(result, expected)
212 |
213 | # The class should be able to handle a dataframe with non-numeric values and return NaN values in the exceedance probability column for the corresponding rows.
214 | def test_non_numeric_values(self):
215 | # Arrange
216 | df = pd.DataFrame(
217 | {
218 | "something (2Y)": [0, 0.1, 1.3],
219 | "something (5Y)": [0, 0.2, "hoi"],
220 | "something (10Y)": [0, "a", 1.8],
221 | "something (25Y)": [0.4, 0.8, 2.0],
222 | "something (50Y)": [0.9, 1.5, 2.5],
223 | }
224 | )
225 | threshold = 0.2
226 | T = 30
227 | calculator = ExceedanceProbabilityCalculator("something")
228 |
229 | # Act
230 | result = calculator.calculate(df, threshold, T)
231 |
232 | # Assert
233 | expected = pd.DataFrame({"Exceedance Probability": [82.0, np.nan, np.nan]})
234 | pd.testing.assert_frame_equal(result, expected)
235 |
236 | # The class should be able to handle a dataframe with negative values and return NaN values in the exceedance probability column for the corresponding rows.
237 | def test_negative_values(self):
238 | # Arrange
239 | calculator = ExceedanceProbabilityCalculator("something")
240 | df = pd.DataFrame(
241 | {
242 | "something (2Y)": [-0.5, -0.3, -0.1],
243 | "something (5Y)": [-0.4, -0.2, 0.1],
244 | "something (10Y)": [-0.3, 0, 0.2],
245 | "something (25Y)": [-0.1, 0.2, 0.4],
246 | "something (50Y)": [0, 0.4, 0.6],
247 | }
248 | )
249 |
250 | # Act
251 | result = calculator.calculate(df, threshold=0.2, T=30)
252 |
253 | # Assert
254 | expected = pd.DataFrame({"Exceedance Probability": [np.nan, 69.9, 95.0]})
255 | pd.testing.assert_frame_equal(result, expected)
256 |
--------------------------------------------------------------------------------
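
The implementation of ExceedanceProbabilityCalculator is not included in this listing, but the expected values in the tests above are consistent with linearly interpolating the return period at which a row first reaches the threshold and converting it to a probability of at least one exceedance within the time horizon T. The sketch below is an inference from the test data, not the actual implementation; rounding to one decimal is also an assumption.

    import numpy as np

    def exceedance_probability(values, return_periods, threshold, T):
        # Sketch: % chance of exceeding `threshold` at least once within T years,
        # given damages `values` at the corresponding `return_periods`.
        values = np.asarray(values, dtype=float)
        rps = np.asarray(return_periods, dtype=float)
        if not np.any(values >= threshold):
            return np.nan                      # threshold never reached -> NaN
        if values[0] >= threshold:
            rp_star = rps[0]                   # already exceeded at the smallest return period
        else:
            i = int(np.argmax(values >= threshold))
            frac = (threshold - values[i - 1]) / (values[i] - values[i - 1])
            rp_star = rps[i - 1] + frac * (rps[i] - rps[i - 1])
        return round((1 - np.exp(-T / rp_star)) * 100, 1)

    rps = [2, 5, 10, 25, 50]
    print(exceedance_probability([0, 0, 0, 0.4, 0.9], rps, threshold=0.2, T=30))        # 82.0
    print(exceedance_probability([0.1, 0.2, 0.3, 0.6, 1.0], rps, threshold=0.2, T=30))  # 99.8
    print(exceedance_probability([0.2, 0.4, 0.6, 0.8, 1.1], rps, threshold=0.2, T=30))  # 100.0
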
/fiat_toolbox/equity/equity.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from typing import Union
4 |
5 | import numpy as np
6 | import pandas as pd
7 | import parse
8 |
9 | from fiat_toolbox.equity.fiat_functions import calc_rp_coef
10 |
11 |
12 | class Equity:
13 | def __init__(
14 | self,
15 | census_table: Union[str, pd.DataFrame, Path],
16 | damages_table: Union[str, pd.DataFrame, Path],
17 | aggregation_label: str,
18 | percapitaincome_label: str,
19 | totalpopulation_label: str,
20 | damage_column_pattern: str = "Total Damage ({rp}Y)",
21 | ):
22 | """_summary_
23 |
24 | Parameters
25 | ----------
26 | census_table : Union[str, pd.DataFrame, Path]
27 | Census data
28 | damages_table : Union[str, pd.DataFrame, Path]
29 | Damage results
30 | aggregation_label : str
31 | column name of aggregation areas
32 | percapitaincome_label : str
33 | column name of per capita income
34 | totalpopulation_label : str
35 | column name of total population
36 | """
37 | # Merge tables
38 | self.df = self._merge_tables(census_table, damages_table, aggregation_label)
39 | self.df0 = self.df.copy() # Keep copy of original
40 | self.aggregation_label = aggregation_label
41 | self.percapitaincome_label = percapitaincome_label
42 | self.totalpopulation_label = totalpopulation_label
43 | self.damage_column_pattern = damage_column_pattern
44 |
45 | @staticmethod
46 | def _check_datatype(
47 | variable: Union[str, pd.DataFrame],
48 | ) -> pd.DataFrame:
49 | """Check that inputs for equity are rather .csv files or pd.Dataframes
50 |
51 | Parameters
52 | ----------
53 | variable : Union[str, pd.DataFrame]
54 | input
55 |
56 | Returns
57 | -------
58 | pd.DataFrame
59 | input in dataframe format
60 |
61 | Raises
62 | ------
63 | ValueError
64 | Error if input is not in correct format
65 | """
66 |
67 | if isinstance(variable, pd.DataFrame):
68 | pass
69 | elif os.path.exists(variable):
70 | variable = pd.read_csv(variable)
71 | elif isinstance(variable, str) and variable.endswith(".csv"):
72 | variable = pd.read_csv(variable)
73 | else:
74 | raise ValueError(
75 | "Input variable is neither a pandas DataFrame nor a path to a CSV file."
76 | )
77 | return variable
78 |
79 | @staticmethod
80 | def _merge_tables(
81 | census_table: Union[str, pd.DataFrame, Path],
82 | damages_table: Union[str, pd.DataFrame, Path],
83 | aggregation_label: str,
84 | ) -> pd.DataFrame:
85 | """Create dataframe with damage and social data used to calculate the equity weights
86 |
87 | Parameters
88 | ----------
89 | census_table : Union[str, pd.DataFrame, Path]
90 | Census data
91 | damages_table : Union[str, pd.DataFrame, Path]
92 | Damage results
93 | aggregation_label : str
94 | column name used to merge on
95 |
96 | Returns
97 | -------
98 | pd.DataFrame
99 | merged dataframe
100 | """
101 |         # Check whether data inputs are .csv files or pd.DataFrames and convert them to DataFrames
102 | census_table = Equity._check_datatype(census_table)
103 | damages_table = Equity._check_datatype(damages_table)
104 | # If the aggregated damages format is the fiat_toolbox one make sure columns are interpreted correctly
105 | if "Unnamed:" in damages_table.columns[0]:
106 | # Use name from input label
107 | damages_table = damages_table.rename(
108 | columns={damages_table.columns[0]: aggregation_label}
109 | )
110 | index_name = damages_table.columns[0]
111 | damages_table = damages_table.set_index(index_name)
112 |
113 | # Drop rows containing other variables
114 | rows_to_drop = [
115 | "Description",
116 | "Show In Metrics Table",
117 | "Show In Metrics Map",
118 | "Long Name",
119 | ]
120 | existing_rows_to_drop = [
121 | col for col in rows_to_drop if col in damages_table.index
122 | ]
123 | damages_table = damages_table.drop(existing_rows_to_drop, axis=0)
124 | damages_table = damages_table.apply(pd.to_numeric)
125 | # Merge census block groups with fiat output (damages estimations per return period)
126 | df = damages_table.merge(census_table, on=aggregation_label, how="left")
127 | df = df[~df[aggregation_label].isna()]
128 | df = df.reset_index(drop=True)
129 | return df
130 |
131 | def _calculate_equity_weights(self):
132 | """Calculates equity weights per aggregation area"""
133 |         # Get population and per capita income data
134 | I_PC = self.df[self.percapitaincome_label] # mean per capita income
135 | Pop = self.df[self.totalpopulation_label] # population
136 |
137 | # Calculate aggregated annual income
138 | I_AA = I_PC * Pop
139 |
140 | # Calculate weighted average income per capita
141 | I_PC = np.ma.MaskedArray(I_PC, mask=np.isnan(I_PC))
142 | I_WA = np.ma.average(I_PC, weights=Pop)
143 |
144 | # Calculate equity weights
145 | EW = (I_PC / I_WA) ** -self.gamma # Equity Weight
146 |
147 | # Add annual income to the dataframe
148 | self.df["I_AA"] = I_AA
149 | # Add equity weight calculations into the dataframe
150 | self.df["EW"] = EW
151 |
152 | def _get_rp_from_name(self, name):
153 | parser = parse.parse(self.damage_column_pattern, name, extra_types={"s": str})
154 | if parser:
155 | rp = int(parser.named["rp"])
156 | else:
157 | rp = None
158 | return rp
159 |
160 | def calculate_ewced_per_rp(self):
161 | """Get equity weighted certainty equivalent damages per return period using a risk prenium"""
162 |
163 | # Get equity weight data
164 | I_AA = self.df["I_AA"]
165 | EW = self.df["EW"]
166 |
167 | # Retrieve columns with damage per return period data of fiat output
168 | RPs = {}
169 | for name in self.df.columns:
170 | if self._get_rp_from_name(name):
171 | rp = self._get_rp_from_name(name)
172 | RPs[rp] = name
173 | # Make sure data is sorted
174 | self.RPs = {}
175 | for key in sorted(RPs.keys()):
176 | self.RPs[key] = RPs[key]
177 |
178 | if len(self.RPs) == 0:
179 | raise ValueError(
180 | "Columns with damages per return period could not be found."
181 | )
182 |
183 | # Get Equity weighted certainty equivalent damage per return period
184 | for rp in self.RPs:
185 | col = self.RPs[rp]
186 | # Damage for return period
187 | D = self.df[col]
188 | # Period of interest in years
189 | t = 1
190 | # Probability of exceedance
191 | P = 1 - np.exp(-t / rp)
192 | # Social vulnerability
193 | z = D / I_AA
194 | # Risk premium
195 | R = (
196 | 1
197 | - (1 + P * ((1 - z) ** (1 - self.gamma) - 1)) ** (1 / (1 - self.gamma))
198 | ) / (P * z)
199 | # This step is needed to avoid nan value when z is zero
200 | R[R.isna()] = 0
201 | # Equity weighted damage
202 | EWD = EW * D
203 | # Equity weighted certainty equivalent damage
204 | EWCED = R * EWD
205 | # Add risk premium data to dataframes
206 | self.df[f"R_RP_{rp}"] = R
207 | # Add ewd and ewced to dataframes
208 | self.df[f"EWD_RP_{rp}"] = EWD
209 | self.df[f"EWCED_RP_{rp}"] = EWCED
210 |
211 | def calculate_ewcead(self):
212 | """Calculates equity weighted certainty expected annual damages using log linear approach"""
213 | layers = []
214 | return_periods = []
215 | for rp in self.RPs:
216 | return_periods.append(rp)
217 | layers.append(self.df.loc[:, f"EWCED_RP_{rp}"].values)
218 |
219 | stacked_layers = np.dstack(tuple(layers)).squeeze()
220 | self.df["EWCEAD"] = stacked_layers.dot(
221 | np.array(calc_rp_coef(return_periods))[:, None]
222 | )
223 |
224 | def calculate_ewead(self):
225 | """Calculates equity weighted certainty expected annual damages using log linear approach"""
226 | layers = []
227 | return_periods = []
228 | for rp in self.RPs:
229 | return_periods.append(rp)
230 | layers.append(self.df.loc[:, f"EWD_RP_{rp}"].values)
231 |
232 | stacked_layers = np.dstack(tuple(layers)).squeeze()
233 | self.df["EWEAD"] = stacked_layers.dot(
234 | np.array(calc_rp_coef(return_periods))[:, None]
235 | )
236 |
237 | def equity_calculation(
238 | self,
239 | gamma: float = 1.2,
240 | output_file: Union[str, Path, None] = None,
241 | ) -> pd.DataFrame:
242 | """Calculates equity weighted risk
243 |
244 | Parameters
245 | ----------
246 | gamma : float, optional
247 |             elasticity, by default 1.2
248 | output_file : Union[str, Path, None], optional
249 | output file path, by default None
250 |
251 | Returns
252 | -------
253 | pd.DataFrame
254 | dataframe with the results
255 | """
256 | self.gamma = gamma
257 | # Get equity weights
258 | self._calculate_equity_weights()
259 | # Calculate equity weighted damage per return period
260 | self.calculate_ewced_per_rp()
261 | # Calculate equity weighted risk
262 | self.calculate_ewead()
263 | self.calculate_ewcead()
264 | # Keep only results
265 | df_ewced_filtered = self.df[[self.aggregation_label, "EW", "EWEAD", "EWCEAD"]]
266 | # Save file if requested
267 | if output_file is not None:
268 | df_ewced_filtered.to_csv(output_file, index=False)
269 |
270 | return df_ewced_filtered
271 |
272 | def rank_ewced(self, ead_column: str = "Risk (EAD)") -> pd.DataFrame:
273 | """Ranks areas per EAD EWCEAD and the calculates difference in ranking between 2nd and 1st
274 |
275 | Parameters
276 | ----------
277 | ead_column : str, optional
278 | name of column where the standard EAD calculation exists, by default "Risk (EAD)"
279 |
280 | Returns
281 | -------
282 | pd.DataFrame
283 | ranking results
284 | """
285 | if ead_column not in self.df.columns:
286 | raise ValueError(
287 | f"EAD column '{ead_column}' not present in provided aggregated file. A different column name can be specified using the 'ead_column' argument."
288 | )
289 | self.df["rank_EAD"] = self.df[ead_column].rank(ascending=False).astype(int)
290 | self.df["rank_EWEAD"] = self.df["EWEAD"].rank(ascending=False).astype(int)
291 | self.df["rank_EWCEAD"] = self.df["EWCEAD"].rank(ascending=False).astype(int)
292 | self.df["rank_diff_EWEAD"] = self.df["rank_EWEAD"] - self.df["rank_EAD"]
293 | self.df["rank_diff_EWCEAD"] = self.df["rank_EWCEAD"] - self.df["rank_EAD"]
294 | return self.df[
295 | [
296 | self.aggregation_label,
297 | "rank_EAD",
298 | "rank_EWCEAD",
299 | "rank_diff_EWCEAD",
300 | "rank_EWEAD",
301 | "rank_diff_EWEAD",
302 | ]
303 | ]
304 |
305 | def calculate_resilience_index(
306 | self, ead_column: str = "Risk (EAD)"
307 | ) -> pd.DataFrame:
308 | """Calculates a simple socioeconomic resilience indicators by the ratio of the standard EAD to the EWCEAD
309 |
310 | Parameters
311 | ----------
312 | ead_column : str, optional
313 | name of column where the standard EAD calculation exists, by default "Risk (EAD)"
314 |
315 | Returns
316 | -------
317 | pd.DataFrame
318 | index results
319 | """
320 | self.df["SRI"] = self.df[ead_column] / self.df["EWCEAD"]
321 | self.df = self.df.replace([np.inf, -np.inf], np.nan)
322 | return self.df[[self.aggregation_label, "SRI"]]
323 |
--------------------------------------------------------------------------------
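
Usage sketch (not part of the repository files): driving the `Equity` class end to end. The census column names come from the test data in tests/equity/data/population_income_data.csv; the aggregated damages file is hypothetical and would need `Total Damage ({rp}Y)` columns, plus a `Risk (EAD)` column for the ranking and resilience steps.

    from fiat_toolbox.equity.equity import Equity

    equity = Equity(
        census_table="tests/equity/data/population_income_data.csv",
        damages_table="aggregated_damages.csv",        # hypothetical FIAT aggregation output
        aggregation_label="Census_Bg",
        percapitaincome_label="PerCapitaIncomeBG",
        totalpopulation_label="TotalPopulationBG",
    )

    # Equity weights EW = (I_PC / I_WA) ** -gamma, then EWEAD / EWCEAD per area
    results = equity.equity_calculation(gamma=1.2, output_file="equity_results.csv")

    # Optional post-processing (requires a "Risk (EAD)" column in the damages table)
    ranking = equity.rank_ewced(ead_column="Risk (EAD)")
    sri = equity.calculate_resilience_index(ead_column="Risk (EAD)")
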
/fiat_toolbox/infographics/risk_infographics.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import logging
3 | from pathlib import Path
4 | from typing import Dict, List, Tuple, Union
5 |
6 | from plotly.graph_objects import Figure
7 |
8 | from fiat_toolbox.infographics.infographics import InfographicsParser
9 | from fiat_toolbox.infographics.infographics_interface import IInfographicsParser
10 | from fiat_toolbox.metrics_writer.fiat_read_metrics_file import (
11 | MetricsFileReader,
12 | )
13 |
14 |
15 | class RiskInfographicsParser(IInfographicsParser):
16 | """Class for creating the infographic"""
17 |
18 |     logger: logging.Logger = logging.getLogger(__name__)
19 |
20 | def __init__(
21 | self,
22 | scenario_name: str,
23 | metrics_full_path: Union[Path, str],
24 | config_base_path: Union[Path, str],
25 | output_base_path: Union[Path, str],
26 | logger: logging.Logger = logging.getLogger(__name__),
27 | ) -> None:
28 | """Initialize the InfographicsParser
29 |
30 | Parameters
31 | ----------
32 | scenario_name : str
33 | The name of the scenario
34 | metrics_full_path : Union[Path, str]
35 | The path to the metrics file
36 | config_base_path : Union[Path, str]
37 | The path to the config folder
38 | output_base_path : Union[Path, str]
39 | The path to the output folder
40 | """
41 |
42 | # Save the scenario name
43 | self.scenario_name = scenario_name
44 |
45 | # Convert the metrics path to a Path object
46 | if isinstance(metrics_full_path, str):
47 | metrics_full_path = Path(metrics_full_path)
48 | self.metrics_full_path = metrics_full_path
49 |
50 | # Convert the config path to a Path object
51 | if isinstance(config_base_path, str):
52 | config_base_path = Path(config_base_path)
53 | self.config_base_path = config_base_path
54 |
55 | # Convert the output path to a Path object
56 | if isinstance(output_base_path, str):
57 | output_base_path = Path(output_base_path)
58 | self.output_base_path = output_base_path
59 | self.logger = logger
60 |
61 | def _get_impact_metrics(self) -> Dict:
62 | """Get the impact metrics for a scenario
63 |
64 | Returns
65 | -------
66 | Dict
67 | The impact metrics for the scenario
68 | """
69 |
70 | # Check if the metrics file exists
71 | if not Path.exists(self.metrics_full_path):
72 | raise FileNotFoundError(
73 | f"Metrics file not found at {self.metrics_full_path}"
74 | )
75 |
76 | # Read configured metrics
77 | metrics = (
78 | MetricsFileReader(self.metrics_full_path)
79 | .read_metrics_from_file()
80 | .to_dict()["Value"]
81 | )
82 |
83 | # Return the metrics
84 | return metrics
85 |
86 | @staticmethod
87 | def _encode_image_from_path(image_path: str) -> str:
88 | """Encode an image from a path to a base64 string
89 |
90 | Parameters
91 | ----------
92 | image_path : str
93 | The path to the image
94 |
95 | Returns
96 | -------
97 | str
98 | The base64 encoded image string
99 | """
100 | path = Path(image_path)
101 | if not Path.exists(path):
102 | RiskInfographicsParser.logger.error(f"Image not found at {path}")
103 | return
104 | with open(path, "rb") as image_file:
105 | encoded_string = base64.b64encode(image_file.read()).decode()
106 |
107 | return f"data:image/png;base64,{encoded_string}"
108 |
109 | @staticmethod
110 | def _figures_list_to_html(
111 | rp_fig: Figure,
112 | metrics: Dict,
113 | charts: Dict,
114 | file_path: Union[str, Path] = "infographics.html",
115 | image_folder_path: Union[str, Path] = None,
116 | ):
117 | """Save a list of plotly figures in an HTML file
118 |
119 | Parameters
120 | ----------
121 | rp_fig : Figure
122 | The plotly figure consisting of the pie charts for multiple return periods
123 | metrics : Dict
124 | The impact metrics for the scenario
125 | file_path : Union[str, Path], optional
126 | Path to the HTML file, by default "infographics.html"
127 |         image_folder_path : Union[str, Path], optional
128 | Path to the image folder, by default None
129 | """
130 | # Convert the file_path to a Path object
131 | if isinstance(file_path, str):
132 | file_path = Path(file_path)
133 |
134 | # Check if the file_path already exists
135 | if Path.exists(file_path):
136 | raise FileExistsError(f"File already exists at {file_path}")
137 |
138 | # Check if the file_path is correct
139 | if file_path.suffix != ".html":
140 | raise ValueError(f"File path must be a .html file, not {file_path}")
141 |
142 | # Create the directory if it does not exist
143 | if not Path.exists(file_path.parent):
144 | file_path.parent.mkdir(parents=True)
145 |
146 | # Check if the image_path exists
147 | expected_damage_path = InfographicsParser._check_image_source(
148 | charts["Other"]["Expected_Damages"]["image"],
149 | image_folder_path,
150 | return_image=False,
151 | )
152 | flooded_path = InfographicsParser._check_image_source(
153 | charts["Other"]["Flooded"]["image"], image_folder_path, return_image=False
154 | )
155 |
156 | # Div height is the max of the chart heights
157 | div_height = max(
158 | charts["Other"]["Expected_Damages"]["height"],
159 | charts["Other"]["Flooded"]["height"],
160 | charts["Other"]["Return_Periods"]["plot_height"],
161 | )
162 |
163 | # Write the html to the file
164 | with open(file_path, mode="w", encoding="utf-8") as infographics:
165 | rp_charts = (
166 | rp_fig.to_html(config={"displayModeBar": False})
167 | .split("")[1]
168 | .split("")[0]
169 | )
170 |
171 | infographics.write(
172 | f"""
173 |
174 |
175 |
176 |
177 |
227 |
228 |
229 |
230 |
231 |
{charts["Other"]["Expected_Damages"]["title"]}
232 |
})
233 |
${"{:,.0f}".format(metrics[charts["Other"]["Expected_Damages"]["query"]])}
234 |
235 |
236 |
{charts["Other"]["Flooded"]["title"]}
237 |
})
238 |
{"{:,.0f}".format(metrics[charts["Other"]["Flooded"]["query"]])}
239 |
240 |
241 | {rp_charts}
242 |
243 |
244 |
245 |
246 | """
247 | )
248 |
249 | def _get_infographics(
250 | self,
251 |     ) -> Tuple[Dict, Dict, Figure]:
252 | """Get the infographic for a scenario
253 |
254 | Returns
255 | -------
256 |         Tuple[Dict, Dict, Figure]
257 |             The impact metrics, the chart configuration and the infographic figure for the scenario
258 |
259 | """
260 |
261 | # Get the impact metrics
262 | metrics = self._get_impact_metrics()
263 |
264 | # Get the infographic configuration
265 | pie_chart_config_path = self.config_base_path.joinpath(
266 | "config_risk_charts.toml"
267 | )
268 |
269 | # Check if the infographic configuration files exist
270 | if not Path.exists(pie_chart_config_path):
271 | raise FileNotFoundError(
272 | f"Infographic configuration file not found at {pie_chart_config_path}"
273 | )
274 |
275 | # Get the pie chart dictionaries
276 | charts = InfographicsParser._get_pies_dictionary(pie_chart_config_path, metrics)
277 |
278 | # Create the pie chart figures
279 | charts_fig = InfographicsParser._get_pie_chart_figure(
280 | data=charts.copy(),
281 | legend_orientation="h",
282 | yanchor="top",
283 | y=-0.1,
284 | title=charts["Other"]["Return_Periods"]["title"],
285 | image_path=self.config_base_path.joinpath("images"),
286 | title_font_size=charts["Other"]["Return_Periods"]["font_size"],
287 | subtitle_font_size=charts["Other"]["Return_Periods"]["subtitle_font"],
288 | image_scale=charts["Other"]["Return_Periods"]["image_scale"],
289 | numbers_font=charts["Other"]["Return_Periods"]["numbers_font"],
290 | legend_font_size=charts["Other"]["Return_Periods"]["legend_font"],
291 | plot_info=charts["Other"]["Info"]["text"],
292 | plot_info_img=charts["Other"]["Info"]["image"],
293 | plot_info_scale=charts["Other"]["Info"]["scale"],
294 | plot_height=charts["Other"]["Return_Periods"]["plot_height"],
295 | )
296 |
297 | # Return the figure
298 | return metrics, charts, charts_fig
299 |
300 | def get_infographics(self) -> Union[List[Figure], Figure]:
301 | """Get the infographic for a scenario
302 |
303 | Returns
304 | -------
305 | Union[List[Figure], Figure]
306 | The infographic for the scenario as a list of figures or a single figure
307 | """
308 |
309 | # Get the infographic
310 | _, _, infographic = self._get_infographics()
311 |
312 | # Return the infographic
313 | return infographic
314 |
315 | def write_infographics_to_file(self) -> str:
316 | """Write the infographic for a scenario to file
317 |
318 | Returns
319 | -------
320 | str
321 | The path to the infographic file
322 | """
323 |
324 | # Create the infographic path
325 | infographic_html = self.output_base_path.joinpath(
326 | f"{self.scenario_name}_metrics.html"
327 | )
328 |
329 | # Check if the infographic already exists. If so, return the path
330 | if Path.exists(infographic_html):
331 | RiskInfographicsParser.logger.info(
332 | f"Infographic already exists, skipping creation. Path: {infographic_html}"
333 | )
334 | return str(infographic_html)
335 |
336 | # Get the infographic
337 | metrics, charts, infographic = self._get_infographics()
338 |
339 | # Convert the infographic to html. The default for using relative image paths is to have an images folder in the same directory as the config files
340 | self._figures_list_to_html(
341 | infographic,
342 | metrics,
343 | charts,
344 | infographic_html,
345 | self.config_base_path.joinpath("images"),
346 | )
347 |
348 | # Return the path to the infographic
349 | return str(infographic_html)
350 |
351 | def get_infographics_html(self) -> str:
352 | """Get the path to the infographic html file
353 |
354 | Returns
355 | -------
356 | str
357 | The path to the infographic html file
358 | """
359 |
360 | # Create the infographic path
361 | infographic_path = self.output_base_path.joinpath(
362 | f"{self.scenario_name}_metrics.html"
363 | )
364 |
365 | return str(infographic_path)
366 |
--------------------------------------------------------------------------------
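
Usage sketch (not part of the repository files): creating a risk infographic with `RiskInfographicsParser`. All paths and the scenario name are hypothetical; the config folder must contain `config_risk_charts.toml` and an `images/` subfolder, as the code above expects.

    from fiat_toolbox.infographics.risk_infographics import RiskInfographicsParser

    parser = RiskInfographicsParser(
        scenario_name="current_extreme12ft",                            # hypothetical scenario
        metrics_full_path="output/current_extreme12ft_metrics.csv",     # hypothetical metrics file
        config_base_path="config/infographics",                         # hypothetical config folder
        output_base_path="output/infographics",                         # hypothetical output folder
    )

    # Writes <output_base_path>/<scenario_name>_metrics.html (skipped if it already exists)
    html_path = parser.write_infographics_to_file()
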
/tests/infographics/test_risk_infographics.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import io
3 | import unittest
4 | from pathlib import Path
5 | from unittest.mock import mock_open, patch
6 |
7 | import pandas as pd
8 | from plotly.graph_objects import Figure
9 |
10 | from fiat_toolbox.infographics.risk_infographics import RiskInfographicsParser
11 |
12 |
13 | class TestRiskInfographicsParserGetMetrics(unittest.TestCase):
14 | # TODO: These tests should be extended with integration tests where you are testing on actual data. Before this can be done, a standard database should be created with all the necessary data.
15 |
16 | @patch("fiat_toolbox.infographics.risk_infographics.Path.exists")
17 | @patch("fiat_toolbox.infographics.risk_infographics.MetricsFileReader")
18 | def test_get_impact_metrics(
19 | self,
20 | mock_metrics_file_reader,
21 | mock_path_exists,
22 | ):
23 | # Arrange
24 | mock_path_exists.return_value = True
25 |
26 | mock_reader = mock_metrics_file_reader.return_value
27 | mock_reader.read_metrics_from_file.return_value = pd.DataFrame(
28 | {"Value": [1, 2, 3]}
29 | )
30 |
31 | # Act
32 | parser = RiskInfographicsParser(
33 | scenario_name="test_scenario",
34 | metrics_full_path="metrics_path.csv",
35 | config_base_path="DontCare",
36 | output_base_path="DontCare",
37 | )
38 | df_results = parser._get_impact_metrics()
39 |
40 | # Assert
41 | self.assertEqual(df_results, {0: 1, 1: 2, 2: 3})
42 | self.assertEqual(mock_path_exists.call_count, 1)
43 | self.assertEqual(
44 | str(mock_path_exists.call_args_list[0][0][0]),
45 | "metrics_path.csv",
46 | )
47 |
48 | @patch("fiat_toolbox.infographics.risk_infographics.Path.exists")
49 | @patch("fiat_toolbox.infographics.risk_infographics.MetricsFileReader")
50 | def test_get_impact_metrics_no_file(
51 | self,
52 | mock_metrics_file_reader,
53 | mock_path_exists,
54 | ):
55 | # Arrange
56 | mock_path_exists.return_value = False
57 |
58 | mock_reader = mock_metrics_file_reader.return_value
59 | mock_reader.read_metrics_from_file.return_value = {"test": [1, 2, 3]}
60 |
61 | # Act
62 | parser = RiskInfographicsParser(
63 | scenario_name="test_scenario",
64 | metrics_full_path="metrics_path.csv",
65 | config_base_path="DontCare",
66 | output_base_path="DontCare",
67 | )
68 |
69 | # Assert
70 | with self.assertRaises(FileNotFoundError) as context:
71 | _ = parser._get_impact_metrics()
72 |
73 | self.assertTrue(
74 | "Metrics file not found at metrics_path.csv" in str(context.exception)
75 | )
76 | self.assertEqual(mock_path_exists.call_count, 1)
77 | self.assertEqual(
78 | str(mock_path_exists.call_args_list[0][0][0]),
79 | "metrics_path.csv",
80 | )
81 |
82 |
83 | class TestRiskInfographicsParserChartsFigure(unittest.TestCase):
84 | money_bin = b"fake_money_image_data"
85 | house_bin = b"fake_house_image_data"
86 | money_path = "money.png"
87 | house_path = "house.png"
88 |
89 | @patch("fiat_toolbox.infographics.risk_infographics.Path.exists")
90 | @patch("builtins.open", new_callable=mock_open)
91 | def test_encode_image_from_path(self, mock_open, mock_path_exists):
92 | # Arrange
93 | mock_path_exists.return_value = True
94 | mock_open.return_value.read.return_value = self.money_bin
95 |
96 | # Act
97 | encoded_image = RiskInfographicsParser._encode_image_from_path(self.money_path)
98 |
99 | # Assert
100 | expected_encoded_string = (
101 | f"data:image/png;base64,{base64.b64encode(self.money_bin).decode()}"
102 | )
103 | assert encoded_image == expected_encoded_string
104 | mock_open.assert_called_once_with(Path(self.money_path), "rb")
105 | mock_path_exists.assert_called_once_with(Path(self.money_path))
106 |
107 | @patch("fiat_toolbox.infographics.infographics.Path.exists")
108 | @patch("fiat_toolbox.infographics.infographics.Image.open")
109 | @patch("fiat_toolbox.infographics.risk_infographics.Path.exists")
110 | @patch("fiat_toolbox.infographics.risk_infographics.Figure.to_html")
111 | @patch("builtins.open", new_callable=mock_open)
112 | def test_figure_to_html(
113 | self,
114 | mock_open,
115 | mock_to_html,
116 | mock_open_image,
117 | mock_path_exists,
118 | mock_path_exists_infographics,
119 | ):
120 | # Arrange
121 | figure_path = Path("parent/some_figure.html")
122 | mock_open_image.return_value = "some_image"
123 |
124 | def exists_side_effect(path):
125 | if ".html" in str(path):
126 | # In case of the html file, we want it to not exist
127 | return False
128 | else:
129 | return True
130 |
131 | mock_path_exists_infographics.side_effect = exists_side_effect
132 | mock_path_exists.side_effect = exists_side_effect
133 | mock_to_html.return_value = "some_figure"
134 |
135 | def mock_open_side_effect(file_path, mode="r", encoding=None):
136 | file = str(file_path)
137 | if "r" in mode:
138 | if "money.png" in file:
139 | return io.BytesIO(self.money_bin)
140 | elif "house.png" in file:
141 | return io.BytesIO(self.house_bin)
142 | # return mock_open(read_data=house_bin).return_value
143 | else:
144 | return mock_open.return_value
145 |
146 | mock_open.side_effect = mock_open_side_effect
147 | mock_file = mock_open.return_value.__enter__.return_value
148 |
149 | figs = Figure()
150 |
151 | metrics = {"ExpectedAnnualDamages": 1000000, "FloodedHomes": 1000}
152 | charts = {
153 | "Other": {
154 | "Expected_Damages": {
155 | "title": "Expected annual damages",
156 | "query": "ExpectedAnnualDamages",
157 | "image": "money.png",
158 | "image_scale": 0.125,
159 | "title_font_size": 30,
160 | "numbers_font_size": 15,
161 | "height": 300,
162 | },
163 | "Flooded": {
164 | "title": "Number of homes with a high chance of being flooded in a 30-year period",
165 | "query": "FloodedHomes",
166 | "image": "house.png",
167 | "image_scale": 0.125,
168 | "title_font_size": 30,
169 | "numbers_font_size": 15,
170 | "height": 300,
171 | },
172 | "Return_Periods": {
173 | "title": "Building damages",
174 | "font_size": 30,
175 | "image_scale": 0.125,
176 | "numbers_font": 15,
177 | "subtitle_font": 25,
178 | "legend_font": 20,
179 | "plot_height": 300,
180 | },
181 | "Info": {
182 | "title": "Building damages",
183 | "image": "house.png",
184 | "scale": 0.125,
185 | },
186 | }
187 | }
188 |
189 | # Act
190 | parser = RiskInfographicsParser(
191 | scenario_name="test_scenario",
192 | metrics_full_path="DontCare",
193 | config_base_path="DontCare",
194 | output_base_path="DontCare",
195 | )
196 |
197 | parser._figures_list_to_html(
198 | rp_fig=figs, metrics=metrics, charts=charts, file_path=figure_path
199 | )
200 |
201 | # Assert
202 | expected_html = f"""
203 |
204 |
205 |
206 |
207 |
257 |
258 |
259 |
260 |
261 |
Expected annual damages
262 |
.decode()})
263 |
$1,000,000
264 |
265 |
266 |
Number of homes with a high chance of being flooded in a 30-year period
267 |
.decode()})
268 |
1,000
269 |
270 |
271 | some_figure
272 |
273 |
274 |
275 |
276 | """
277 | self.maxDiff = 10000
278 | # Tabs and spaces are removed to make the comparison easier
279 | self.assertEqual(
280 | mock_file.write.call_args[0][0].replace(" ", ""),
281 | expected_html.replace(" ", ""),
282 | )
283 | self.assertEqual(mock_file.write.call_count, 1)
284 | self.assertEqual(mock_open.call_count, 3) # 2 images and 1 html file
285 | self.assertEqual(mock_to_html.call_count, 1)
286 | self.assertEqual(mock_path_exists_infographics.call_count, 6)
287 |
288 | @patch("fiat_toolbox.infographics.risk_infographics.Path.exists")
289 | @patch("fiat_toolbox.infographics.infographics.Image.open")
290 | @patch("fiat_toolbox.infographics.risk_infographics.Figure.to_html")
291 | @patch("fiat_toolbox.infographics.risk_infographics.open")
292 | def test_figure_to_html_no_figures(
293 | self, mock_open, mock_to_html, mock_open_image, mock_path_exists
294 | ):
295 | # Arrange
296 | figure_path = Path("parent/some_figure.html")
297 | mock_open_image.return_value = "some_image"
298 |
299 | def exists_side_effect(path):
300 | if ".html" in str(path):
301 | # In case of the html file, we want it to not exist
302 | return False
303 | else:
304 | return True
305 |
306 | mock_path_exists.side_effect = exists_side_effect
307 |
308 | mock_to_html.return_value = "some_figure"
309 |
310 | figs = []
311 |
312 | metrics = {"ExpectedAnnualDamages": 1000000, "FloodedHomes": 1000}
313 | charts = {
314 | "Other": {
315 | "Expected_Damages": {
316 | "title": "Expected annual damages",
317 | "image": "money.png",
318 | "image_scale": 0.125,
319 | "title_font_size": 30,
320 | "numbers_font_size": 15,
321 | "height": 300,
322 | },
323 | "Flooded": {
324 | "title": "Number of homes with a high chance of being flooded in a 30-year period",
325 | "image": "house.png",
326 | "image_scale": 0.125,
327 | "title_font_size": 30,
328 | "numbers_font_size": 15,
329 | "height": 300,
330 | },
331 | "Return_Periods": {
332 | "title": "Building damages",
333 | "font_size": 30,
334 | "image_scale": 0.125,
335 | "numbers_font": 15,
336 | "subtitle_font": 25,
337 | "legend_font": 20,
338 | "plot_height": 300,
339 | },
340 | "Info": {
341 | "title": "Building damages",
342 | "image": "house.png",
343 | "scale": 0.125,
344 | },
345 | }
346 | }
347 |
348 | # Act
349 | parser = RiskInfographicsParser(
350 | scenario_name="test_scenario",
351 | metrics_full_path="DontCare",
352 | config_base_path="DontCare",
353 | output_base_path="DontCare",
354 | )
355 |
356 | # Assert
357 | with self.assertRaises(AttributeError) as context:
358 | parser._figures_list_to_html(figs, metrics, charts, figure_path)
359 |
360 | self.assertTrue(
361 | "'list' object has no attribute 'to_html'" in str(context.exception)
362 | )
363 |
364 | @patch("fiat_toolbox.infographics.risk_infographics.Path.exists")
365 | def test_html_already_exists(self, mock_path_exists):
366 | # Arrange
367 | figure_path = "some_figure.html"
368 | mock_path_exists.return_value = True
369 | figs = [Figure(), Figure(), Figure()]
370 | metrics = {"ExpectedAnnualDamages": 1000000, "FloodedHomes": 1000}
371 | charts = {
372 | "Other": {
373 | "expected_damage_image": "expected_damage_image.png",
374 | "flooded_title": "Flooded buildings",
375 | "flooded_image": "flooded_image.png",
376 | }
377 | }
378 |
379 | # Act
380 | parser = RiskInfographicsParser(
381 | scenario_name="test_scenario",
382 | metrics_full_path="DontCare",
383 | config_base_path="DontCare",
384 | output_base_path="DontCare",
385 | )
386 |
387 | # Assert
388 | with self.assertRaises(FileExistsError) as context:
389 | parser._figures_list_to_html(figs, metrics, charts, figure_path)
390 |
391 | self.assertTrue(
392 | "File already exists at some_figure.html" in str(context.exception)
393 | )
394 |
395 | @patch("fiat_toolbox.infographics.risk_infographics.Path.exists")
396 | def test_html_wrong_suffix(self, mock_path_exists):
397 | # Arrange
398 | figure_path = "some_figure.txt"
399 |
400 | def exists_side_effect(path):
401 | if ".txt" in str(path):
402 | # In case of the txt file, we want it to not exist
403 | return False
404 | else:
405 | return True
406 |
407 | mock_path_exists.side_effect = exists_side_effect
408 | figs = [Figure(), Figure(), Figure()]
409 | metrics = {"ExpectedAnnualDamages": 1000000, "FloodedHomes": 1000}
410 | charts = {
411 | "Other": {
412 | "expected_damage_image": "expected_damage_image.png",
413 | "flooded_title": "Flooded buildings",
414 | "flooded_image": "flooded_image.png",
415 | }
416 | }
417 |
418 | # Act
419 | parser = RiskInfographicsParser(
420 | scenario_name="test_scenario",
421 | metrics_full_path="DontCare",
422 | config_base_path="DontCare",
423 | output_base_path="DontCare",
424 | )
425 |
426 | # Assert
427 | with self.assertRaises(ValueError) as context:
428 | parser._figures_list_to_html(figs, metrics, charts, figure_path)
429 |
430 | self.assertTrue(
431 | "File path must be a .html file, not some_figure.txt"
432 | in str(context.exception)
433 | )
434 |
--------------------------------------------------------------------------------
/fiat_toolbox/metrics_writer/fiat_write_metrics_file.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import os
4 | from pathlib import Path
5 | from typing import Dict, Tuple, Union
6 |
7 | import duckdb
8 | import pandas as pd
9 | import tomli
10 | from pydantic import BaseModel
11 |
12 | from fiat_toolbox import get_fiat_columns
13 | from fiat_toolbox.metrics_writer.fiat_metrics_interface import IMetricsFileWriter
14 | from fiat_toolbox.metrics_writer.fiat_read_metrics_file import MetricsFileReader
15 |
16 | _AGGR_LABEL_FMT = get_fiat_columns().aggregation_label
17 |
18 |
19 | # sql command struct
20 |
21 |
22 | class sql_struct(BaseModel):
23 | name: str
24 | long_name: str
25 | show_in_metrics_table: bool = True
26 | show_in_metrics_map: bool = True
27 | description: str
28 | select: str
29 | filter: str
30 | groupby: str
31 |
32 |
33 | class MetricsFileWriter(IMetricsFileWriter):
34 | """Class to parse metrics and write to a file."""
35 |
36 | logger: logging.Logger = logging.getLogger(__name__)
37 |
38 | def __init__(
39 | self,
40 | config_file: Union[str, Path],
41 | logger: logging.Logger = logging.getLogger(__name__),
42 | aggregation_label_fmt: str = _AGGR_LABEL_FMT,
43 | ):
44 | """
45 | Initialize the class.
46 |
47 | Parameters
48 | ----------
49 | config_file : Union[str, Path]
50 | The path to the metrics file.
51 | """
52 | # Convert the path to a Path object
53 | if isinstance(config_file, str):
54 | config_file = Path(config_file)
55 |
56 | # Check whether the file exists
57 | if not os.path.exists(config_file):
58 | raise FileNotFoundError(f"Config file '{config_file}' not found.")
59 |
60 | self.config_file = config_file
61 | self.logger = logger
62 | self.aggregation_label_fmt = aggregation_label_fmt
63 |
64 | def _read_metrics_file(
65 | self, include_aggregates: bool
66 | ) -> Union[Dict[str, sql_struct], Dict[str, Dict[str, sql_struct]]]:
67 | """
68 | Read a metrics file and return a list of SQL commands.
69 |
70 | Parameters
71 | ----------
72 | include_aggregates : bool
73 | Whether to include aggregation labels in the metrics.
74 |
75 | Returns
76 | -------
77 | Union[Dict[str, sql_struct], Dict[str, Dict[str, sql_struct]]]
78 | A dictionary with the SQL commands.
79 | """
80 |
81 | # Read the metrics file
82 | _, extension = os.path.splitext(self.config_file)
83 | if extension == ".json":
84 | metrics = json.load(open(self.config_file, "r"))
85 | elif extension == ".toml":
86 | metrics = tomli.load(open(self.config_file, "rb"))
87 | else:
88 | raise ValueError(
89 | f"Config file '{self.config_file}' has an invalid extension. Only .json and .toml are supported."
90 | )
91 |
92 | # Create the sql commands dictionary
93 | sql_command_set = {}
94 | if include_aggregates:
95 | # Check whether the metrics file contains aggregation labels
96 | if "aggregateBy" not in metrics or len(metrics["aggregateBy"]) == 0:
97 | raise ValueError(
98 | "No aggregation labels specified in the metrics file, but include_aggregates is set to True."
99 | )
100 | # Loop over the aggregation labels
101 | for aggregate in metrics["aggregateBy"]:
102 | aggregate_command = {}
103 | # Check whether the metrics file contains metrics
104 | if "queries" not in metrics or len(metrics["queries"]) == 0:
105 | raise ValueError("No queries specified in the metrics file.")
106 | # Loop over the metrics
107 | for metric in metrics["queries"]:
108 |                     # Append a count marker (#) to the description of COUNT metrics
109 | if "COUNT" in metric["select"] and "#" not in metric["description"]:
110 | metric["description"] = f"{metric['description']} (#)"
111 |
112 | # Create the sql command
113 | metric["groupby"] = (
114 | f"`{self.aggregation_label_fmt.format(name=aggregate)}`"
115 | )
116 | sql_command = sql_struct(**metric)
117 |
118 | # Check whether the metric name is already in the dictionary
119 | if metric["name"] in aggregate_command:
120 | raise ValueError(
121 | f"Duplicate metric name {metric['name']} in metrics file."
122 | )
123 |
124 | # Add the sql command to the dictionary
125 | aggregate_command[metric["name"]] = sql_command
126 |
127 | # Check whether the aggregation label is already in the dictionary
128 | if aggregate in sql_command_set:
129 | raise ValueError(
130 | f"Duplicate aggregation label {aggregate} in metrics file."
131 | )
132 |
133 | # Add the sql command to the dictionary
134 | sql_command_set[aggregate] = aggregate_command
135 | else:
136 | # Check whether the metrics file contains metrics
137 | if "queries" not in metrics or len(metrics["queries"]) == 0:
138 | raise ValueError("No queries specified in the metrics file.")
139 |
140 | # Loop over the metrics
141 | for metric in metrics["queries"]:
142 |                 # Append a count marker (#) to the description of COUNT metrics
143 | if "COUNT" in metric["select"] and "#" not in metric["description"]:
144 | metric["description"] = f"{metric['description']} (#)"
145 | # Create the sql command
146 | metric["groupby"] = ""
147 | sql_command = sql_struct(**metric)
148 |
149 | # Check whether the metric name is already in the dictionary
150 | if metric["name"] in sql_command_set:
151 | raise ValueError(
152 | f"Duplicate metric name {metric['name']} in metrics file."
153 | )
154 |
155 | # Add the sql command to the dictionary
156 | sql_command_set[metric["name"]] = sql_command
157 | # Return the sql commands dictionary
158 | return sql_command_set
159 |
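    # Example (illustrative): a minimal .toml config accepted by the reader above.
    # "aggregateBy" is only required when include_aggregates is True; the column
    # name used in "select" is hypothetical and depends on the FIAT output at hand.
    #
    #   aggregateBy = ["Subbasin"]
    #
    #   [[queries]]
    #   name = "TotalDamage"
    #   long_name = "Total damage"
    #   description = "Total damage ($)"
    #   select = "SUM(`Total Damage`)"
    #   filter = ""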
160 | @staticmethod
161 | def _create_single_metric(
162 | df_results: pd.DataFrame, sql_command: sql_struct
163 | ) -> Union[Tuple[str, object], Tuple[str, Dict[str, object]]]:
164 | """
165 | Create a metrics table from the results dataframe based on an SQL command.
166 |
167 | Parameters
168 | ----------
169 | df_results : pd.DataFrame
170 | The results dataframe.
171 | sql_command : sql_struct
172 | The SQL command.
173 |
174 | Returns
175 | -------
176 | Union[Tuple[str, object], Tuple[str, Dict[str, object]]]
177 | A tuple with the metric name and value or, in the case of a groupby statement,
178 | a tuple with the metric name and a dictionary with the groupby variables as keys
179 | and the metric as value.
180 | """
181 |
182 |         # First add the groupby variables to the query
183 | sql_query = "SELECT "
184 | if sql_command.groupby:
185 | sql_query += f"{sql_command.groupby}, "
186 |
187 | # Then add the select variables
188 | if not sql_command.select:
189 | raise ValueError(
190 | f"No select statement specified for metric {sql_command.name}."
191 | )
192 | sql_query += f"{sql_command.select} AS `{sql_command.name}` FROM df_results"
193 |
194 | # Then add the filter statement
195 | if sql_command.filter:
196 | sql_query += f" WHERE {sql_command.filter}"
197 |
198 | # Finally add the groupby statement
199 | if sql_command.groupby:
200 | sql_query += f" GROUP BY {sql_command.groupby}"
201 |
202 | # Register the dataframe as a DuckDB table
203 | duckdb.unregister("df_results")
204 | duckdb.register("df_results", df_results)
205 |
206 |         # Execute the query. If the query is invalid, duckdb will raise an exception
207 | sql_query = sql_query.replace("`", '"')
208 | result = duckdb.query(sql_query).df()
209 |
210 | # If the command contains a groupby statement, return a dictionary with the groupby variables as keys and the metric as value
211 | if sql_command.groupby:
212 | # Set the groupby variables as index
213 | labeled_result = result.set_index(sql_command.groupby.replace("`", ""))
214 | # Remove rows without index name
215 | labeled_result = labeled_result[labeled_result.index.notna()]
216 | # Return the metric name and the dictionary
217 | return labeled_result.columns[0], dict(
218 | labeled_result[labeled_result.columns[0]]
219 | )
220 | # Otherwise return the metric name and the value
221 | return result.columns[0], result[result.columns[0]][0]
222 |
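    # Example (illustrative): for a command with name "TotalDamage",
    # select "SUM(`Total Damage`)", filter "`Total Damage` > 0" and groupby
    # "`aggregation_label:Subbasin`" (column and label names hypothetical),
    # the assembled query, after backticks are replaced by double quotes, reads:
    #   SELECT "aggregation_label:Subbasin", SUM("Total Damage") AS "TotalDamage"
    #   FROM df_results WHERE "Total Damage" > 0 GROUP BY "aggregation_label:Subbasin"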
223 | @staticmethod
224 | def _create_metrics_dict(
225 | df_results: pd.DataFrame, sql_commands: Dict[str, sql_struct]
226 | ) -> Dict[str, object]:
227 | """
228 | Create a metrics table from the results dataframe based on a list of SQL commands.
229 |
230 | Parameters
231 | ----------
232 | df_results : pd.DataFrame
233 | The results dataframe.
234 |         sql_commands : Dict[str, sql_struct]
235 |             A dictionary mapping metric names to SQL commands.
236 |
237 | Returns
238 | -------
239 | dict
240 | A dictionary with the metric names and values.
241 | """
242 |
243 | # Initialize the metrics dictionary
244 | df_metrics = {}
245 |
246 | # Run the sql commands one by one
247 | for name, command in sql_commands.items():
248 | # Create the metric (_create_single_metric is a static method, so no need to instantiate the class)
249 | _, value = MetricsFileWriter._create_single_metric(df_results, command)
250 |
251 | # Store the metric in the metrics dictionary using the metric name as key
252 | df_metrics[name] = value
253 |
254 | return df_metrics
255 |
256 | def _parse_metrics(
257 | self, df_results: pd.DataFrame, include_aggregates: bool
258 | ) -> Union[dict, Dict[str, dict]]:
259 | """
260 | Parse the metrics based on the config file and return a dictionary with the metrics.
261 |
262 | Parameters
263 | ----------
264 | df_results : pd.DataFrame
265 | The results dataframe.
266 | include_aggregates : bool
267 | Whether to include aggregation labels in the metrics.
268 |
269 | Returns
270 | -------
271 |         Union[dict, Dict[str, dict]]
272 |             A dictionary with the metrics or, when aggregation labels are included,
273 |             a dictionary with the aggregation labels as keys and a metrics dictionary per label as values.
274 | """
275 |
276 | # Read the metrics file
277 | sql_commands = self._read_metrics_file(include_aggregates)
278 |
279 | # Create the metrics dictionary
280 | if include_aggregates:
281 | metrics = {}
282 | # Loop over the aggregation labels
283 | for aggregate, commands in sql_commands.items():
284 | # Create the metrics dictionary for the current aggregation label (the _create_metrics_dict is a static method, so no need to instantiate the class)
285 | metrics[aggregate] = MetricsFileWriter._create_metrics_dict(
286 | df_results, commands
287 | )
288 | return metrics
289 | else:
290 | # Create the metrics dictionary (the _create_metrics_dict is a static method, so no need to instantiate the class)
291 | return MetricsFileWriter._create_metrics_dict(df_results, sql_commands)
292 |
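    # Example (illustrative) of the returned shapes, with hypothetical names:
    #   include_aggregates = False -> {"TotalDamage": 1.2e6, "FloodedHomes": 53}
    #   include_aggregates = True  -> {"Subbasin": {"TotalDamage": {"Area A": 8e5, "Area B": 4e5},
    #                                               "FloodedHomes": {"Area A": 30, "Area B": 23}}}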
293 | @staticmethod
294 | def _write_metrics_file(
295 | metrics: Union[dict, Dict[str, dict]],
296 | config: Union[Dict[str, sql_struct], Dict[str, Dict[str, sql_struct]]],
297 | metrics_path: Path,
298 | write_aggregate: str = None,
299 | overwrite: bool = False,
300 | aggregations: list = None,
301 | ) -> None:
302 | """
303 | Write a metrics dictionary to a metrics file.
304 |
305 | Parameters
306 | ----------
307 |         metrics : Union[dict, Dict[str, dict]]
308 |             A dictionary with the metrics or, in the case of aggregation labels,
309 |             a dictionary with the aggregation labels as keys and a metrics dictionary per label as values.
310 | config : Union[Dict[str, sql_struct], Dict[str, Dict[str, sql_struct]]]
311 | A dictionary with the SQL commands.
312 | metrics_path : Path
313 | The path to where to store the metrics file.
314 | write_aggregate : str
315 | The name of the aggregation label to write to the metrics file (None for no aggregation label).
316 | overwrite : bool
317 |             Whether to overwrite an existing metrics file. If False, the existing metrics are read from the file and the new metrics are appended to them.
318 | aggregations : list
319 | A list of aggregation areas. If write_aggregate is None, this is ignored.
320 |
321 | Returns
322 | -------
323 | None
324 | """
325 |
326 | if write_aggregate:
327 | # Get the metrics for the current aggregation label
328 | aggregate_metrics = metrics[write_aggregate]
329 |
330 |             # Derive the aggregation area names from the metrics if not provided
331 | if aggregations is None:
332 | aggregations = []
333 | for value in aggregate_metrics.values():
334 | aggregations.extend(value.keys())
335 |
336 | # Update all empty metrics with 0
337 | for key, value in aggregate_metrics.items():
338 | if value == {}:
339 | aggregate_metrics[key] = dict.fromkeys(aggregations, 0)
340 | continue
341 | for name in aggregations:
342 | if name not in value:
343 | aggregate_metrics[key][name] = 0
344 |
345 | # Create a dataframe from the metrics dictionary
346 | metricsFrame = (
347 | pd.DataFrame().from_dict(aggregate_metrics, orient="index").fillna(0)
348 | )
349 |
350 | # Add the long name to the dataframe
351 | metricsFrame.insert(
352 | 0,
353 | "Long Name",
354 | [
355 | config[write_aggregate][name].long_name
356 | for name, _ in metricsFrame.iterrows()
357 | ],
358 | )
359 |
360 | # Add the metrics table selector to the dataframe
361 | metricsFrame.insert(
362 | 0,
363 | "Show In Metrics Table",
364 | [
365 | config[write_aggregate][name].show_in_metrics_table
366 | for name, _ in metricsFrame.iterrows()
367 | ],
368 | )
369 |
370 |             # Add the metrics map selector to the dataframe
371 | metricsFrame.insert(
372 | 0,
373 | "Show In Metrics Map",
374 | [
375 | config[write_aggregate][name].show_in_metrics_map
376 | for name, _ in metricsFrame.iterrows()
377 | ],
378 | )
379 |
380 | # Add the description to the dataframe
381 | metricsFrame.insert(
382 | 0,
383 | "Description",
384 | [
385 | config[write_aggregate][name].description
386 | for name, _ in metricsFrame.iterrows()
387 | ],
388 | )
389 |
390 | # Check if the file already exists
391 | if os.path.exists(metrics_path):
392 | if overwrite:
393 | MetricsFileWriter.logger.warning(
394 | f"Metrics file '{metrics_path}' already exists. Overwriting..."
395 | )
396 | os.remove(metrics_path)
397 | else:
398 |                 existing_metrics = MetricsFileReader(
399 | metrics_path
400 | ).read_metrics_from_file(
401 | include_long_names=True,
402 | include_description=True,
403 | include_metrics_table_selection=True,
404 | include_metrics_map_selection=True,
405 | )
406 |                 metricsFrame = pd.concat([existing_metrics, metricsFrame])
407 |
408 | # Transpose the dataframe
409 | metricsFrame = metricsFrame.transpose()
410 |
411 | # Write the metrics to a file
412 | if metrics_path.parent and not metrics_path.parent.exists():
413 | metrics_path.parent.mkdir(parents=True)
414 | metricsFrame.to_csv(metrics_path)
415 | else:
416 | # Create a dataframe from the metrics dictionary
417 | metricsFrame = (
418 | pd.DataFrame()
419 | .from_dict(metrics, orient="index", columns=["Value"])
420 | .fillna(0)
421 | )
422 |
423 | # Add the long name to the dataframe
424 | metricsFrame.insert(
425 | 0,
426 | "Long Name",
427 | [config[name].long_name for name, _ in metricsFrame.iterrows()],
428 | )
429 |
430 | # Add the metrics table selector to the dataframe
431 | metricsFrame.insert(
432 | 0,
433 | "Show In Metrics Table",
434 | [
435 | config[name].show_in_metrics_table
436 | for name, _ in metricsFrame.iterrows()
437 | ],
438 | )
439 |
440 |             # Add the metrics map selector to the dataframe
441 | metricsFrame.insert(
442 | 0,
443 | "Show In Metrics Map",
444 | [
445 | config[name].show_in_metrics_map
446 | for name, _ in metricsFrame.iterrows()
447 | ],
448 | )
449 |
450 | # Add the description to the dataframe
451 | metricsFrame.insert(
452 | 0,
453 | "Description",
454 | [config[name].description for name, _ in metricsFrame.iterrows()],
455 | )
456 |
457 | # Check if the file already exists
458 | if os.path.exists(metrics_path):
459 | if overwrite:
460 |                 MetricsFileWriter.logger.warning(
461 | f"Metrics file '{metrics_path}' already exists. Overwriting..."
462 | )
463 | os.remove(metrics_path)
464 | else:
465 |                 existing_metrics = MetricsFileReader(
466 | metrics_path
467 | ).read_metrics_from_file(
468 | include_long_names=True,
469 | include_description=True,
470 | include_metrics_table_selection=True,
471 | include_metrics_map_selection=True,
472 | )
473 |                 metricsFrame = pd.concat([existing_metrics, metricsFrame])
474 |
475 | # Write the metrics to a file
476 | if metrics_path.parent and not metrics_path.parent.exists():
477 | metrics_path.parent.mkdir(parents=True)
478 | metricsFrame.to_csv(metrics_path)
479 |
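    # Example (illustrative): the written CSV is the transpose of `metricsFrame`,
    # so metric names become columns and the first rows are Description,
    # Show In Metrics Map, Show In Metrics Table and Long Name, followed by one
    # row per aggregation area (or a single "Value" row). Names are hypothetical:
    #   ,TotalDamage,FloodedHomes
    #   Description,Total damage ($),Flooded homes (#)
    #   Show In Metrics Map,True,True
    #   Show In Metrics Table,True,True
    #   Long Name,Total damage,Flooded homes
    #   Area A,800000,30
    #   Area B,400000,23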
480 | def parse_metrics_to_file(
481 | self,
482 | df_results: pd.DataFrame,
483 | metrics_path: Union[str, Path],
484 | write_aggregate: str = None,
485 | overwrite: bool = False,
486 | ) -> Union[str, Dict[str, str]]:
487 | """
488 | Parse a metrics file and write the metrics to a file.
489 |
490 | Parameters
491 | ----------
492 | df_results : pd.DataFrame
493 | The results dataframe.
494 | metrics_path : Union[str, Path]
495 | The path to where to store the metrics file.
496 | write_aggregate : str
497 | The name of the aggregation label to write to the metrics file
498 | (None for no aggregation label, 'all' for all possible ones).
499 | overwrite : bool
500 |             Whether to overwrite an existing metrics file. If False, the existing metrics are read from the file and the new metrics are appended to them.
501 |
502 | Returns
503 | -------
504 | Union[str, Dict[str, str]]
505 | The path to the metrics file or a dictionary with the aggregation labels as keys
506 | and the paths to the metrics files as values.
507 | """
508 |
509 | # Convert the path to a Path object
510 | if isinstance(metrics_path, str):
511 | metrics_path = Path(metrics_path)
512 |
513 | # Check whether to include aggregation labels
514 |         include_aggregates = bool(write_aggregate)
515 |
516 | # Read the metrics config file
517 | config = self._read_metrics_file(include_aggregates)
518 |
519 | # Parse the metrics
520 | metrics = self._parse_metrics(df_results, include_aggregates)
521 |
522 | # Write the metrics to a file
523 | if write_aggregate == "all":
524 | # Initialize the return dictionary
525 | return_files = {}
526 | for key in config.keys():
527 | # If using aggregation labels, add the aggregation label to the filename
528 | directory, filename = os.path.split(metrics_path)
529 | filename, extension = os.path.splitext(filename)
530 | new_filename = filename + "_" + key + extension
531 | new_path = Path(os.path.join(directory, new_filename))
532 | return_files[key] = new_path
533 |
534 | # Write the metrics to a file
535 | MetricsFileWriter._write_metrics_file(
536 | metrics,
537 | config,
538 | new_path,
539 | write_aggregate=key,
540 | overwrite=overwrite,
541 | aggregations=df_results[
542 | self.aggregation_label_fmt.format(name=key)
543 | ].unique(),
544 | )
545 | else:
546 | # Write the metrics to a file
547 | MetricsFileWriter._write_metrics_file(
548 | metrics,
549 | config,
550 | metrics_path,
551 | write_aggregate=write_aggregate,
552 | overwrite=overwrite,
553 | )
554 | return_files = metrics_path
555 |
556 | return return_files
557 |
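# Example usage (illustrative sketch; the paths, config file and results
# dataframe below are hypothetical and not part of this module):
#
#   import pandas as pd
#   from fiat_toolbox.metrics_writer.fiat_write_metrics_file import MetricsFileWriter
#
#   df_results = pd.read_csv("output/fiat_output.csv")
#   writer = MetricsFileWriter("metrics_config.toml")
#
#   # One metrics file per aggregation label listed under "aggregateBy":
#   paths = writer.parse_metrics_to_file(
#       df_results, "output/metrics.csv", write_aggregate="all", overwrite=True
#   )
#
#   # Or a single, non-aggregated metrics file:
#   path = writer.parse_metrics_to_file(df_results, "output/metrics.csv")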
--------------------------------------------------------------------------------