├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── docs-issue-report.md
│   │   └── issue-report.md
│   └── workflows
│       ├── publish-to-test-pypi.yml
│       └── stale.yml
├── .gitignore
├── .readthedocs.yml
├── LICENSE
├── Makefile
├── README.md
├── dev
│   ├── gen-var_info_yml.py
│   └── var_info.yml
├── docs
│   ├── Makefile
│   ├── requirements.txt
│   └── source
│       ├── api.rst
│       ├── api
│       │   ├── supy.cmd
│       │   │   ├── suews-convert.rst
│       │   │   └── suews-run.rst
│       │   ├── supy.util
│       │   │   ├── supy.util.cal_gs_obs.rst
│       │   │   ├── supy.util.cal_gs_suews.rst
│       │   │   ├── supy.util.cal_neutral.rst
│       │   │   ├── supy.util.calib_g.rst
│       │   │   ├── supy.util.derive_ohm_coef.rst
│       │   │   ├── supy.util.download_era5.rst
│       │   │   ├── supy.util.extract_reclassification.rst
│       │   │   ├── supy.util.fill_gap_all.rst
│       │   │   ├── supy.util.gen_epw.rst
│       │   │   ├── supy.util.gen_forcing_era5.rst
│       │   │   ├── supy.util.plot_comp.rst
│       │   │   ├── supy.util.plot_day_clm.rst
│       │   │   ├── supy.util.plot_reclassification.rst
│       │   │   ├── supy.util.plot_rsl.rst
│       │   │   ├── supy.util.read_epw.rst
│       │   │   └── supy.util.sim_ohm.rst
│       │   └── supy
│       │       ├── supy.init_supy.rst
│       │       ├── supy.load_SampleData.rst
│       │       ├── supy.load_forcing_grid.rst
│       │       ├── supy.run_supy.rst
│       │       ├── supy.save_supy.rst
│       │       └── supy.show_version.rst
│       ├── conf.py
│       ├── data-structure
│       │   ├── df_forcing.rst
│       │   ├── df_output.rst
│       │   ├── df_state.rst
│       │   ├── supy-io.ipynb
│       │   └── supy-io.py
│       ├── faq.rst
│       ├── index.rst
│       ├── proc_var_info
│       │   ├── df_forcing.csv
│       │   ├── df_output.csv
│       │   ├── df_state.csv
│       │   ├── gen_df_forcing_output_csv.py
│       │   ├── gen_df_state_csv.py
│       │   ├── gen_rst.py
│       │   └── nml_rst_proc.py
│       ├── sample_run
│       ├── tutorial
│       │   ├── data
│       │   │   └── US-AR1_2010_data_60.txt
│       │   ├── external-interaction.ipynb
│       │   ├── impact-studies.ipynb
│       │   ├── quick-start.ipynb
│       │   ├── setup-own-site.ipynb
│       │   └── tutorial.rst
│       └── version-history.rst
├── env.yml
├── sample_plot.png
└── src
    ├── Makefile
    ├── data_test
    │   ├── multi-grid
    │   │   ├── 51.5N0.125W-201310-ml.nc
    │   │   ├── 51.5N0.125W-201310-sfc.nc
    │   │   ├── 51.5N0.125W-201311-ml.nc
    │   │   ├── 51.5N0.125W-201311-sfc.nc
    │   │   ├── 51.5N0.125W-201312-ml.nc
    │   │   └── 51.5N0.125W-201312-sfc.nc
    │   └── single-grid
    │       ├── 57.75N12.0E-200301-ml.nc
    │       ├── 57.75N12.0E-200301-sfc.nc
    │       ├── 57.75N12.0E-200302-ml.nc
    │       ├── 57.75N12.0E-200302-sfc.nc
    │       ├── 57.75N12.0E-200303-ml.nc
    │       ├── 57.75N12.0E-200303-sfc.nc
    │       ├── 57.75N12.0E-200304-ml.nc
    │       ├── 57.75N12.0E-200304-sfc.nc
    │       ├── 57.75N12.0E-200305-ml.nc
    │       ├── 57.75N12.0E-200305-sfc.nc
    │       ├── 57.75N12.0E-200306-ml.nc
    │       ├── 57.75N12.0E-200306-sfc.nc
    │       ├── 57.75N12.0E-200307-ml.nc
    │       ├── 57.75N12.0E-200307-sfc.nc
    │       ├── 57.75N12.0E-200308-ml.nc
    │       ├── 57.75N12.0E-200308-sfc.nc
    │       ├── 57.75N12.0E-200309-ml.nc
    │       ├── 57.75N12.0E-200309-sfc.nc
    │       ├── 57.75N12.0E-200310-ml.nc
    │       ├── 57.75N12.0E-200310-sfc.nc
    │       ├── 57.75N12.0E-200311-ml.nc
    │       ├── 57.75N12.0E-200311-sfc.nc
    │       ├── 57.75N12.0E-200312-ml.nc
    │       └── 57.75N12.0E-200312-sfc.nc
    ├── pyproject.toml
    ├── setup.py
    └── supy
        ├── __init__.py
        ├── _check.py
        ├── _env.py
        ├── _load.py
        ├── _misc.py
        ├── _post.py
        ├── _run.py
        ├── _save.py
        ├── _supy_module.py
        ├── _version.py
        ├── checker_rules_indiv.json
        ├── checker_rules_joint.json
        ├── cmd
        │   ├── SUEWS.py
        │   ├── __init__.py
        │   └── table_converter.py
        ├── code2file.json
        ├── sample_run
        │   ├── Input
        │   │   ├── ESTMinput.nml
        │   │   ├── GridLayoutKc.nml
        │   │   ├── InitialConditionsKc_2011.nml
        │   │   ├── Kc_2011_data_60.txt
        │   │   ├── Kc_2012_data_60.txt
        │   │   ├── SUEWS_AnthropogenicEmission.txt
        │   │   ├── SUEWS_BiogenCO2.txt
        │   │   ├── SUEWS_Conductance.txt
        │   │   ├── SUEWS_ESTMCoefficients.txt
        │   │   ├── SUEWS_Irrigation.txt
        │   │   ├── SUEWS_NonVeg.txt
        │   │   ├── SUEWS_OHMCoefficients.txt
        │   │   ├── SUEWS_Profiles.txt
        │   │   ├── SUEWS_SPARTACUS.nml
        │   │   ├── SUEWS_SiteSelect.txt
        │   │   ├── SUEWS_Snow.txt
        │   │   ├── SUEWS_Soil.txt
        │   │   ├── SUEWS_Veg.txt
        │   │   ├── SUEWS_Water.txt
        │   │   └── SUEWS_WithinGridWaterDist.txt
        │   └── RunControl.nml
        ├── test
        │   └── test_SuPy.py
        ├── util
        │   ├── _UMEP2epw.py
        │   ├── __init__.py
        │   ├── _atm.py
        │   ├── _converter.py
        │   ├── _debug.py
        │   ├── _era5.py
        │   ├── _gap_filler.py
        │   ├── _gs.py
        │   ├── _io.py
        │   ├── _ohm.py
        │   ├── _plot.py
        │   ├── _roughness.py
        │   ├── _tmy.py
        │   ├── _wrf.py
        │   └── rules.csv
        └── var2siteselect.json
/.github/ISSUE_TEMPLATE/docs-issue-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Docs issue report
3 | about: Create a report to help us improve the documentation
4 | title: ''
5 | labels: docs
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 |
12 |
13 | **Location**
14 |
15 |
16 | **Expected statement**
17 |
18 |
19 |
20 | **Screenshots**
21 |
22 |
23 |
24 | **Additional context**
25 | Add any other context about the problem here.
26 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/issue-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Issue report
3 | about: Create a report to help us debug the issue.
4 |
5 | ---
6 |
7 | **Describe the Issue**
8 |
9 |
10 | **Is `supy_debug-xxxxxxxx.zip` generated?**
11 |
12 |
13 | **Screenshots**
14 |
15 |
16 |
17 | **Additional context**
18 |
19 |
--------------------------------------------------------------------------------
/.github/workflows/publish-to-test-pypi.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | build-n-publish:
7 | name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI
8 | runs-on: ubuntu-latest
9 |
10 | steps:
11 | - uses: actions/checkout@v3
12 | with:
13 | fetch-depth: 0
14 |
15 | - name: Set up Python 3.9
16 | uses: actions/setup-python@v1
17 | with:
18 | python-version: 3.9
19 |
20 | # # remote debugging
21 | # - name: Setup upterm session
22 | # uses: lhotari/action-upterm@v1
23 |
24 | - name: check git info
25 | run: |
26 | git tag -l
27 | git status
28 | git describe --tags --dirty --match "2[0-9]*"
29 |
30 | - name: test and build
31 | run: |
32 | pip install pandas wheel pytest tables build
33 | make test
34 | make wheel
35 |
36 | - name: Publish distribution 📦 to Test PyPI
37 | uses: pypa/gh-action-pypi-publish@master
38 | with:
39 | packages_dir: wheelhouse
40 | verbose: true
41 | skip_existing: true
42 | password: ${{ secrets.TEST_PYPI_API_TOKEN }}
43 | repository_url: https://test.pypi.org/legacy/
44 |
45 | - name: Publish distribution 📦 to PyPI
46 | if: startsWith(github.ref, 'refs/tags')
47 | uses: pypa/gh-action-pypi-publish@master
48 | with:
49 | packages_dir: wheelhouse
50 | verbose: true
51 | skip_existing: true
52 | password: ${{ secrets.PYPI_API_TOKEN }}
53 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | # This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
2 | #
3 | # You can adjust the behavior by modifying this file.
4 | # For more information, see:
5 | # https://github.com/actions/stale
6 | name: Mark stale issues and pull requests
7 |
8 | on:
9 | schedule:
10 | - cron: '24 2 * * *'
11 |
12 | jobs:
13 | stale:
14 |
15 | runs-on: ubuntu-latest
16 | permissions:
17 | issues: write
18 | pull-requests: write
19 |
20 | steps:
21 | - uses: actions/stale@v5
22 | with:
23 | repo-token: ${{ secrets.GITHUB_TOKEN }}
24 | stale-issue-message: 'This is a stale issue that will be automatically closed if no action is taken within 7 days'
25 | stale-pr-message: 'This is a stale PR that will be automatically closed if no action is taken within 7 days'
26 | stale-issue-label: 'no-issue-activity'
27 | stale-pr-label: 'no-pr-activity'
28 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 | warnings.txt
106 | suews_sample.txt
107 | suews_1h.txt
108 | *.lock
109 | *.dirlock
110 |
111 | # dask
112 | dask-worker-space
113 |
114 | # dev space
115 | dev-test/
116 | dev-test
117 | .vscode/launch.json
118 | .vscode/settings.json
119 | _build/
120 |
121 | pipfile.lock
122 | .vscode/spellright.dict
123 |
124 | # supy test logs
125 | SuPy.log.*
126 | SuPy.log
127 |
128 | # pycharm configuration
129 | .idea/
130 |
131 | .DS_Store
132 | src/data_test/*/ERA5_*.txt
133 |
134 | Output/
135 | docs/source/api/supy.util/*.rst
136 | src/supy/supy_version.json
137 | wheelhouse/
138 | src/supy/_version_scm.py
139 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 |
3 | # Required
4 | version: 2
5 |
6 | build:
7 | os: ubuntu-20.04
8 | tools:
9 | # nodejs: "10"
10 | python: '3.11'
11 | python:
12 | install:
13 | - requirements: docs/requirements.txt
14 | - method: pip
15 | path: src
16 |
17 | formats:
18 | - epub
19 | # - pdf
20 |
21 | sphinx:
22 | builder: html
23 | configuration: docs/source/conf.py
24 | fail_on_warning: false
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # -*- makefile -*-
2 | .PHONY: main clean test wheel docs upload livehtml cibuild
3 |
4 | # OS-specific configurations
5 | ifeq ($(OS),Windows_NT)
6 | PYTHON_exe = python.exe
7 |
8 | else
9 | UNAME_S := $(shell uname -s)
10 |
11 |
12 | ifeq ($(UNAME_S),Linux) # Linux
13 | PYTHON_exe=python
14 |
15 | endif
16 |
17 | ifeq ($(UNAME_S),Darwin) # macOS
18 | PYTHON_exe=python
19 |
20 | endif
21 |
22 | endif
23 |
24 | src_dir = src
25 | docs_dir = docs
26 |
27 |
28 | PYTHON := $(if $(PYTHON_exe),$(PYTHON_exe),python)
29 | # All the files which include modules used by other modules (these therefore
30 | # need to be compiled first)
31 |
32 | MODULE = supy
33 |
34 | # default make options
35 | main: test
36 | $(MAKE) -C $(src_dir) main
37 | $(MAKE) -C $(docs_dir) html
38 |
39 | # build wheel
40 | wheel:
41 | python -m build src --wheel --outdir wheelhouse
42 |
43 | # house cleaning
44 | clean:
45 | $(MAKE) -C $(src_dir) clean
46 | $(MAKE) -C $(docs_dir) clean
47 |
48 | # make supy and run test cases
49 | test:
50 | $(MAKE) -C $(src_dir) test
51 |
52 | # make docs and open index
53 | docs:
54 | $(MAKE) -C $(docs_dir) html
55 | open $(docs_dir)/build/html/index.html
56 |
57 | # upload wheels to pypi using twine
58 | upload:
59 | $(MAKE) -C $(src_dir) upload
60 |
61 | # make live docs for testing
62 | livehtml:
63 | $(MAKE) -C $(docs_dir) livehtml
64 |
65 | # use cibuildwheel to build wheels (targeting macOS here)
66 | cibuild:
67 | pipx run cibuildwheel==2.9.0 --platform macos
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SuPy
2 |
3 | **This project has been merged into [SUEWS](https://github.com/UMEP-dev/SUEWS).**
4 |
5 |
6 | ---------
7 |
8 | [![Python Version Support Status](https://img.shields.io/pypi/pyversions/supy.svg)](https://pypi.org/project/supy)
9 | [![Latest Version Status](https://img.shields.io/pypi/v/supy.svg)](https://pypi.org/project/supy)
10 | [![Downloads](https://pepy.tech/badge/supy)](https://pepy.tech/project/supy)
11 | [![Binder Status](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/UMEP-dev/SuPy/main)
12 |
13 | [![Documentation Status](https://readthedocs.org/projects/supy/badge/?version=latest)](https://supy.readthedocs.io/en/latest/?badge=latest)
14 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2574404.svg)](https://doi.org/10.5281/zenodo.2574404)
15 |
16 |
17 |
18 | [**SU**EWS](https://suews-docs.readthedocs.io) that speaks **Py**thon
19 |
20 | ## Installation
21 |
22 | SuPy requires 64-bit Python 3.7+ and can be installed with `pip` from a command-line prompt:
23 |
24 |
25 | ```shell
26 | python3 -m pip install supy --upgrade
27 | ```
28 |
29 | ## Quick Demo
30 |
31 | Once installed, `supy` lets you run [SUEWS](https://suews-docs.readthedocs.io) simulations in just a few lines:
32 |
33 | ```python {cmd}
34 | import supy as sp
35 | import matplotlib.pyplot as plt
36 |
37 | # load sample data
38 | df_state_init, df_forcing = sp.load_SampleData()
39 | grid = df_state_init.index[0]
40 |
41 | # run supy/SUEWS simulation
42 | df_output, df_state_end = sp.run_supy(df_forcing, df_state_init)
43 |
44 | # plot results and save figure
45 | res_plot = df_output.SUEWS.loc[grid, ['QN', 'QF', 'QS', 'QE', 'QH']]
46 | ax = res_plot.loc['2012 6 4':'2012 6 6'].resample('30T').mean().plot()
47 | plt.show()
48 | ax.figure.savefig('sample_plot.png')
49 | ```
50 |
51 | The above code will produce a plot of surface energy balance components as follows:
52 |
53 | 
54 |
55 | ## Tutorial
56 |
57 | Please check out [more SuPy tutorials here!](https://supy.readthedocs.io/en/latest/tutorial/tutorial.html)
58 |
59 | ## Installation of the development version of SuPy
60 |
61 | The development version of `supy` consists of two parts: the `supy` Python wrapper and `supy-driver`, the calculation kernel written in Fortran.
62 |
63 | All steps below assume a working directory named `supy-dev`, though you can choose a different name.
64 |
65 |
66 | 1. Get the development-version source code of both SUEWS and SuPy
67 |
68 |
69 |
70 | ``` shell
71 | # get the source code of SUEWS and SuPy
72 | git clone --recurse-submodules git@github.com:UMEP-dev/SUEWS.git
73 |
74 | git clone git@github.com:UMEP-dev/SuPy.git
75 |
76 | ```
77 |
78 | 2. Set up the [conda environment](https://conda.io/docs/user-guide/tasks/manage-environments.html)
79 |
80 |
81 | ```shell
82 | conda env create -f SuPy/env.yml
83 | conda activate supy
84 |
85 | ```
86 |
87 | 3. Install the dev version of SuPy
88 |
89 | ```shell
90 |
91 | pip install -e SuPy/src
92 |
93 | ```
94 |
95 |
96 | 4. Compile the kernel `supy-driver`
97 |
98 | ```shell
99 | cd SUEWS/supy-driver
100 | make dev
101 |
102 | ```
103 |
104 | Here you can check if `supy-driver` is installed correctly:
105 |
106 | ```shell
107 | pip show supy-driver
108 | ```
109 | Note the `Location` field in the output, which should point to the local `supy-driver` directory.
110 |
111 |
112 | Note: after installing the development version of `supy-driver`, you might get a warning about version incompatibility, which you can safely ignore and carry on. However, in case of any issue, please report it [here](https://github.com/UMEP-dev/SuPy/issues/new?assignees=&labels=&template=issue-report.md).
113 |
114 |
115 |
116 | 5. Check the version info of installed supy
117 |
118 | ```shell
119 | pip show supy
120 | ```
121 | Note the `Location` field in the output, which should point to the local `supy` directory.
122 |
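To double-check the full tool chain, you can print the version info of the installed packages (a minimal sketch; `show_version` is part of the public SuPy API, as also used in the docs FAQ):

```python
import supy

# prints versions of supy and supy-driver
supy.show_version()
```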
--------------------------------------------------------------------------------
/dev/gen-var_info_yml.py:
--------------------------------------------------------------------------------
1 | # this script is to generate the var_info.yml file
2 | # import common libs
3 | # %%
4 | import json
5 | import yaml
6 | import pandas as pd
7 |
8 | from pathlib import Path
9 |
10 | # %%
11 | # load var info csv files
12 | p_dir_csv = Path("../docs/source/proc_var_info/")
13 | dict_df_var_info = {}
14 | for p_fn_csv in p_dir_csv.glob("*.csv"):
15 | print(p_fn_csv)
16 | df_var_info = pd.read_csv(p_fn_csv)
17 | # print(df_var_info)
18 | dict_df_var_info[p_fn_csv.stem] = df_var_info
19 |
20 | # merge all dataframes
21 | df_var_info = (
22 | pd.concat(
23 | dict_df_var_info,
24 | names=["type"],
25 | # ignore_index=True,
26 | )
27 | .reset_index()
28 | .set_index(["variable", "type"])
29 | .drop(columns="level_1")
30 | .rename(index=lambda x: x.replace("df_", ""))
31 | .reset_index("type")
32 | )
33 |
34 | # debugging info: show all keys with multiple values
35 | for var in df_var_info.index.unique():
36 | len_var = df_var_info.loc[var].shape
37 | if len(len_var) > 1:
38 | if len_var[0] > 1:
39 | df_var = df_var_info.loc[var].dropna(how="all", axis=1).drop_duplicates()
40 | len_var = df_var.shape
41 | print(var, len_var[0])
42 | print(df_var.loc[var].apply(lambda x: x.dropna().unique()))
43 | print()
44 |
45 | df_var_info.loc["DensSnow_Water"].dropna(how="all", axis=1).drop_duplicates()
46 | # %%
47 | # load "loading path" json file
48 | p_fn_json = Path("../src/supy/var2siteselect.json")
49 | with open(p_fn_json, "r") as json_file:
50 | json_data = json.load(json_file)
51 |
52 | # %%
53 | # Create a dictionary with the JSON data stored under new sub-keys
54 | yaml_data = {}
55 | for key in df_var_info.index.unique():
56 | print(key)
57 |
58 | try:
59 | df_var = df_var_info.loc[key].dropna(how="all", axis=1).drop_duplicates()
60 | if df_var["type"][0] == "output":
61 | df_var = df_var.apply(lambda x: x.dropna().unique())
62 | # if length of unique values is 1, then reduce to a scalar
63 | df_var = df_var.apply(lambda x: x[0] if len(x) == 1 else x.tolist())
64 |
65 | print(df_var)
66 | dict_var = df_var.to_dict()
67 | except ValueError:
68 | dict_var = df_var_info.loc[key].dropna().to_dict()
69 | # replace all keys to lower case
70 | yaml_data[key] = {k.lower(): v for k, v in dict_var.items()}
71 | if key in json_data:
72 | yaml_data[key]["loading path"] = json_data[key]
73 |
74 | # re-organise the yaml data
75 | # merge groups under the key 'type'
76 | # remove the key 'group' from the sub-keys
77 | for k, v in yaml_data.items():
78 | dict_var = v
79 | if "group" in dict_var:
80 | dict_var["type"] = {"output": dict_var["group"]}
81 | del dict_var["group"]
82 | else:
83 | dict_var["type"] = {"input": dict_var["type"]}
84 |
85 | # rename the key 'dimensionality' to 'data dimensions':
86 | if "dimensionality" in dict_var:
87 | dict_var["data dimensions"] = [
88 | dict_var["dimensionality"],
89 | {"remarks": dict_var["dimensionality remarks"]},
90 | ]
91 | del dict_var["dimensionality"]
92 | del dict_var["dimensionality remarks"]
93 |
94 | # add a key 'scheme' to setting-related variables
95 | if "physics scheme" not in dict_var and "input" in dict_var["type"]:
96 | if "state" in dict_var["type"]["input"]:
97 | dict_var["physics scheme"] = {
98 | "scheme to add": [
99 | "code 1",
100 | "code 2",
101 | ],
102 | }
103 |
104 | # write the data back to the yaml_data
105 | yaml_data[k] = dict_var
106 |
107 |
108 | # Write the YAML data to a file
109 | with open("var_info.yml", "w") as yaml_file:
110 | yaml.dump(
111 | yaml_data,
112 | yaml_file,
113 | default_flow_style=False,
114 | sort_keys=False,
115 | )
116 | # %%
117 | yaml_data[key]
118 | # %%
119 | sorted(yaml_data)
120 | # %%
121 | import supy as sp
122 | df_state, df_forcing = sp.load_SampleData()
123 | # %%
124 | df_state.shape
125 | # %%
126 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = supy
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | pip install -r requirements.txt
21 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
22 |
23 | livehtml:
24 | sphinx-autobuild -b html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS)
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | Sphinx==6.1.3
2 | numpy>=1.15.4
3 | nbsphinx>=0.8.6
4 | nbconvert>=5.4.1
5 | nbformat>=4.4.0
6 | docutils>=0.14
7 | sphinx-autobuild>=0.7.1
8 | sphinx-rtd-theme>=1.2
9 | sphinxcontrib-websupport>=1.1.0
10 | sphinx-comments
11 | sphinx-click
12 | ipykernel>=4.8.2
13 | sympy
14 | jinja2>=2.10
15 | traitlets>=4.3.2
16 | pandas==1.5
17 | supy>=2021.7.22
18 | seaborn
19 | matplotlib
20 | dask[complete]
21 | toolz
22 | urlpath
23 | atmosp>=0.2.8
24 | cdsapi
25 | xarray
26 | scikit-learn
27 | lmfit
28 | floweaver
29 | sphinx-book-theme
30 | sphinx-panels
31 | sphinxcontrib.email
32 | sphinx-last-updated-by-git
33 | chardet
--------------------------------------------------------------------------------
/docs/source/api.rst:
--------------------------------------------------------------------------------
1 | .. _api_ref:
2 |
3 |
4 |
5 | API reference
6 | =============
7 |
8 |
9 | Top-level Functions
10 | -------------------
11 | .. currentmodule:: supy
12 |
13 | .. autosummary::
14 | :toctree: api/supy
15 |
16 | init_supy
17 | load_forcing_grid
18 | run_supy
19 | save_supy
20 | load_SampleData
21 | show_version
22 |
23 |
24 | Utility Functions
25 | -------------------
26 | .. currentmodule:: supy.util
27 |
28 | ERA-5 Data Downloader
29 | ~~~~~~~~~~~~~~~~~~~~~
30 | .. autosummary::
31 | :toctree: api/supy.util
32 |
33 | download_era5
34 | gen_forcing_era5
35 |
36 | Typical Meteorological Year
37 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~
38 | .. autosummary::
39 | :toctree: api/supy.util
40 |
41 | gen_epw
42 | read_epw
43 |
44 | Gap Filling
45 | ~~~~~~~~~~~
46 |
47 | .. autosummary::
48 | :toctree: api/supy.util
49 |
50 | fill_gap_all
51 |
52 | OHM
53 | ~~~~~~~~~~~
54 |
55 | .. autosummary::
56 | :toctree: api/supy.util
57 |
58 | derive_ohm_coef
59 | sim_ohm
60 |
61 | Surface Conductance
62 | ~~~~~~~~~~~~~~~~~~~
63 |
64 | .. autosummary::
65 | :toctree: api/supy.util
66 |
67 | cal_gs_suews
68 | cal_gs_obs
69 | calib_g
70 |
71 | WRF-SUEWS
72 | ~~~~~~~~~
73 |
74 | .. autosummary::
75 | :toctree: api/supy.util
76 |
77 | extract_reclassification
78 | plot_reclassification
79 |
80 | Plotting
81 | ~~~~~~~~
82 |
83 | .. autosummary::
84 | :toctree: api/supy.util
85 |
86 | plot_comp
87 | plot_day_clm
88 | plot_rsl
89 |
90 | Roughness Calculation
91 | ~~~~~~~~~~~~~~~~~~~~~
92 |
93 | .. autosummary::
94 | :toctree: api/supy.util
95 |
96 | cal_z0zd
97 | cal_neutral
98 |
99 |
100 | Command-Line Tools
101 | -------------------
102 | .. toctree::
103 | :maxdepth: 1
104 |
105 | api/supy.cmd/suews-run
106 | api/supy.cmd/suews-convert
107 |
108 |
109 |
110 | Key Data Structures
111 | -------------------
112 |
113 | .. toctree::
114 | :maxdepth: 1
115 |
116 | data-structure/df_state
117 | data-structure/df_forcing
118 | data-structure/df_output
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
--------------------------------------------------------------------------------
/docs/source/api/supy.cmd/suews-convert.rst:
--------------------------------------------------------------------------------
1 |
2 |
3 | .. click:: supy.cmd.table_converter:convert_table_cmd
4 | :prog: suews-convert
--------------------------------------------------------------------------------
/docs/source/api/supy.cmd/suews-run.rst:
--------------------------------------------------------------------------------
1 |
2 |
3 | .. click:: supy.cmd.SUEWS:SUEWS
4 | :prog: suews-run
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.cal_gs_obs.rst:
--------------------------------------------------------------------------------
1 | supy.util.cal\_gs\_obs
2 | ======================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: cal_gs_obs
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.cal_gs_suews.rst:
--------------------------------------------------------------------------------
1 | supy.util.cal\_gs\_suews
2 | ========================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: cal_gs_suews
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.cal_neutral.rst:
--------------------------------------------------------------------------------
1 | supy.util.cal\_neutral
2 | ======================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: cal_neutral
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.calib_g.rst:
--------------------------------------------------------------------------------
1 | supy.util.calib\_g
2 | ==================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: calib_g
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.derive_ohm_coef.rst:
--------------------------------------------------------------------------------
1 | supy.util.derive\_ohm\_coef
2 | ===========================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: derive_ohm_coef
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.download_era5.rst:
--------------------------------------------------------------------------------
1 | supy.util.download\_era5
2 | ========================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: download_era5
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.extract_reclassification.rst:
--------------------------------------------------------------------------------
1 | supy.util.extract\_reclassification
2 | ===================================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: extract_reclassification
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.fill_gap_all.rst:
--------------------------------------------------------------------------------
1 | supy.util.fill\_gap\_all
2 | ========================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: fill_gap_all
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.gen_epw.rst:
--------------------------------------------------------------------------------
1 | supy.util.gen\_epw
2 | ==================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: gen_epw
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.gen_forcing_era5.rst:
--------------------------------------------------------------------------------
1 | supy.util.gen\_forcing\_era5
2 | ============================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: gen_forcing_era5
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.plot_comp.rst:
--------------------------------------------------------------------------------
1 | supy.util.plot\_comp
2 | ====================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: plot_comp
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.plot_day_clm.rst:
--------------------------------------------------------------------------------
1 | supy.util.plot\_day\_clm
2 | ========================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: plot_day_clm
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.plot_reclassification.rst:
--------------------------------------------------------------------------------
1 | supy.util.plot\_reclassification
2 | ================================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: plot_reclassification
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.plot_rsl.rst:
--------------------------------------------------------------------------------
1 | supy.util.plot\_rsl
2 | ===================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: plot_rsl
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.read_epw.rst:
--------------------------------------------------------------------------------
1 | supy.util.read\_epw
2 | ===================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: read_epw
--------------------------------------------------------------------------------
/docs/source/api/supy.util/supy.util.sim_ohm.rst:
--------------------------------------------------------------------------------
1 | supy.util.sim\_ohm
2 | ==================
3 |
4 | .. currentmodule:: supy.util
5 |
6 | .. autofunction:: sim_ohm
--------------------------------------------------------------------------------
/docs/source/api/supy/supy.init_supy.rst:
--------------------------------------------------------------------------------
1 | supy.init\_supy
2 | ===============
3 |
4 | .. currentmodule:: supy
5 |
6 | .. autofunction:: init_supy
--------------------------------------------------------------------------------
/docs/source/api/supy/supy.load_SampleData.rst:
--------------------------------------------------------------------------------
1 | supy.load\_SampleData
2 | =====================
3 |
4 | .. currentmodule:: supy
5 |
6 | .. autofunction:: load_SampleData
--------------------------------------------------------------------------------
/docs/source/api/supy/supy.load_forcing_grid.rst:
--------------------------------------------------------------------------------
1 | supy.load\_forcing\_grid
2 | ========================
3 |
4 | .. currentmodule:: supy
5 |
6 | .. autofunction:: load_forcing_grid
--------------------------------------------------------------------------------
/docs/source/api/supy/supy.run_supy.rst:
--------------------------------------------------------------------------------
1 | supy.run\_supy
2 | ==============
3 |
4 | .. currentmodule:: supy
5 |
6 | .. autofunction:: run_supy
--------------------------------------------------------------------------------
/docs/source/api/supy/supy.save_supy.rst:
--------------------------------------------------------------------------------
1 | supy.save\_supy
2 | ===============
3 |
4 | .. currentmodule:: supy
5 |
6 | .. autofunction:: save_supy
--------------------------------------------------------------------------------
/docs/source/api/supy/supy.show_version.rst:
--------------------------------------------------------------------------------
1 | supy.show\_version
2 | ==================
3 |
4 | .. currentmodule:: supy
5 |
6 | .. autofunction:: show_version
--------------------------------------------------------------------------------
/docs/source/data-structure/df_forcing.rst:
--------------------------------------------------------------------------------
1 |
2 | .. _df_forcing_var:
3 |
4 | ``df_forcing`` variables
5 | ============================
6 |
7 |
8 |
9 | .. note:: Data structure of ``df_forcing`` is explained :ref:`here </data-structure/supy-io.ipynb#df_forcing:-forcing-data>`.
10 |
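A quick way to inspect these variables in a live session (a minimal sketch using the bundled sample data and the public SuPy API):

.. code-block:: python

   import supy as sp

   # load the bundled sample forcing data
   _, df_forcing = sp.load_SampleData()

   # list the forcing variables documented below
   print(df_forcing.columns.to_list())

   # summary statistics for a few key variables
   print(df_forcing[['kdown', 'Tair', 'RH', 'U']].describe())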
11 | .. option:: RH
12 |
13 | :Description:
14 | Relative Humidity [%] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
15 |
16 |
17 | .. option:: Tair
18 |
19 | :Description:
20 | Air temperature [°C] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
21 |
22 |
23 | .. option:: U
24 |
25 | :Description:
26 | Wind speed [m s-1] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
27 |
28 |
29 | .. option:: Wuh
30 |
31 | :Description:
32 | External water use [|m^3|]
33 |
34 |
35 | .. option:: fcld
36 |
37 | :Description:
38 | Cloud fraction [tenths]
39 |
40 |
41 | .. option:: id
42 |
43 | :Description:
44 | Day of year [DOY]
45 |
46 |
47 | .. option:: imin
48 |
49 | :Description:
50 | Minute [M]
51 |
52 |
53 | .. option:: isec
54 |
55 | :Description:
56 | Second [S]
57 |
58 |
59 | .. option:: it
60 |
61 | :Description:
62 | Hour [H]
63 |
64 |
65 | .. option:: iy
66 |
67 | :Description:
68 | Year [YYYY]
69 |
70 |
71 | .. option:: kdiff
72 |
73 | :Description:
74 | Diffuse radiation [W |m^-2|] |Recmd| if `SOLWEIGUse` = 1
75 |
76 |
77 | .. option:: kdir
78 |
79 | :Description:
80 | Direct radiation [W |m^-2|] |Recmd| if `SOLWEIGUse` = 1
81 |
82 |
83 | .. option:: kdown
84 |
85 | :Description:
86 | Incoming shortwave radiation [W |m^-2|] Must be > 0 W |m^-2|.
87 |
88 |
89 | .. option:: lai
90 |
91 | :Description:
92 | Observed leaf area index [|m^-2| |m^-2|]
93 |
94 |
95 | .. option:: ldown
96 |
97 | :Description:
98 | Incoming longwave radiation [W |m^-2|]
99 |
100 |
101 | .. option:: pres
102 |
103 | :Description:
104 | Barometric pressure [kPa] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
105 |
106 |
107 | .. option:: qe
108 |
109 | :Description:
110 | Latent heat flux [W |m^-2|]
111 |
112 |
113 | .. option:: qf
114 |
115 | :Description:
116 | Anthropogenic heat flux [W |m^-2|]
117 |
118 |
119 | .. option:: qh
120 |
121 | :Description:
122 | Sensible heat flux [W |m^-2|]
123 |
124 |
125 | .. option:: qn
126 |
127 | :Description:
128 | Net all-wave radiation [W |m^-2|] (Required if `NetRadiationMethod` = 0.)
129 |
130 |
131 | .. option:: qs
132 |
133 | :Description:
134 | Storage heat flux [W |m^-2|]
135 |
136 |
137 | .. option:: rain
138 |
139 | :Description:
140 | Rainfall [mm] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
141 |
142 |
143 | .. option:: snow
144 |
145 | :Description:
146 | Snow cover fraction (0 – 1) [-] (Required if `SnowUse` = 1)
147 |
148 |
149 | .. option:: wdir
150 |
151 | :Description:
152 | Wind direction [°] |NotAvail|
153 |
154 |
155 | .. option:: xsmd
156 |
157 | :Description:
158 | Observed soil moisture [|m^3| |m^-3|] or [kg |kg^-1|]
159 |
160 |
--------------------------------------------------------------------------------
/docs/source/data-structure/supy-io.py:
--------------------------------------------------------------------------------
1 | #%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
2 | import os
3 | try:
4 | os.chdir(os.path.join(os.getcwd(), 'docs/source/data-structure'))
5 | print(os.getcwd())
6 | except:
7 | pass
8 | #%% [markdown]
9 | # # Key IO Data Structures in SuPy
10 | #%% [markdown]
11 | # ## Introduction
12 | #%% [markdown]
13 | # The Cell below demonstrates a minimal case of SuPy simulation with all key IO data structures included:
14 |
15 | #%%
16 | import supy as sp
17 | df_state_init, df_forcing = sp.load_SampleData()
18 | df_output, df_state_final = sp.run_supy(df_forcing.iloc[:288], df_state_init)
19 |
20 | #%% [markdown]
21 | # * Input:
22 | # * `df_state_init`: model initial states
23 | # * `df_forcing`: forcing data
24 | # * Output:
25 | # * `df_state_final`: model final states
26 | # * `df_output`: model output results
27 |
28 |
29 | #%% [markdown]
30 | # ## Input
31 | #%% [markdown]
32 | # ### `df_state_init`: model initial states
33 |
34 | #%%
35 | df_state_init.head()
36 |
37 | #%% [markdown]
38 | # ### `df_forcing`: forcing data
39 |
40 | #%%
41 | df_forcing.head()
42 |
43 | #%% [markdown]
44 | # ## Output
45 | #%% [markdown]
46 | # ### `df_state_final`: model final states
47 |
48 | #%%
49 | df_state_final.head()
50 |
51 | #%% [markdown]
52 | # ### `df_output`: model output results
53 |
54 | #%%
55 | df_output.head()
56 |
57 | #%% [markdown]
58 | # [test-link-object: ah_slope_cooling](df_state.rst#cmdoption-arg-ah-slope-cooling)
59 |
60 |
--------------------------------------------------------------------------------
/docs/source/faq.rst:
--------------------------------------------------------------------------------
1 | .. _faq:
2 |
3 |
4 | FAQ
5 | ===
6 |
7 | .. contents:: Contents
8 | :local:
9 | :backlinks: none
10 |
11 | I cannot install SuPy following the docs, what is wrong there?
12 | ----------------------------------------------------------------
13 |
14 | Please check whether your environment meets the following requirements:
15 |
16 | 1. Operating system (OS):
17 |
18 | a. Is it 64-bit? Only 64-bit systems are supported.
19 |
20 | b. Is your OS up to date? Only recent desktop systems are supported:
21 |
22 | - Windows 10 and above
23 | - macOS 10.13 and above
24 | - Linux: no restriction;
25 | If SuPy cannot run on your specific Linux distribution,
26 | please report it to us.
27 |
28 | You can get the OS information with the following code:
29 |
30 | .. code-block:: python
31 |
32 | import platform
33 | platform.platform()
34 |
35 | 2. Python interpreter:
36 |
37 | a. Is your Python interpreter 64-bit?
38 |
39 | Check running mode with the following code:
40 |
41 | .. code-block:: python
42 |
43 | import struct
44 | struct.calcsize('P')*8
45 |
46 | b. Is your Python version above 3.5?
47 |
48 | Check version info with the following code:
49 |
50 | .. code-block:: python
51 |
52 | import sys
53 | sys.version
54 |
55 | If your environment doesn't meet these requirements,
56 | please switch to a suitable environment;
57 | if it does and the problem persists, `please report your issue`__.
58 |
59 | __ new_issue_
60 |
61 | How do I know which version of SuPy I am using?
62 | -----------------------------------------------
63 |
64 | Use the following code:
65 |
66 | .. code-block:: python
67 |
68 | import supy
69 | supy.show_version()
70 |
71 | .. note:: `show_version` is only available after v2019.5.28.
72 |
73 |
74 |
75 | A `kernel may have died` exception happened, where did I go wrong?
76 | ------------------------------------------------------------------
77 |
78 | This issue is most likely caused by invalid input to the SuPy and SUEWS kernel.
79 | We try to catch such problems before they crash the kernel,
80 | but unfortunately they might still occur in some edge cases.
81 |
82 | Please `report such issues to us`__ with your input files for debugging.
83 | Thanks!
84 |
85 | __ `GitHub Issues`_
86 |
87 |
88 | How can I upgrade SuPy to an up-to-date version?
89 | ------------------------------------------------
90 | Run the following command in your terminal:
91 |
92 | .. code-block:: shell
93 |
94 | python3 -m pip install supy --upgrade
95 |
96 |
97 | How to deal with ``KeyError`` when trying to load initial model states?
98 | -----------------------------------------------------------------------
99 |
100 | Please see :issue:`28`
101 |
102 |
103 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. _index_page:
2 |
3 | SuPy: SUEWS that speaks Python
4 | ------------------------------
5 |
6 | .. image:: https://img.shields.io/pypi/pyversions/supy.svg
7 | :target: https://pypi.org/project/supy
8 | :alt: Python Version Support Status
9 |
10 | .. image:: https://img.shields.io/pypi/v/supy.svg
11 | :target: https://pypi.org/project/supy
12 | :alt: Latest Version Status
13 |
14 | .. image:: https://pepy.tech/badge/supy
15 | :target: https://pepy.tech/project/supy
16 | :alt: Downloads
17 |
18 | .. image:: https://mybinder.org/badge_logo.svg
19 | :target: https://mybinder.org/v2/gh/UMEP-dev/SuPy/main
20 | :alt: Binder Status
21 |
22 | .. image:: https://readthedocs.org/projects/supy/badge/?version=latest
23 | :target: https://supy.readthedocs.io/en/latest/?badge=latest
24 | :alt: Documentation Status
25 |
26 | .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.2574404.svg
27 | :target: https://doi.org/10.5281/zenodo.2574404
28 | :alt: DOI
29 |
30 |
31 |
32 | - **What is SuPy?**
33 |
34 | SuPy is a Python-enhanced urban climate model
35 | with `SUEWS `_ as its computation core.
36 |
37 | The scientific rigour of SuPy results is thus guaranteed by SUEWS
38 | (see :ref:`SUEWS publications ` and
39 | :ref:`Parameterisations and sub-models within SUEWS`).
40 |
41 | Meanwhile, the data analysis ability of SuPy is greatly enhanced
42 | by `the Python-based SciPy Stack `_,
43 | notably `numpy `_ and
44 | `pandas `_.
45 | More details are described in `our SuPy paper `_.
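For instance, once results are in a ``pandas.DataFrame``, typical analyses become one-liners. A minimal sketch (mirroring the quick demo in the README):

.. code-block:: python

   import supy as sp

   # load sample data and run a simulation
   df_state_init, df_forcing = sp.load_SampleData()
   df_output, df_state_final = sp.run_supy(df_forcing, df_state_init)

   # pandas-powered aggregation: daily means of key energy fluxes
   grid = df_state_init.index[0]
   print(df_output.SUEWS.loc[grid, ['QN', 'QH', 'QE']].resample('1d').mean())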
46 |
47 |
48 |
49 | - **How to get SuPy?**
50 |
51 | SuPy is available on all major platforms (macOS, Windows, Linux) for Python 3.7+ (64-bit only)
52 | via `PyPI `_:
53 |
54 | .. code-block:: shell
55 |
56 | python3 -m pip install supy --upgrade
57 |
58 | - **How to use SuPy?**
59 |
60 | * Please follow :ref:`Quickstart of SuPy` and :ref:`other tutorials `.
61 |
62 | * Please see :ref:`api_ref` for details of the API.
63 |
64 | * Please see :ref:`faq` if you run into any issues.
65 |
66 | - **How to contribute to SuPy?**
67 |
68 | * Add your development via `Pull Request `_
69 | * Report issues via the `GitHub page `_.
70 | * Cite `our SuPy paper `_.
71 | * Provide `suggestions and feedback `_.
72 |
73 | .. toctree::
74 | :hidden:
75 | :maxdepth: 2
76 |
77 | tutorial/tutorial
78 | data-structure/supy-io
79 | api
80 | faq
81 | version-history
82 |
83 |
--------------------------------------------------------------------------------
/docs/source/proc_var_info/df_forcing.csv:
--------------------------------------------------------------------------------
1 | variable,Description
2 | iy,Year [YYYY]
3 | id,Day of year [DOY]
4 | it,Hour [H]
5 | imin,Minute [M]
6 | qn,Net all-wave radiation [W |m^-2|] (Required if `NetRadiationMethod` = 0.)
7 | qh,Sensible heat flux [W |m^-2|]
8 | qe,Latent heat flux [W |m^-2|]
9 | qs,Storage heat flux [W |m^-2|]
10 | qf,Anthropogenic heat flux [W |m^-2|]
11 | U,Wind speed [m s-1] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
12 | RH,Relative Humidity [%] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
13 | Tair,Air temperature [°C] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
14 | pres,Barometric pressure [kPa] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
15 | rain,Rainfall [mm] (measurement height (`z`) is needed in `SUEWS_SiteSelect.txt`)
16 | kdown,Incoming shortwave radiation [W |m^-2|] Must be > 0 W |m^-2|.
17 | snow,Snow cover fraction (0 – 1) [-] (Required if `SnowUse` = 1)
18 | ldown,Incoming longwave radiation [W |m^-2|]
19 | fcld,Cloud fraction [tenths]
20 | Wuh,External water use [|m^3|]
21 | xsmd,Observed soil moisture [|m^3| |m^-3|] or [kg |kg^-1|]
22 | lai,Observed leaf area index [|m^-2| |m^-2|]
23 | kdiff,Diffuse radiation [W |m^-2|] |Recmd| if `SOLWEIGUse` = 1
24 | kdir,Direct radiation [W |m^-2|] |Recmd| if `SOLWEIGUse` = 1
25 | wdir,Wind direction [°] |NotAvail|
26 | isec,Second [S]
27 |
--------------------------------------------------------------------------------
/docs/source/proc_var_info/gen_df_forcing_output_csv.py:
--------------------------------------------------------------------------------
1 | # %% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
2 |
3 | import os
4 |
5 | try:
6 | os.chdir(os.path.join(os.getcwd(), "docs/proc_var_info"))
7 | print(os.getcwd())
8 | except:
9 | pass
10 |
11 | # %%
12 | from urlpath import URL
13 | from pathlib import Path
14 | import numpy as np
15 | import pandas as pd
16 | import supy as sp
17 | import os
18 |
19 |
20 | os.getcwd()
21 | # %% sample run
22 | print("loading in", "gen_df_forcing", "...")
23 | df_state_init_sample, df_forcing_sample = sp.load_SampleData()
24 | df_output_sample, df_state_end_sample = sp.run_supy(
25 | df_forcing_sample.iloc[:10], df_state_init_sample
26 | )
27 | print("loading in", "gen_df_forcing", "...")
28 |
29 | # %% [markdown]
30 | # ## generate forcing related dataframe
31 | # %%
32 | # ### load `SUEWS_***.txt` related tables
33 | from nml_rst_proc import url_repo_base, url_repo_input
34 |
35 | url_repo_output = URL(url_repo_base) / "output_files"
36 |
37 |
38 | def gen_df_forcing(
39 | path_csv_in="SSss_YYYY_data_tt.csv", url_base=url_repo_input,
40 | ) -> pd.DataFrame:
41 | """Generate description info of supy forcing data into a dataframe
42 |
43 | Parameters
44 | ----------
45 | path_csv_in : str, optional
46 | path to the input csv file relative to url_base (the default is '/input_files/SSss_YYYY_data_tt.csv'])
47 | url_base : urlpath.URL, optional
48 | URL to the input files of repo base (the default is url_repo_input, which is defined at the top of this file)
49 |
50 | Returns
51 | -------
52 | pd.DataFrame
53 | Description info of supy forcing data
54 | """
55 |
56 | try:
57 | # load info from SUEWS docs repo
58 | # this is regarded as the official source
59 | urlpath_table = url_base / path_csv_in
60 | df_var_info = pd.read_csv(urlpath_table)
61 | except:
62 | print(f"{urlpath_table} not existing!")
63 | else:
64 | # clean info dataframe
65 | df_var_forcing = df_var_info.drop(["No.", "Use"], axis=1)
66 |
67 | # set index with `Column name`
68 | df_var_forcing = df_var_forcing.set_index("Column Name")
69 | df_var_forcing.index = df_var_forcing.index.map(
70 | lambda x: x.replace("`", "")
71 | ).rename("variable")
72 |
73 | # add `Second` info
74 | df_var_forcing.loc["isec"] = "Second [S]"
75 |
76 | return df_var_forcing
77 |
78 |
79 | # %% [markdown]
80 | # ## generate output related dataframe
81 |
82 |
83 | # %%
84 | def gen_df_output(
85 | list_csv_in=[
86 | "SSss_YYYY_SUEWS_TT.csv",
87 | "SSss_DailyState.csv",
88 | "SSss_YYYY_snow_TT.csv",
89 | "SSss_YYYY_RSL_TT.csv",
90 | "SSss_YYYY_BEERS_TT.csv",
91 | ],
92 | url_base=url_repo_output,
93 | ) -> pd.DataFrame:
94 | """Generate description info of supy output results into dataframe
95 |
96 | Parameters
97 | ----------
98 | list_csv_in : list, optional
99 | list of file names of csv files with meta info (the default covers the SUEWS, DailyState, snow, RSL and BEERS output groups)
100 | url_base : [type], optional
101 | URL to the output dir of repo base (the default is url_repo_output, which is defined at the top of this file)
102 |
103 | Returns
104 | -------
105 | pd.DataFrame
106 | Description info of supy output results
107 | """
108 |
109 | # list of URLs
110 | list_url_table = [url_base / table for table in list_csv_in]
111 | try:
112 | df_var_info = pd.concat([pd.read_csv(f) for f in list_url_table], sort=False)
113 | except:
114 | for url in list_url_table:
115 | if not url.get().ok:
116 | print(f"{url} not existing!")
117 | else:
118 | # clean meta info
119 | df_var_info_x = (
120 | df_var_info.set_index("Name").loc[:, ["Description"]].drop_duplicates()
121 | )
122 |
123 | df_var_output = (
124 | df_var_info_x.copy()
125 | .assign(lower=df_var_info_x.index.str.lower())
126 | .reset_index()
127 | .set_index("lower")
128 | )
129 |
130 | df_var_group = df_output_sample.columns.to_frame()
131 | df_var_group.index = df_var_group.index.droplevel(0).rename("Name")
132 |
133 | # wrap into a dataframe
134 | df_var_output = (
135 | df_var_group.merge(
136 | df_var_output.set_index("Name"), left_on="Name", right_on="Name"
137 | )
138 | .rename(columns={"var": "variable", "group": "Group",})
139 | .set_index("variable")
140 | .drop_duplicates()
141 | )
142 |
143 | return df_var_output
144 |
145 |
146 | # %% [markdown]
147 | # ## generate csv files for meta info
148 | # %%
149 | # df_forcing=gen_df_forcing('SSss_YYYY_data_tt.csv')
150 |
151 | # df_output=gen_df_output(
152 | # [
153 | # 'SSss_YYYY_SUEWS_TT.csv',
154 | # 'SSss_DailyState.csv',
155 | # 'SSss_YYYY_snow_TT.csv',
156 | # ],
157 | # )
158 |
159 |
160 | # # %%
161 | # df_forcing.head()
162 |
163 |
164 | # #%%
165 | # df_output.head()
166 |
167 |
168 | # #%%
169 |
--------------------------------------------------------------------------------
/docs/source/proc_var_info/gen_rst.py:
--------------------------------------------------------------------------------
1 | # %% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
2 | import os
3 |
4 | try:
5 | os.chdir(os.path.join(os.getcwd(), "docs/proc_var_info"))
6 | print(os.getcwd())
7 | except:
8 | pass
9 |
10 | # %%
11 | from pathlib import Path
12 | import pandas as pd
13 | import supy as sp
14 | import os
15 |
16 | try:
17 | os.chdir(os.path.join(os.getcwd(), "docs/proc_var_info"))
18 | print(os.getcwd())
19 | except:
20 | pass
21 | # get_ipython().run_line_magic('load_ext', 'autoreload')
22 | # get_ipython().run_line_magic('autoreload', '2')
23 |
24 |
25 | # %%
26 | from gen_df_state_csv import (
27 | gen_df_state,
28 | list_table,
29 | set_initcond,
30 | set_runcontrol,
31 | set_input_runcontrol,
32 | )
33 | from gen_df_forcing_output_csv import gen_df_forcing, gen_df_output
34 |
35 | # %% [markdown]
36 | # # generate option rst files
37 | # %% [markdown]
38 | # ## generate dataframes for variable groups
39 |
40 | # %%
41 | print("generating df_state.csv ...")
42 | df_state = gen_df_state(
43 | list_table,
44 | set_initcond,
45 | set_runcontrol,
46 | set_input_runcontrol,
47 | )
48 | df_state.to_csv("df_state.csv")
49 | print("df_state.csv done!")
50 |
51 |
52 | # #%%
53 | # get_ipython().run_line_magic('load_ext', 'snakeviz')
54 | # get_ipython().run_line_magic('snakeviz', 'gen_df_state(list_table, set_initcond, set_runcontrol, set_input_runcontrol)')
55 |
56 |
57 | # %%
58 | print("generating df_forcing.csv ...")
59 | df_forcing = gen_df_forcing("SSss_YYYY_data_tt.csv")
60 | df_forcing.to_csv("df_forcing.csv")
61 | print("df_forcing.csv done!")
62 |
63 |
64 | # %%
65 | print("generating df_output.csv ...")
66 | df_output = gen_df_output(
67 | [
68 | "SSss_YYYY_SUEWS_TT.csv",
69 | "SSss_DailyState.csv",
70 | "SSss_YYYY_snow_TT.csv",
71 | "SSss_YYYY_RSL_TT.csv",
72 | "SSss_YYYY_SOLWEIG_TT.csv",
73 | ],
74 | )
75 | df_output.to_csv("df_output.csv")
76 | print("df_output.csv done!")
77 |
78 | # %% [markdown]
79 | # ## generate option string for rst option file
80 |
81 | # %%
82 |
83 |
84 | def gen_opt_str(ser_rec: pd.Series) -> str:
85 | """generate rst option string
86 |
87 | Parameters
88 | ----------
89 | ser_rec : pd.Series
90 | record for specifications
91 |
92 | Returns
93 | -------
94 | str
95 | rst string
96 | """
97 |
98 | name = ser_rec.name
99 | indent = r" "
100 | str_opt = f".. option:: {name}" + "\n\n"
101 | for spec in ser_rec.sort_index().index:
102 | str_opt += indent + f":{spec}:" + "\n"
103 | spec_content = ser_rec[spec]
104 | str_opt += indent + indent + f"{spec_content}" + "\n"
105 | return str_opt
106 |
107 |
108 | # xx=df_var_info.set_index('variable').iloc[10]
109 | # print(gen_opt_str(xx))
110 |
111 |
112 | # %%
113 | def gen_rst(path_rst, path_df_csv, rst_title):
114 | df_var_info = pd.read_csv(path_df_csv).set_index("variable")
115 | df_var_info["rst"] = df_var_info.copy().apply(gen_opt_str, axis=1)
116 | df_var_info = df_var_info.sort_index().reset_index(drop=True)
117 | rst_txt_x = "\n\n".join(df_var_info.rst)
118 | rst_txt = "\n".join([rst_title, rst_txt_x])
119 | with open(path_rst, "w") as f:
120 | print(rst_txt, file=f)
121 |
122 | return path_rst
123 |
124 |
125 | # gen_rst(
126 | # '../source/data-structure/test.rst',
127 | # df_state,
128 | # 'xx\n')
129 |
130 |
131 | # %%
132 | def gen_group_dict(group, path_rst_base=Path("../data-structure/")) -> dict:
133 | """generate dict of rst strings for df groups."""
134 |
135 | rst_title = f"""
136 | .. _df_{group}_var:
137 |
138 | ``df_{group}`` variables
139 | ============================
140 |
141 |
142 | """
143 | dict_info_group = {
144 | "output": "/data-structure/supy-io.ipynb#df_output:-model-output-results",
145 | "forcing": "/data-structure/supy-io.ipynb#df_forcing:-forcing-data",
146 | "state": "/data-structure/supy-io.ipynb#df_state_init:-model-initial-states",
147 | }
148 | rst_info_group = f"""
149 | .. note:: Data structure of ``df_{group}`` is explained :ref:`here <{dict_info_group[group]}>`.
150 | """
151 |
152 | dict_group = {
153 | "path_rst": path_rst_base / ("df_" + group + ".rst"),
154 | "path_df_csv": "df_" + group + ".csv",
155 | "rst_title": rst_title + rst_info_group,
156 | }
157 |
158 | return dict_group
159 |
160 |
161 | # print(gen_group_dict('state'))
162 |
163 |
164 | # %%
165 |
166 | dict_rst_out = {
167 | group: gen_group_dict(group) for group in ["state", "forcing", "output"]
168 | }
169 | # dict_rst_out
170 |
171 |
172 | # %%
173 | for group in dict_rst_out:
174 | print("working on group:", group)
175 | print("file generated:", gen_rst(**dict_rst_out[group]), "\n")
176 |
--------------------------------------------------------------------------------
/docs/source/proc_var_info/nml_rst_proc.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import webbrowser
3 | import os
4 | import supy as sp
5 | import numpy as np
6 | from urlpath import URL
7 | import pandas as pd
8 | from pathlib import Path
9 |
10 | os.getcwd()
11 |
12 | # %%
13 | # code version
14 | tag_suews_ver = sp.__version_driver__
15 |
16 | print(f"supy_driver version required: {tag_suews_ver}")
17 |
18 | # list of useful URLs
19 | def gen_url_base(tag):
20 | url_repo_base = URL(
21 | "https://github.com/"
22 | + "UMEP-dev/"
23 | + f"SUEWS/raw/{tag}/docs/source"
24 | )
25 | return url_repo_base
26 |
27 |
28 | url_repo_base = (
29 | gen_url_base(tag_suews_ver)
30 | if gen_url_base(tag_suews_ver).get().ok
31 | else gen_url_base("master")
32 | )
33 | print(f'''
34 | =================================
35 | SUEWS docs source: {url_repo_base}
36 | =================================
37 | ''')
38 |
39 | url_repo_input = URL(url_repo_base) / "input_files"
40 |
41 | dict_base = {
42 | "docs": URL("https://suews.readthedocs.io/en/latest/input_files/"),
43 | "github": url_repo_input,
44 | }
45 |
46 | # %% [markdown]
47 | # ### filter input variables
48 | # %%
49 | set_input = sp._load.set_var_input.copy()
50 | set_input.update(sp._load.set_var_input_multitsteps)
51 | df_init_sample, df_forcing_sample = sp.load_SampleData()
52 | set_input.difference_update(set(df_forcing_sample.columns))
53 | # set_input, len(set_input)
54 |
55 | # %% [markdown]
56 | # #### retrieve SUEWS-related variables
57 |
58 | # %%
59 | dict_var2SiteSelect = sp._load.dict_var2SiteSelect
60 |
61 | dict_var_full = sp._load.exp_dict_full(dict_var2SiteSelect)
62 |
63 |
64 | def extract_var_suews(dict_var_full, k):
65 | x = sp._load.flatten_list(dict_var_full[k])
66 | x = np.unique(x)
67 | x = [
68 | xx
69 | for xx in x
70 | if xx not in ["base", "const", "0.0"] + [str(x) for x in range(24)]
71 | ]
72 | x = [xx for xx in x if "Code" not in xx]
73 | return x
74 |
75 |
76 | dict_var_ref_suews = {k: extract_var_suews(dict_var_full, k) for k in dict_var_full}
77 |
78 | df_var_ref_suews = pd.DataFrame(
79 | {k: ", ".join(dict_var_ref_suews[k]) for k in dict_var_ref_suews}, index=[0]
80 | ).T.rename({0: "SUEWS-related variables"}, axis=1)
81 |
82 | ser_input_site_exp = (
83 | df_var_ref_suews.filter(items=set_input, axis=0)
84 | .loc[:, "SUEWS-related variables"]
85 | .str.lower()
86 | .str.split(",")
87 | )
88 |
89 | set_site = set(x.lower().strip() for x in np.concatenate(ser_input_site_exp.values))
90 |
91 | # set_site, len(set_site)
92 | # %% [markdown]
93 | # ### filter `runcontrol` related variables
94 | # %%
95 | # runcontrol variables for supy input
96 | path_runcontrol = sp._env.trv_supy_module / "sample_run" / "RunControl.nml"
97 | dict_runcontrol = sp._load.load_SUEWS_dict_ModConfig(path_runcontrol).copy()
98 | set_runcontrol = set(dict_runcontrol.keys())
99 | set_input_runcontrol = set_runcontrol.intersection(set_input)
100 |
101 | print(f'''
102 | ============================================================
103 | set_input_runcontrol has {len(set_input_runcontrol)} variables:
104 | ''')
105 | for var in set_input_runcontrol:
106 | print(var)
107 | print(f'''
108 | ============================================================
109 | ''')
110 |
111 | # %% [markdown]
112 | # ### filter `initialcondition` related variables
113 | # %%
114 | # initcond variables for supy input
115 | dict_initcond = sp._load.dict_InitCond_default.copy()
116 | set_initcond = set(dict_initcond.keys())
117 | set_input_initcond = set_initcond.intersection(set_input)
118 | # set_input_initcond, len(set_input_initcond)
119 |
120 |
121 | # %% [markdown]
122 | # ### functions to process `nml` related variables
123 | # %%
124 | def form_option(str_opt):
125 | """generate option name based suffix for URL
126 |
127 | :param str_opt: opt name
128 | :type str_opt: str
129 | :return: URL suffix for the specified option
130 | :rtype: str
131 | """
132 |
133 | str_base = "#cmdoption-arg-"
134 | str_opt_x = str_base + str_opt.lower().replace("_", "-").replace("(", "-").replace(
135 | ")", ""
136 | )
137 | return str_opt_x
138 |
139 |
140 | # form_option('snowinitially')
141 |
142 |
143 | # %%
144 | def choose_page(
145 | str_opt,
146 | set_site=set_site,
147 | set_runcontrol=set_runcontrol,
148 | set_initcond=set_initcond,
149 | source="docs",
150 | ):
151 | # print('str_opt', str_opt)
152 | suffix_page = "html" if source == "docs" else "rst"
153 | # runcontrol variable:
154 | if str_opt in set_runcontrol:
155 | str_base = "RunControl"
156 | if str_opt.startswith("tstep"):
157 | name_page = "Time_related_options"
158 | else:
159 | name_page = "scheme_options"
160 |
161 | # initcondition variable:
162 | elif str_opt in set_initcond:
163 | str_base = "Initial_Conditions"
164 | # the following test sequence is IMPORTANT!
165 | if str_opt.startswith("soilstore"):
166 | name_page = "Soil_moisture_states"
167 | elif str_opt.startswith("snow"):
168 | name_page = "Snow_related_parameters"
169 | elif str_opt.endswith("state"):
170 | name_page = "Above_ground_state"
171 | elif str_opt in ("dayssincerain", "temp_c0"):
172 | name_page = "Recent_meteorology"
173 | else:
174 | name_page = "Vegetation_parameters"
175 |
176 | # site characteristics variable:
177 | elif str_opt in set_site:
178 | str_base = "SUEWS_SiteInfo"
179 | name_page = "Input_Options"
180 |
181 | # defaults to empty strings
182 | else:
183 | str_base = ""
184 | name_page = ""
185 |
186 | str_page = ".".join([name_page, suffix_page])
187 | str_page_full = str_base + "/" + str_page
188 | return str_page_full
189 |
190 |
191 | # for source in ['docs','github']:
192 | # print(source)
193 | # for x in sorted(list(set_site)+list(set_runcontrol)+list(set_initcond)):
194 | # print()
195 | # print(choose_page(x, source=source))
196 | # choose_page('tstep', set_site, set_runcontrol, set_initcond)
197 | # choose_page('snowinitially', set_site, set_runcontrol, set_initcond)
198 |
199 |
200 | # %%
201 | def gen_url_option(
202 | str_opt,
203 | set_site=set_site,
204 | set_runcontrol=set_runcontrol,
205 | set_initcond=set_initcond,
206 | source="docs",
207 | ):
208 | """construct a URL for option based on source
209 |
210 |     :param str_opt: option name, defaults to ''
211 |     :type str_opt: str, optional
212 |     :param source: URL source: 'docs' for readthedocs.org; 'github' for the GitHub repo, defaults to 'docs'
213 |     :type source: str, optional
214 | :return: a valid URL pointing to the option related resources
215 | :rtype: urlpath.URL
216 | """
217 |
218 | url_base = dict_base[source]
219 |
220 | url_page = choose_page(
221 | str_opt, set_site, set_runcontrol, set_initcond, source=source
222 | )
223 | # print('str_opt', str_opt, url_base, url_page)
224 | str_opt_x = form_option(str_opt)
225 | url_opt = url_base / (url_page + str_opt_x)
226 | return url_opt
227 |
228 |
229 | # for source in [
230 | # # 'docs',
231 | # 'github',
232 | # ]:
233 | # print(source)
234 | # for x in sorted(list(set_site)+list(set_runcontrol)+list(set_initcond)):
235 | # print()
236 | # print(gen_url_option(x, source=source))
237 |
238 | # # webbrowser.open(str(gen_url_option(x, source=source)))
239 |
240 | # gen_url_option('sss', source='github')
241 | # gen_url_option('sss', source='github').get().ok
242 | # %%
243 | # test connectivity of all generated option URLs
244 | # for opt in list(set_initcond)+list(set_runcontrol):
245 | # for source in ['github', 'docs']:
246 | # url = gen_url_option(opt, source=source)
247 | # if not url.get().ok:
248 | # print()
249 | # print(opt)
250 | # print(url)
251 |
252 | # %%
253 | def parse_block(block):
254 | xx = block.reset_index(drop=True)
255 | name_block = xx.loc[0].replace(".. option::", "").strip()
256 | ind_field = xx.index[xx.str.startswith(":")]
257 | list_field = [
258 | xx.iloc[slice(*x)].str.strip().reset_index(drop=True)
259 | for x in zip(ind_field, list(ind_field[1:]) + [None])
260 | ]
261 | name_field = [field.loc[0].replace(":", "") for field in list_field]
262 | content_field = [field.loc[1:].str.join("") for field in list_field]
263 | ser_field = pd.Series(
264 | {field.loc[0].replace(":", ""): " ".join(field.loc[1:]) for field in list_field}
265 | ).rename(name_block)
266 | return ser_field
267 |
268 |
269 | def parse_option_rst(path_rst):
270 | ser_opts = pd.read_csv(path_rst, sep=r"\n", skipinitialspace=True, engine="python")
271 | ser_opts = ser_opts.iloc[:, 0]
272 | ind_opt = ser_opts.index[ser_opts.str.contains(".. option::")]
273 | ser_opt_name = ser_opts[ind_opt].str.replace(".. option::", "").str.strip()
274 | list_block_opt = [
275 | ser_opts.loc[slice(*x)] for x in zip(ind_opt, list(ind_opt[1:]) + [None])
276 | ]
277 | df_opt = pd.concat([parse_block(block) for block in list_block_opt], axis=1).T
278 | return df_opt
279 |
280 |
281 | # url_test = gen_url_option('pavedstate', source='github')
282 | # parse_option_rst(url_test)
283 |
284 |
285 | # %%
286 |
--------------------------------------------------------------------------------
/docs/source/sample_run:
--------------------------------------------------------------------------------
1 | ../../src/supy/sample_run
--------------------------------------------------------------------------------
/docs/source/tutorial/tutorial.rst:
--------------------------------------------------------------------------------
1 | .. _tutorial_index:
2 |
3 | Tutorials
4 | =========
5 |
6 |
7 | To familiarise users with SuPy urban climate modelling and to demonstrate the functionality of SuPy, we provide the following tutorials in `Jupyter notebooks `_:
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 |
12 | quick-start
13 | impact-studies
14 | external-interaction
15 | setup-own-site
16 |
17 | .. Note::
18 |     1. The Anaconda distribution is suggested as the scientific Python 3 environment because it bundles the necessary packages. Please follow the official guide for its `installation `__.
19 |     2. Users with less experience in Python are advised to work through the following section before using SuPy.
20 |
21 | Python 101 before SuPy
22 | ----------------------
23 |
24 | Admittedly, this header is somewhat misleading: given the enormity of Python, getting this section *correct* is more challenging than coding SuPy per se. We therefore provide a collection of data-analysis-oriented links to useful Python resources to help novices start using Python and **then** SuPy.
25 |
26 | - `The gist of Python `_: a quick introductory blog that covers Python basics for data analysis.
27 |
28 | - Jupyter Notebook: Jupyter Notebook provides a powerful notebook-based data analysis environment that SuPy users are strongly encouraged to use. Jupyter notebooks can run in browsers (desktop, mobile) either through a straightforward local configuration or on remote servers with pre-set environments (e.g., `Google Colaboratory `_, `Microsoft Azure Notebooks `_). In addition, Jupyter notebooks offer excellent shareability by keeping source code and detailed notes in one place, which helps users organise their computational work.
29 |
30 | - Installation
31 |
32 |     Jupyter Notebook can be installed with pip on any desktop/server system and opens .ipynb notebook files locally:
33 |
34 | .. code-block:: shell
35 |
36 | python3 -m pip install jupyter -U
37 |
38 |   - Extensions: To boost productivity in your Jupyter Notebook environment, check out the `Unofficial Jupyter Notebook Extensions `_.
39 | Quick introductory blogs can be found `here `_ and `here `_.
40 |
41 |
42 | - pandas: `pandas` is heavily used in SuPy, so a good understanding of pandas is essential to SuPy workflows (see the short resampling sketch after the links below).
43 |
44 | - Introductory blogs:
45 |
46 | * `Quick dive into Pandas for Data Science `_: introduction to pandas.
47 | * `Basic Time Series Manipulation with Pandas `_: pandas-based time series manipulation.
48 | * `Introduction to Data Visualization in Python `_: plotting using pandas and related libraries.
49 |
50 | - A detailed tutorial in Jupyter Notebooks:
51 |
52 | * `Introduction to pandas `_
53 | * `pandas fundamentals `_
54 | * `Data Wrangling with pandas `_
55 |
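56 | As a taster, here is a minimal, self-contained sketch of the pandas-style time-series resampling that recurs throughout SuPy workflows (the variable name ``T2`` is purely illustrative):
57 | 
58 | .. code-block:: python
59 | 
60 |    import numpy as np
61 |    import pandas as pd
62 | 
63 |    # a synthetic 5-min air temperature series with a datetime index
64 |    idx = pd.date_range("2012-01-01", periods=288, freq="5T")
65 |    ser_t2 = pd.Series(20 + np.random.randn(288), index=idx, name="T2")
66 | 
67 |    # hourly means; right-closed/right-labelled, the convention SuPy output follows
68 |    ser_t2_hourly = ser_t2.resample("60T", closed="right", label="right").mean()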
--------------------------------------------------------------------------------
/env.yml:
--------------------------------------------------------------------------------
1 | name: supy
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | - python=3.9
7 | - numpy
8 | - pandas
9 | - xarray
10 | - matplotlib
11 | - seaborn
12 | - scipy
13 | - jupyter
14 | - scikit-learn
15 | - pytables
16 | - pip
17 | - click
18 | - f90nml
19 | - dask[complete]
20 | - lmfit
21 | - pytest
22 | - black
23 | - floweaver
24 | - pip:
25 | - atmosp
26 |
27 |
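28 | # a typical workflow to build and use this environment (standard conda commands):
29 | #   conda env create -f env.yml
30 | #   conda activate supy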
--------------------------------------------------------------------------------
/sample_plot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/sample_plot.png
--------------------------------------------------------------------------------
/src/Makefile:
--------------------------------------------------------------------------------
1 | # -*- makefile -*-
2 | .PHONY: test clean upload
3 |
4 | # OS-specific configurations
5 | ifeq ($(OS),Windows_NT)
6 | PYTHON_exe = python.exe
7 |
8 | else
9 | UNAME_S := $(shell uname -s)
10 |
11 |
12 |
13 | ifeq ($(UNAME_S),Linux) # Linux
14 | PYTHON_exe=python
15 | endif
16 |
17 | ifeq ($(UNAME_S),Darwin) # macOS
18 | PYTHON_exe=python
19 |
20 | endif
21 |
22 | endif
23 |
24 |
25 | PYTHON := $(if $(PYTHON_exe),$(PYTHON_exe),python)
26 | # All the files which include modules used by other modules (these therefore
27 | # need to be compiled first)
28 |
29 | MODULE = supy
30 |
31 |
32 | # install package in dev mode and do pytest
33 | test:
34 | pip install -e .
35 | pytest -s ${MODULE}/test
36 |
37 |
38 | # If wanted, clean all build artefacts (compiled extensions, caches, logs) after build
39 | clean:
40 | rm -rf ${MODULE}/*.so ${MODULE}/*.pyc ${MODULE}/__pycache__ ${MODULE}/*.dSYM
41 | rm -rf ${MODULE}/test/*.pyc ${MODULE}/test/__pycache__
42 | rm -rf ${MODULE}/util/*.pyc ${MODULE}/util/__pycache__
43 | rm -rf ${MODULE}/cmd/*.pyc ${MODULE}/cmd/__pycache__
44 | rm -rf SuPy.log SuPy.log.*
45 | rm -rf build dist
46 | rm -rf .eggs .pytest_cache
47 |
48 | # upload wheels to pypi using twine
49 | upload:
50 | twine upload --skip-existing dist/*whl
51 |
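52 | # typical invocations of the targets above:
53 | #   make test    # editable install + pytest
54 | #   make clean   # remove build artefacts and logs
55 | #   make upload  # publish built wheels to PyPI via twine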
--------------------------------------------------------------------------------
/src/data_test/multi-grid/51.5N0.125W-201310-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/multi-grid/51.5N0.125W-201310-ml.nc
--------------------------------------------------------------------------------
/src/data_test/multi-grid/51.5N0.125W-201310-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/multi-grid/51.5N0.125W-201310-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/multi-grid/51.5N0.125W-201311-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/multi-grid/51.5N0.125W-201311-ml.nc
--------------------------------------------------------------------------------
/src/data_test/multi-grid/51.5N0.125W-201311-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/multi-grid/51.5N0.125W-201311-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/multi-grid/51.5N0.125W-201312-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/multi-grid/51.5N0.125W-201312-ml.nc
--------------------------------------------------------------------------------
/src/data_test/multi-grid/51.5N0.125W-201312-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/multi-grid/51.5N0.125W-201312-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200301-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200301-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200301-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200301-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200302-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200302-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200302-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200302-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200303-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200303-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200303-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200303-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200304-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200304-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200304-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200304-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200305-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200305-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200305-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200305-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200306-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200306-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200306-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200306-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200307-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200307-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200307-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200307-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200308-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200308-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200308-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200308-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200309-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200309-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200309-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200309-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200310-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200310-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200310-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200310-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200311-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200311-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200311-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200311-sfc.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200312-ml.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200312-ml.nc
--------------------------------------------------------------------------------
/src/data_test/single-grid/57.75N12.0E-200312-sfc.nc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UMEP-dev/SuPy/0947227e7b86e31d1e073d816b1a0a959f5e1c35/src/data_test/single-grid/57.75N12.0E-200312-sfc.nc
--------------------------------------------------------------------------------
/src/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "wheel",
4 | "setuptools<60",
5 | "oldest-supported-numpy",
6 | "setuptools-git-versioning",
7 | "setuptools_scm[toml]>=6.2",
8 | ]
9 | # note: the recommended "build_meta" previously did not work with the f2py-based build; it is enabled here
10 | build-backend = "setuptools.build_meta"
11 |
12 | [project]
13 | name="supy"
14 | dynamic = ["version"]
15 | requires-python = ">=3.9"
16 |
17 | # [tool.setuptools.packages]
18 | # find = {"where"= "src"}
19 | # [tool.setuptools-git-versioning]
20 | # enabled = true
21 | # template = "{tag}"
22 | # dev_template = "{tag}.post{ccount}"
23 | # # ignore "dirty" flag when building using CI for windows: seems to be a bug in the versioning tool
24 | # dirty_template = "{tag}"
25 | # version_callback = "setup:get_version"
26 | [tool.setuptools.packages.find]
27 | where = ["src"]
28 |
29 | [tool.setuptools.package-data]
30 | supy = ["*.txt", "*.json"]
31 |
32 | [tool.distutils.bdist_wheel]
33 | universal = true
34 |
35 | [tool.setuptools_scm]
36 | write_to = "src/supy/_version_scm.py"
37 | root = ".."
38 |
39 |
40 | # [tool.cibuildwheel]
41 | # # skip 32-bit and pypy builds
42 | # skip = ["*-win32", "*-manylinux_i686", "pp*", '*-musllinux*']
43 | # # build-frontend = "pip"
44 | # before-build = ["git describe --tags --always --dirty"]
45 |
46 | # # Increase pip debugging output
47 | # build-verbosity = 0
48 |
49 | # test-requires = "pytest"
50 | # test-command = 'pytest -s "{package}/supy_driver/test"'
51 |
52 |
53 | # [tool.cibuildwheel.linux]
54 | # # Install system library
55 | # before-all = ["yum install -y glibc-static", "make suews"]
56 | # archs = ["auto64"]
57 |
58 | # [tool.cibuildwheel.macos]
59 | # # before-all = [
60 | # # 'ln -sf "$(which gfortran-10)" /usr/local/bin/gfortran',
61 | # # "make suews",
62 | # # ]
63 | # archs = ["auto64"]
64 | # skip = ['cp311-macosx*','pp*']
65 |
66 |
67 | # [tool.cibuildwheel.windows]
68 | # before-all = ["make suews"]
69 | # before-build = [
70 | # "cd {package}",
71 | # "python -m pip install numpy",
72 | # "python build-win-def.py",
73 | # "git status",
74 | # # "python -m pip install delvewheel"
75 | # ]
76 | # archs = ["auto64"]
77 | # repair-wheel-command = "delvewheel repair -w {dest_dir} {wheel}"
78 |
--------------------------------------------------------------------------------
/src/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | from setuptools import setup
4 | import json
5 | from pathlib import Path
6 |
7 | # write version info using git commit
8 | import subprocess
9 | import warnings
10 | import re
11 |
12 | ISRELEASED = True
13 | # if a release, use a strict requirement for supy-driver; otherwise, use a loose requirement
14 | DRIVER_REQ = "supy_driver==2021a15" if ISRELEASED else "supy_driver"
15 | # FULLVERSION += '.dev'
16 |
17 | # pipe = None
18 | # p_fn_ver = Path("./supy/supy_version.json")
19 |
20 | # # force remove the version info file
21 | # flag_dirty = False
22 |
23 | # for cmd in ["git", "/usr/bin/git", "git.cmd"]:
24 |
25 | # try:
26 | # pipe = subprocess.Popen(
27 | # [cmd, "describe", "--tags", "--match", "2[0-9]*", "--dirty=-dirty"],
28 | # stdout=subprocess.PIPE,
29 | # )
30 | # (sout, serr) = pipe.communicate()
31 | # # parse version info from git
32 | # list_str_ver = sout.decode("utf-8").strip().split("-")
33 |
34 | # if list_str_ver[-1].lower() == "dirty":
35 | # flag_dirty = True
36 | # # remove the "dirty" part from version info list
37 | # list_str_ver = list_str_ver[:-1]
38 |
39 | # ver_main = list_str_ver[0]
40 | # print("ver_main", ver_main)
41 | # if len(list_str_ver) > 1:
42 | # ver_post = list_str_ver[1]
43 | # ver_git_commit = list_str_ver[2]
44 | # else:
45 | # ver_post = ""
46 | # ver_git_commit = ""
47 |
48 | # # save version info to json file
49 | # p_fn_ver.unlink(missing_ok=True)
50 | # with open(p_fn_ver, "w") as f:
51 | # json.dump(
52 | # {
53 | # "version": ver_main + ("" if ISRELEASED else ".dev"),
54 | # "iter": ver_post,
55 | # "git_commit": ver_git_commit
56 | # + (
57 | # "-dirty"
58 | # if (flag_dirty and len(ver_git_commit) > 0)
59 | # else (
60 | # "dirty" if (flag_dirty and len(ver_git_commit) == 0) else ""
61 | # )
62 | # ),
63 | # },
64 | # f,
65 | # )
66 | # if pipe.returncode == 0:
67 | # print(f"in {cmd}, git version info saved to", p_fn_ver)
68 | # break
69 | # except Exception as e:
70 | # pass
71 |
72 | # if pipe is None or pipe.returncode != 0:
73 | # # no git, or not in git dir
74 |
75 | # if p_fn_ver.exists():
76 | # warnings.warn(
77 | # f"WARNING: Couldn't get git revision, using existing {p_fn_ver.as_posix()}"
78 | # )
79 | # write_version = False
80 | # else:
81 | # warnings.warn(
82 | # "WARNING: Couldn't get git revision, using generic " "version string"
83 | # )
84 | # else:
85 | # # have git, in git dir, but may have used a shallow clone (travis)
86 | # rev = sout.strip()
87 | # rev = rev.decode("ascii")
88 |
89 | # if p_fn_ver.exists():
90 | # with open(p_fn_ver, "r") as f:
91 | # dict_ver = json.load(f)
92 | # ver_main = dict_ver["version"]
93 | # ver_post = dict_ver["iter"]
94 | # ver_git_commit = dict_ver["git_commit"]
95 |
96 | # # print(dict_ver)
97 | # __version__ = "-".join(filter(None, [ver_main, ver_post, ver_git_commit]))
98 | # # raise ValueError(f"version info found: {__version__}")
99 | # else:
100 | # __version__ = "0.0.0"
101 | # raise ValueError("version info not found")
102 |
103 |
104 | def readme():
105 | try:
106 | with open("../README.md", encoding="utf-8") as f:
107 | return f.read()
108 |     except OSError:
109 |         return "SuPy package"
110 |
111 |
112 | setup(
113 | name="supy",
114 | # version=__version__,
115 | description="the SUEWS model that speaks python",
116 | long_description=readme(),
117 | long_description_content_type="text/markdown",
118 | url="https://github.com/UMEP-Dev/SuPy",
119 | author=", ".join(
120 | [
121 | "Dr Ting Sun",
122 | "Dr Hamidreza Omidvar",
123 | "Prof Sue Grimmond",
124 | ]
125 | ),
126 | author_email=", ".join(
127 | [
128 | "ting.sun@ucl.ac.uk",
129 | "h.omidvar@reading.ac.uk",
130 | "c.s.grimmond@reading.ac.uk",
131 | ]
132 | ),
133 | license="GPL-V3.0",
134 | packages=["supy"],
135 | package_data={
136 | "supy": [
137 | "sample_run/*",
138 | "sample_run/Input/*",
139 | "*.json",
140 | "util/*",
141 | "cmd/*",
142 | ]
143 | },
144 | # distclass=BinaryDistribution,
145 | ext_modules=[],
146 | install_requires=[
147 | "pandas< 1.5; python_version <= '3.9'", # to fix scipy dependency issue in UMEP under QGIS3 wtih python 3.9
148 | "pandas; python_version > '3.9'",
149 | "matplotlib",
150 | "chardet",
151 | "scipy",
152 | "dask", # needs dask for parallel tasks
153 | "f90nml", # utility for namelist files
154 | "seaborn", # stat plotting
155 | "atmosp", # my own `atmosp` module forked from `atmos-python`
156 | "cdsapi", # ERA5 data
157 | "xarray", # utility for high-dimensional datasets
158 | "multiprocess", # a better multiprocessing library
159 | "click", # cmd tool
160 | "lmfit", # optimiser
161 | "numdifftools", # required by `lmfit` for uncertainty estimation
162 | "pvlib", # TMY-related solar radiation calculations
163 | "platypus-opt==1.0.4", # a multi-objective optimiser
164 | DRIVER_REQ, # a separate f2py-based driver
165 | ],
166 | extras_require={
167 | "hdf": [
168 | "tables", # for dumping in hdf5
169 | ]
170 | },
171 | entry_points={
172 | # command line tools
173 | "console_scripts": [
174 | "suews-run=supy.cmd.SUEWS:SUEWS",
175 | "suews-convert=supy.cmd.table_converter:convert_table_cmd",
176 | ]
177 | },
178 | include_package_data=True,
179 | python_requires="~=3.7",
180 | classifiers=[
181 | "Programming Language :: Python :: 3 :: Only",
182 | "Programming Language :: Python :: 3.7",
183 | "Programming Language :: Python :: 3.8",
184 | "Programming Language :: Python :: 3.9",
185 | "Programming Language :: Python :: 3.10",
186 | "Intended Audience :: Education",
187 | "Intended Audience :: Science/Research",
188 | "Operating System :: MacOS :: MacOS X",
189 | "Operating System :: Microsoft :: Windows",
190 | "Operating System :: POSIX :: Linux",
191 | ],
192 | zip_safe=False,
193 | )
194 |
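195 | # For local development, an editable install mirrors the `test` target in src/Makefile:
196 | #   pip install -e .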
--------------------------------------------------------------------------------
/src/supy/__init__.py:
--------------------------------------------------------------------------------
1 | ###########################################################################
2 | # SuPy: SUEWS that speaks Python
3 | # Authors:
4 | # Ting Sun, ting.sun@reading.ac.uk
5 | # History:
6 | # 20 Jan 2018: first alpha release
7 | # 01 Feb 2018: performance improvement
8 | # 03 Feb 2018: improvement in output processing
9 | # 08 Mar 2018: pypi packaging
10 | # 01 Jan 2019: public release
11 | # 22 May 2019: restructure of module layout
12 | # 02 Oct 2019: logger restructured
13 | ###########################################################################
14 |
15 |
16 | # core functions
17 | from ._supy_module import (
18 | init_supy,
19 | load_SampleData,
20 | load_forcing_grid,
21 | run_supy,
22 | save_supy,
23 | check_forcing,
24 | check_state,
25 | )
26 |
27 |
28 | # utilities
29 | from . import util
30 |
31 |
32 | # version info
33 | from ._version import show_version, __version__, __version_driver__
34 |
35 | from .cmd import SUEWS
36 |
37 | # module docs
38 | __doc__ = """
39 | supy - SUEWS that speaks Python
40 | ===============================
41 |
42 | **SuPy** is a Python-enhanced urban climate model with SUEWS as its computation core.
43 |
44 | """
45 |
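46 | # a minimal usage sketch with the bundled sample data (see the tutorials for details):
47 | # >>> import supy as sp
48 | # >>> df_state_init, df_forcing = sp.load_SampleData()
49 | # >>> df_output, df_state_final = sp.run_supy(df_forcing, df_state_init)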
--------------------------------------------------------------------------------
/src/supy/_env.py:
--------------------------------------------------------------------------------
1 | from importlib.resources import files
2 | from logging.handlers import TimedRotatingFileHandler
3 | import sys
4 | import logging
5 | import inspect
6 | from pathlib import Path
7 | import tempfile
8 |
9 |
10 |
11 | ########################################################################
12 | # this file provides variable and functions useful for the whole module.
13 | ########################################################################
14 | # get Traversable object for loading resources in this package
15 | # this can be used similarly as `pathlib.Path` object
16 | trv_supy_module = files("supy")
17 |
18 | # set up logger format, note `u` to guarantee UTF-8 encoding
19 | FORMATTER = logging.Formatter(u"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
20 |
21 | # log file name
22 | LOG_FILE = "SuPy.log"
23 |
24 |
25 | def get_console_handler():
26 | console_handler = logging.StreamHandler(sys.stdout)
27 | console_handler.setFormatter(FORMATTER)
28 | return console_handler
29 |
30 |
31 | def get_file_handler():
32 | try:
33 | path_logfile = Path(LOG_FILE)
34 | path_logfile.touch()
35 | except Exception:
36 | tempdir = tempfile.gettempdir()
37 | path_logfile = Path(tempdir) / LOG_FILE
38 |
39 | file_handler = TimedRotatingFileHandler(
40 | path_logfile, when="midnight", encoding="utf-8",
41 | )
42 | file_handler.setFormatter(FORMATTER)
43 | return file_handler
44 |
45 |
46 | def get_logger(logger_name, level=logging.DEBUG):
47 | logger = logging.getLogger(logger_name)
48 | # better to have too much log than not enough
49 | logger.setLevel(level)
50 | logger.addHandler(get_console_handler())
51 | logger.addHandler(get_file_handler())
52 |
53 | # with this pattern, it's rarely necessary to propagate the error up to parent
54 | logger.propagate = False
55 | return logger
56 |
57 |
58 | logger_supy = get_logger("SuPy", logging.INFO)
59 | logger_supy.debug("a debug message from SuPy")
60 |
61 |
62 | if sys.version_info >= (3, 8):
63 | from importlib import metadata
64 | else:
65 | from importlib_metadata import metadata
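66 | 
67 | # a hedged usage sketch: submodules may obtain their own logger via the helper above, e.g.
68 | # logger_x = get_logger("SuPy.custom", logging.DEBUG)  # "SuPy.custom" is an illustrative name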
--------------------------------------------------------------------------------
/src/supy/_misc.py:
--------------------------------------------------------------------------------
1 | import urllib.request, urllib.error
2 | import os
3 |
4 | # from pathlib import Path
5 |
6 |
7 | ##############################################################################
8 | # an auxiliary function to resolve path case issues
9 | # credit: https://stackoverflow.com/a/8462613/920789
10 | def path_insensitive(path):
11 | """
12 | Get a case-insensitive path for use on a case sensitive system.
13 |
14 | >>> path_insensitive('/Home')
15 | '/home'
16 | >>> path_insensitive('/Home/chris')
17 | '/home/chris'
18 | >>> path_insensitive('/HoME/CHris/')
19 | '/home/chris/'
20 | >>> path_insensitive('/home/CHRIS')
21 | '/home/chris'
22 | >>> path_insensitive('/Home/CHRIS/.gtk-bookmarks')
23 | '/home/chris/.gtk-bookmarks'
24 | >>> path_insensitive('/home/chris/.GTK-bookmarks')
25 | '/home/chris/.gtk-bookmarks'
26 | >>> path_insensitive('/HOME/Chris/.GTK-bookmarks')
27 | '/home/chris/.gtk-bookmarks'
28 | >>> path_insensitive("/HOME/Chris/I HOPE this doesn't exist")
29 | "/HOME/Chris/I HOPE this doesn't exist"
30 | """
31 |
32 | return _path_insensitive(path) or path
33 |
34 |
35 | def _path_insensitive(path):
36 | """
37 | Recursive part of path_insensitive to do the work.
38 | """
39 | path = str(path)
40 | if path == "" or os.path.exists(path):
41 | return path
42 |
43 | base = os.path.basename(path) # may be a directory or a file
44 | dirname = os.path.dirname(path)
45 |
46 | suffix = ""
47 | if not base: # dir ends with a slash?
48 | if len(dirname) < len(path):
49 | suffix = path[: len(path) - len(dirname)]
50 |
51 | base = os.path.basename(dirname)
52 | dirname = os.path.dirname(dirname)
53 |
54 | if not os.path.exists(dirname):
55 | dirname = _path_insensitive(dirname)
56 | if not dirname:
57 | return
58 |
59 | # at this point, the directory exists but not the file
60 |
61 | try: # we are expecting dirname to be a directory, but it could be a file
62 | files = os.listdir(dirname)
63 | except OSError:
64 | return
65 |
66 | baselow = base.lower()
67 | try:
68 | basefinal = next(fl for fl in files if fl.lower() == baselow)
69 | except StopIteration:
70 | return
71 |
72 | if basefinal:
73 | return os.path.join(dirname, basefinal) + suffix
74 | else:
75 | return
76 |
77 |
78 | ##############################################################################
79 | # an auxiliary function to test URL connectivity
80 | # credit: https://stackoverflow.com/a/8462613/920789
81 | # https://gist.github.com/dehowell/884204#gistcomment-1771089
82 | def url_is_alive(url):
83 | """
84 | Checks that a given URL is reachable.
85 | :param url: A URL
86 | :rtype: bool
87 | """
88 | request = urllib.request.Request(url)
89 | request.get_method = lambda: "HEAD"
90 |
91 | try:
92 | urllib.request.urlopen(request)
93 | return True
94 |     except urllib.error.URLError:
95 | return False
96 |
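97 | # a hedged usage sketch of the helpers above:
98 | # >>> url_is_alive("https://github.com")   # True if the HEAD request succeeds
99 | # >>> path_insensitive("/HOME/Chris")      # case-resolved path if it exists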
--------------------------------------------------------------------------------
/src/supy/_post.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from supy_driver import suews_driver as sd
4 |
5 |
6 | ##############################################################################
7 | # post-processing part
8 | # get variable information from Fortran
9 | def get_output_info_df():
10 | size_var_list = sd.output_size()
11 | var_list_x = [np.array(sd.output_name_n(i)) for i in np.arange(size_var_list) + 1]
12 |
13 | df_var_list = pd.DataFrame(var_list_x, columns=["var", "group", "aggm", "outlevel"])
14 | df_var_list = df_var_list.applymap(lambda x: x.decode().strip())
15 | df_var_list_x = df_var_list.replace(r"^\s*$", np.nan, regex=True).dropna()
16 | var_dfm = df_var_list_x.set_index(["group", "var"])
17 | return var_dfm
18 |
19 |
20 | # get variable info as a DataFrame
21 | # save `var_df` for later use
22 | var_df = get_output_info_df()
23 |
24 | # dict mapping lowercase group names to their original form in `var_df`
25 | var_df_lower = {group.lower(): group for group in var_df.index.levels[0].str.strip()}
26 |
27 | # generate dict of functions to apply for each variable
28 | dict_func_aggm = {
29 | "T": "last",
30 | "A": "mean",
31 | "S": "sum",
32 | "L": "last",
33 | }
34 | var_df["func"] = var_df.aggm.apply(lambda x: dict_func_aggm[x])
35 |
36 | # dict of resampling rules:
37 | # {group: {var: agg_method}}
38 | dict_var_aggm = {
39 | group: var_df.loc[group, "func"].to_dict() for group in var_df.index.levels[0]
40 | }
41 |
42 |
43 | # generate index for variables in different model groups
44 | def gen_group_cols(group_x):
45 | # get correct group name by cleaning and swapping case
46 | group = group_x.replace("dataoutline", "").replace("line", "")
47 | # print group
48 | group = var_df_lower[group]
49 | header_group = np.apply_along_axis(
50 | list, 0, var_df.loc[["datetime", group]].index.values
51 | )[:, 1]
52 |
53 | # generate MultiIndex if not `datetimeline`
54 | if not group_x == "datetimeline":
55 | index_group = pd.MultiIndex.from_product(
56 | [[group], header_group], names=["group", "var"], sortorder=None
57 | )
58 | else:
59 | index_group = header_group
60 |
61 | return index_group
62 |
63 |
64 | # merge_grid: useful for both `dict_output` and `dict_state`
65 | def pack_df_grid(dict_output):
66 | # pack all grid and times into index/columns
67 | df_xx = pd.DataFrame.from_dict(dict_output, orient="index")
68 | # pack
69 | df_xx0 = df_xx.applymap(pd.Series)
70 | df_xx1 = df_xx0.applymap(pd.DataFrame.from_dict)
71 | df_xx2 = pd.concat(
72 | {
73 | grid: pd.concat(df_xx1[grid].to_dict()).unstack().dropna(axis=1)
74 | for grid in df_xx1.columns
75 | }
76 | )
77 | # drop redundant levels
78 | df_xx2.columns = df_xx2.columns.droplevel(0)
79 | # regroup by `grid`
80 | df_xx2.index.names = ["grid", "time"]
81 | gb_xx2 = df_xx2.groupby(level="grid")
82 | # merge results of each grid
83 | xx3 = gb_xx2.agg(lambda x: tuple(x.values)).applymap(np.array)
84 |
85 | return xx3
86 |
87 |
88 | # generate MultiIndex for variable groups
89 | def gen_index(varline_x):
90 | var_x = varline_x.replace("dataoutline", "").replace("line", "")
91 | group = var_df_lower[var_x]
92 | var = var_df.loc[group].index.tolist()
93 | mindex = pd.MultiIndex.from_product([[group], var], names=["group", "var"])
94 | return mindex
95 |
96 |
97 | # generate one MultiIndex from a whole dict
98 | def gen_MultiIndex(dict_x):
99 | x_keys = dict_x.keys()
100 | mindex = pd.concat([gen_index(k).to_frame() for k in x_keys]).index
101 | return mindex
102 |
103 |
104 | # generate one Series from a dict entry
105 | def gen_Series(dict_x, varline_x):
106 | m_index = gen_index(varline_x)
107 | res_Series = pd.Series(dict_x[varline_x], index=m_index)
108 | return res_Series
109 |
110 |
111 | # merge a whole dict into one Series
112 | def comb_gen_Series(dict_x):
113 | x_keys = dict_x.keys()
114 | res_Series = pd.concat([gen_Series(dict_x, k) for k in x_keys])
115 | return res_Series
116 |
117 |
118 | # pack up output of `run_suews`
119 | def pack_df_output(dict_output):
120 | # TODO: add output levels as in the Fortran version
121 | df_output = pd.DataFrame(dict_output).T
122 | # df_output = pd.concat(dict_output).to_frame().unstack()
123 | # set index level names
124 | index = df_output.index.set_names(["datetime", "grid"])
125 | # clean columns
126 | columns = gen_MultiIndex(df_output.iloc[0])
127 | values = np.apply_along_axis(np.hstack, 1, df_output.values)
128 | df_output = pd.DataFrame(values, index=index, columns=columns)
129 | return df_output
130 |
131 |
132 | def pack_df_state(dict_state):
133 | df_state = pd.DataFrame(dict_state).T
134 | # df_state = pd.concat(dict_state).to_frame().unstack()
135 | # set index level names
136 | df_state.index = df_state.index.set_names(["datetime", "grid"])
137 |
138 | return df_state
139 |
140 |
141 | def pack_df_output_array(dict_output_array, df_forcing):
142 | grid_list = list(dict_output_array.keys())
143 | grid_start = grid_list[0]
144 | col_df = gen_MultiIndex(dict_output_array[grid_start])
145 | dict_df = {}
146 | for grid in grid_list:
147 | array_grid = np.hstack([v[:, 5:] for v in dict_output_array[grid].values()])
148 | df_grid = pd.DataFrame(array_grid, columns=col_df, index=df_forcing.index)
149 |
150 | dict_df.update({grid: df_grid})
151 |
152 | # join results of all grids
153 | df_grid_res = pd.concat(dict_df, keys=dict_df.keys())
154 |
155 | # set index level names
156 | df_grid_res.index.set_names(["grid", "datetime"], inplace=True)
157 |
158 | return df_grid_res
159 |
160 |
161 | # resample supy output
162 | def resample_output(df_output, freq="60T", dict_aggm=dict_var_aggm):
163 |
164 | # get grid and group names
165 | list_grid = df_output.index.get_level_values("grid").unique()
166 | list_group = df_output.columns.get_level_values("group").unique()
167 |
168 | # resampling output according to different rules defined in dict_aggm
169 | # note the setting in .resample: (closed='right',label='right')
170 | # which is to conform to SUEWS convention
171 |     # that a timestamp refers to the end of the preceding period
172 | df_rsmp = pd.concat(
173 | {
174 | grid: pd.concat(
175 | {
176 | group: df_output.loc[grid, group]
177 | .resample(freq, closed="right", label="right",)
178 | .agg(dict_aggm[group])
179 | for group in list_group
180 | },
181 | axis=1,
182 | names=["group", "var"],
183 | )
184 | for grid in list_grid
185 | },
186 | names=["grid"],
187 | )
188 |
189 | # clean results
190 | df_rsmp = df_rsmp.dropna(how="all", axis=0)
191 |
192 | return df_rsmp
193 |
194 |
195 | def proc_df_rsl(df_output, debug=False):
196 | try:
197 | # if we work on the whole output with multi-index columns
198 | df_rsl_raw = df_output["RSL"].copy()
199 |     except KeyError:
200 | # if we directly work on the RSL output
201 | df_rsl_raw = df_output.copy()
202 |
203 | try:
204 | # drop unnecessary columns if existing
205 | df_rsl_data = df_rsl_raw.drop(["Year", "DOY", "Hour", "Min", "Dectime"], axis=1)
206 |     except KeyError:
207 | df_rsl_data = df_rsl_raw
208 |
209 | # retrieve data for plotting
210 | df_rsl = df_rsl_data.iloc[:, : 30 * 4]
211 | df_rsl.columns = (
212 | df_rsl.columns.str.split("_")
213 | .map(lambda l: tuple([l[0], int(l[1])]))
214 | .rename(["var", "level"])
215 | )
216 | df_rsl_proc = df_rsl.stack()
217 | if debug:
218 | # retrieve debug variables
219 | df_rsl_debug = df_rsl_data.iloc[:, 120:]
220 | return df_rsl_proc, df_rsl_debug
221 | else:
222 | return df_rsl_proc
223 |
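224 | # a hedged usage sketch for `resample_output` (df_output as returned by the model run):
225 | # >>> df_hourly = resample_output(df_output, freq="60T")  # hourly values per `dict_var_aggm` rules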
--------------------------------------------------------------------------------
/src/supy/_version.py:
--------------------------------------------------------------------------------
1 | # version info for supy
2 |
3 | from supy_driver import __version__ as sd_ver
4 | from ._env import trv_supy_module
5 | import json
6 | import sys
7 | from importlib.resources import files
8 | from ._version_scm import __version__, __version_tuple__
9 |
10 |
11 | import pandas as pd
12 |
13 | # ser_ver = pd.read_json(
14 | # path_supy_module / "supy_version.json", typ="series", convert_dates=False
15 | # )
16 | # __version__ = "-".join(
17 | # list(
18 | # filter(
19 | # None,
20 | # [
21 | # ser_ver.version,
22 | # ser_ver.iter,
23 | # ser_ver.git_commit,
24 | # ],
25 | # )
26 | # )
27 | # )
28 | __version_driver__ = sd_ver
29 |
30 |
31 | def show_version(mode="simple", as_json=False):
32 | """print `supy` and `supy_driver` version information."""
33 | dict_info_supy = {}
34 | dict_info_supy["supy"] = __version__
35 | dict_info_supy["supy_driver"] = __version_driver__
36 | # dict_info['system'] = {'platform':sys.platform,'python_version':sys.version}
37 |
38 | if as_json:
39 | if as_json is True:
40 | print(json.dumps(dict_info_supy, indent=2))
41 | pd.show_versions(as_json=as_json)
42 | else:
43 | from pathlib import Path
44 |
45 | assert isinstance(as_json, str) # needed for mypy
46 | pd.show_versions(as_json=as_json)
47 | path_json = Path(as_json)
48 | ser_json = pd.read_json(path_json, typ="series", convert_dates=False)
49 | ser_info_supy = pd.Series(dict_info_supy)
50 | ser_json = pd.concat([ser_info_supy, ser_json], axis=0)
51 | ser_json.to_json(path_json, orient="index")
52 | else:
53 |
54 | print("SuPy versions")
55 | print("-------------")
56 | print(f"supy: {__version__}")
57 | print(f"supy_driver: {__version_driver__}")
58 | if mode == "full":
59 | print("\n=================")
60 | print("SYSTEM DEPENDENCY")
61 | pd.show_versions()
62 |
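63 | # e.g. show_version() prints the supy and supy_driver versions;
64 | # show_version(mode="full") additionally reports system dependencies via pandas.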
--------------------------------------------------------------------------------
/src/supy/checker_rules_joint.json:
--------------------------------------------------------------------------------
1 | {
2 | "forcing height": {
3 | "expr": "(sfr_1*bldgh+sfr_2*dectreeh+sfr_3*evetreeh)*3 3e4 else x)
130 | )
131 |
132 | df_state_init, df_forcing_tstep = sp.load_SampleData()
133 | # multi-step results
134 | df_output_m, df_state_m = sp.run_supy(
135 | df_forcing_part, df_state_init, save_state=False
136 | )
137 | df_res_m = (
138 | df_output_m.loc[:, list_grp_test]
139 | .fillna(-999.0)
140 | .sort_index(axis=1)
141 | .round(6)
142 | .applymap(lambda x: -999.0 if np.abs(x) > 3e4 else x)
143 | )
144 | # print(df_res_m.iloc[:3, 86], df_res_s.iloc[:3, 86])
145 | pd.testing.assert_frame_equal(
146 | left=df_res_s,
147 | right=df_res_m,
148 | )
149 | # test_equal_mode = df_res_s.eq(df_res_m).all(None)
150 | # self.assertTrue(test_equal_mode)
151 |
152 | # test saving output files working
153 | def test_is_supy_save_working(self):
154 | df_state_init, df_forcing_tstep = sp.load_SampleData()
155 | # df_state_init = pd.concat([df_state_init for x in range(6)])
156 | df_forcing_part = df_forcing_tstep.iloc[: 288 * 2]
157 | t_start = time()
158 | df_output, df_state = sp.run_supy(df_forcing_part, df_state_init)
159 | t_end = time()
160 | with tempfile.TemporaryDirectory() as dir_temp:
161 | list_outfile = sp.save_supy(df_output, df_state, path_dir_save=dir_temp)
162 |
163 |         # only print to screen on macOS due to incompatibility on Windows
164 | if platform.system() == "Darwin":
165 | capturedOutput = io.StringIO() # Create StringIO object
166 | sys.stdout = capturedOutput # and redirect stdout.
167 | # Call function.
168 | n_grid = df_state_init.index.size
169 | print(f"Running time: {t_end-t_start:.2f} s for {n_grid} grids")
170 | sys.stdout = sys.__stdout__ # Reset redirect.
171 | # Now works as before.
172 | print("Captured:\n", capturedOutput.getvalue())
173 |
174 | test_non_empty = np.all([isinstance(fn, Path) for fn in list_outfile])
175 | self.assertTrue(test_non_empty)
176 |
177 | # test saving output files working
178 | def test_is_checking_complete(self):
179 | df_state_init, df_forcing_tstep = sp.load_SampleData()
180 | dict_rules = sp._check.dict_rules_indiv
181 |
182 | # variables in loaded dataframe
183 | set_var_df_init = set(df_state_init.columns.get_level_values("var"))
184 |
185 | # variables in dict_rules
186 | set_var_dict_rules = set(list(dict_rules.keys()))
187 |
188 | # common variables
189 | set_var_common = set_var_df_init.intersection(set_var_dict_rules)
190 |
191 | # test if common variables are all those in `df_state_init`
192 | test_common_all = set_var_df_init == set_var_common
193 | self.assertTrue(test_common_all)
194 |
195 | # test ERA5 forcing generation
196 | def test_gen_forcing(self):
197 | import xarray as xr
198 |
199 | # # mimic downloading
200 | # dict_era5_file = sp.util.download_era5(
201 | # 57.7081,
202 | # 11.9653,
203 | # "20030101",
204 | # "20031231",
205 | # dir_save="./data_test/single-grid",
206 | # )
207 |         # list_fn_ml = [k for k in dict_era5_file.keys() if "ml" in k]
208 | # list_fn_sfc = [k for k in dict_era5_file.keys() if "sfc" in k]
209 | # test forcing generation
210 | list_fn_fc = sp.util.gen_forcing_era5(
211 | 57.7081,
212 | 11.9653,
213 | "20030101",
214 | "20031231",
215 | dir_save="./data_test/multi-grid",
216 | force_download=False,
217 | )
218 | df_forcing = sp.util.read_suews(list_fn_fc[0])
219 | ser_tair = df_forcing.Tair
220 | # ds_sfc = xr.open_mfdataset(list_fn_sfc)
221 | # ser_t2 = ds_sfc.t2m.to_series()
222 | # res_dif = ((df_forcing.Tair + 273.15 - ser_t2.values) / 98).round(4)
223 | test_dif = -30 < ser_tair.max() < 100
224 | self.assertTrue(test_dif)
225 |
--------------------------------------------------------------------------------
/src/supy/util/_UMEP2epw.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Wed Aug 17 14:38:16 2022
5 | Author: Csilla V Gal
6 |
7 | """
8 | import pandas as pd
9 | import numpy as np
10 | import supy
11 | from metpy.units import units
12 | from metpy.calc import mixing_ratio_from_relative_humidity,specific_humidity_from_mixing_ratio
13 |
14 | from pathlib import Path
15 |
16 |
17 | def convert_UMEPf2epw(path_txt,lat,lon,tz,alt=None,path_epw=None):
18 | '''
19 | Converts and saves a UMEP-generated, year-long forcing file (.txt)
20 | with hourly resolution to .epw
21 |
22 |     NOTE: A small function is appended at the end to
23 |     clean up the .epw file formatting and its header.
24 |     It can be removed, or some aspects integrated into supy.util.gen_epw.
25 |
26 | Parameters
27 | ----------
28 | path_txt : path
29 | Path to the .txt file.
30 | lat : float
31 | Latitude of the site.
32 | lon : float
33 | Longitude of the site.
34 | tz : float
35 | Time zone expressed as a difference from UTC+0, such as -8 for UTC-8.
36 | alt : float, optional
37 | Altitude of the site.
38 | path_epw : path,optional
39 | Path to the new .epw file.
40 |
41 | Returns
42 | -------
43 | df_epw, text_meta, path_epw: Tuple[pd.DataFrame, str, Path]
44 | - df_epw: uTMY result
45 | - text_meta: meta-info text
46 | - path_epw: path to generated `epw` file
47 | '''
48 | # Dictionary for parameter naming conversion (UMEP to SUEWS)
49 |     # NOTE: To be used with the 1.A column naming option; otherwise redundant.
50 | dict_var = {'%iy': 'iy',
51 | 'id': 'id',
52 | 'it': 'it',
53 | 'imin': 'imin',
54 | 'Q*': 'QN',
55 | 'QH': 'QH',
56 | 'QE': 'QE',
57 | 'Qs': 'QS',
58 | 'Qf': 'QF',
59 | 'Wind': 'U10',
60 | 'RH': 'RH2',
61 | 'Td': 'T2',
62 | 'press': 'pres',
63 | 'rain': 'Rain',
64 | 'Kdn': 'Kdown',
65 | 'snow': 'snow',
66 | 'ldown': 'Ldown',
67 | 'fcld': 'Fcld',
68 | 'wuh': 'Wuh',
69 | 'xsmd': 'xsmd',
70 | 'lai_hr': 'LAI',
71 | 'Kdiff': 'kdiff',
72 | 'Kdir': 'kdir',
73 | 'Wd': 'wdir',
74 | 'isec': 'isec'}
75 |
76 |     # List of the standard UMEP column names (the header)
77 |     # NOTE: Can be used with the 1.A column naming option; otherwise redundant.
78 | header_UMEP = ['%iy','id','it','imin','Q*','QH','QE','Qs','Qf','Wind','RH','Td','press','rain','Kdn','snow','ldown','fcld','wuh','xsmd','lai_hr','Kdiff','Kdir','Wd']
79 |
80 |     # List of the standard UMEP column names converted to the SUEWS naming standard (the renamed header)
81 |     # NOTE: To be used with the 1.B column naming option; otherwise redundant.
82 | header_SUEWS = ['iy','id','it','imin','QN','QH','QE','QS','QF','U10','RH2','T2','pres','Rain','Kdown','snow','Ldown','Fcld','Wuh','xsmd','LAI','kdiff','kdir','wdir']
83 |
84 | if path_epw is None:
85 | path_epw = path_txt[:-4] + '.epw'
86 |
87 |
88 | # (0) Load file
89 | df_data = pd.read_table(path_txt,engine='python',delimiter=' +')
90 |
91 |
92 | # (1) Fix column names to work with supy.util.gen_epw (UMEP > SUEWS)
93 |
94 | # # Case A: with renaming (when reading table is not an issue)
95 | # # NOTE: When correctly loaded: df_data.columns == header_UMEP
96 | # df_data.rename(columns=dict_var, inplace=True)
97 |
98 | # Case B: with tour-de-force naming (when reading table might be an issue)
99 | df_data.columns = header_SUEWS
100 |
101 |
102 | # (2) Fix index
103 | df_data.index = pd.to_datetime(df_data.iy.astype(str) + ' ' + df_data.id.astype(str) + ' ' + df_data.it.astype(str) + ' ' + df_data.imin.astype(str), format = "%Y %j %H %M")
104 |
105 |
106 | # (3) Convert -999 values to NaN & remove columns with NaN
107 |     # NOTE: The parameters that supy.util.gen_epw needs are: ['Kdown','Ldown','U10','T2','RH2','Q2'].
108 | # NOTE: All extra data could be copied over.
109 | df_data.replace(-999, np.nan, inplace=True)
110 | df_data.dropna(axis=1,inplace=True)
111 |
112 |
113 | # (4) Match units with SUEWS (kPa > hPa)
114 |     # NOTE: This value is not used by supy.util.gen_epw, and the unit for atmospheric pressure in .epw is Pa.
115 | df_data['pres'] = df_data['pres']*10
116 |
117 |
118 | # (5) Calculate specific humidity (Q2) for supy.util.gen_epw
119 | temperature = df_data['T2'].values * units.degC
120 | relhum = df_data['RH2'].values * units.percent
121 | press = df_data['pres'].values * units.hPa
122 |
123 | mixing_ratio = mixing_ratio_from_relative_humidity(press,temperature,relhum)
124 | spec_hum = specific_humidity_from_mixing_ratio(mixing_ratio) * units('kg/kg')
125 | df_data["Q2"] = spec_hum.to('g/kg')
126 |
127 |
128 | # (6) Save data with supy.util.gen_epw
129 | data_epw,header_epw,path_2epw = supy.util.gen_epw(df_data,lat,lon,tz,path_epw)
130 |
131 |
132 | # (7) Patch up the generated .epw.
133 | # NOTE: This can be turned off/removed.
134 | data_epw,header_epw,path_2epw = patchup_epw(data_epw,header_epw,path_2epw,lat,lon,tz,alt)
135 |
136 | return data_epw,header_epw,path_2epw
137 |
138 | def patchup_epw(df_data,df_header,path_epw,lat,lon,tz,alt):
139 | '''
140 |     Fixes supy.util.gen_epw-generated .epw file headers, NaN values and rounding rules.
141 |     Changes follow https://bigladdersoftware.com/epx/docs/8-3/auxiliary-programs/energyplus-weather-file-epw-data-dictionary.html
142 |
143 | Parameters
144 | ----------
145 | data_epw : pd.DataFrame
146 | The .epw file data table. (Generated by supy.util.gen_epw.)
147 | header_epw : list
148 | The .epw file header. (Generated by supy.util.gen_epw.)
149 | path_epw : path
150 | The path to the generated .epw file. (After supy.util.gen_epw.)
151 | lat : float
152 | Latitude of the site. (Input to convert_UMEPf2epw function.)
153 | lon : float
154 | Longitude of the site. (Input to convert_UMEPf2epw function.)
155 | tz : float
156 | Time zone expressed as a difference from UTC+0, such as -8 for UTC-8.
157 | (Input to convert_UMEPf2epw function.)
158 | alt : float, optional
159 | Altitude of the site. (Optional input to convert_UMEPf2epw function.)
160 |
161 | Returns
162 | -------
163 | df_data,df_header,path_epw : Tuple[pd.DataFrame, list, Path]
164 | - df_data : the epw data table
165 | - df_header : the epw header
166 | - path_epw : path to generated .epw file
167 |
168 | '''
169 | if alt is None:
170 | alt = 'NA'
171 | else:
172 | alt = str(alt)
173 |
174 | # DATA
175 | # Fixing roundings
176 | df_data.iloc[:,:5] = df_data.iloc[:,:5].astype(int)
177 | df_data.iloc[:,6:8] = df_data.iloc[:,6:8].round(1)
178 | df_data.iloc[:,8:21] = df_data.iloc[:,8:21].round(0).astype(int)
179 | df_data.iloc[:,21] = df_data.iloc[:,21].round(1) # Wind Speed
180 | df_data.iloc[:,22:29] = df_data.iloc[:,22:29].round(0).astype(int)
181 | df_data.iloc[:,29] = df_data.iloc[:,29].round(3) # Aerosol Optical Depth >> 0.999
182 | df_data.iloc[:,30:] = df_data.iloc[:,30:].round(0).astype(int)
183 |
184 | # Fill whole columns with EPW missing-value flags (these fields are not provided by SUEWS output)
185 | # Global Horizontal Illuminance, Direct Normal Illuminance, Diffuse Horizontal Illuminance
186 | df_data.iloc[:,16:19] = 999999
187 | # Present Weather Observation
188 | df_data.iloc[:,26] = 9
189 | # Present Weather Codes
190 | df_data.iloc[:,27] = 999999999
191 | # Aerosol Optical Depth
192 | df_data.iloc[:,29] = 0.999
193 | # Liquid Precipitation Quantity
194 | df_data.iloc[:,34] = 99
195 |
196 |
197 | # HEADER
198 | # First Line :: LOCATION,NA,NA,NA,UMEP/SuPy,NA,47.50,19.12,1.0,163.0
199 | line_first = 'LOCATION,NA,NA,NA,UMEP/SuPy,NA,' + str(lat) + ',' + str(lon) + ',' + str(tz) + ',' + alt
200 | # Last Line :: DATA PERIODS,1,1,Year_2013,Tuesday,2013/ 1/ 1,2013/12/31
201 | line_last = 'DATA PERIODS,1,1,Year_' + df_data.index[0].strftime('%Y') + ',' + df_data.index[0].strftime('%A') + ',' + df_data.index[0].strftime('%-m/%-d') + ',' + df_data.index[-2].strftime('%-m/%-d')
202 |
203 | df_header[0] = line_first
204 | df_header[1] = 'DESIGN CONDITIONS,0'
205 | df_header[2] = 'TYPICAL/EXTREME PERIODS,0'
206 | df_header[3] = 'GROUND TEMPERATURES,0'
207 | df_header[4] = 'HOLIDAYS/DAYLIGHT SAVINGS,No,0,0,0'
208 | df_header[5] = 'COMMENTS 1,Generated by SuPy'
209 | df_header[6] = 'COMMENTS 2,Generated from user-provided data'
210 | df_header[7] = line_last
211 |
212 |
213 | # Overwrite the .epw file: header lines first, data rows appended below
214 | with open(path_epw, "w") as filenew:
215 | for line_header in df_header:
216 | filenew.write(line_header + '\n')
217 |
218 | df_data.to_csv(path_epw, header=None, index=None, mode='a', line_terminator='\n')
219 |
220 | # Convert the returned path to a Path object, matching SuPy conventions
221 | path_epw = Path(path_epw)
222 |
223 | return df_data,df_header,path_epw
224 |
225 |
226 | # EXAMPLE
227 | # lat = 47.5
228 | # lon = 19.0
229 | # tz = 1
230 | # alt = 100
231 | # path_UMEPfrcg = 'Budapest-Lorinc.txt'
232 | # path_SAVEepw = path_UMEPfrcg[:-4] + '-TEST.epw'
233 |
234 | # df_data,list_header,path_file = convert_UMEPf2epw(path_UMEPfrcg,lat,lon,tz,alt=100,path_epw=path_SAVEepw)
235 |
--------------------------------------------------------------------------------
/src/supy/util/__init__.py:
--------------------------------------------------------------------------------
1 | # supy utilities
2 |
3 |
4 | from ._tmy import gen_epw, read_epw
5 |
6 |
7 | from ._era5 import download_era5, gen_forcing_era5
8 |
9 | from ._gap_filler import fill_gap_all
10 |
11 |
12 | from ._plot import plot_comp, plot_day_clm, plot_rsl
13 |
14 |
15 | from ._ohm import derive_ohm_coef, sim_ohm, replace_ohm_coeffs
16 |
17 | from ._atm import (
18 | cal_cp,
19 | cal_dens_air,
20 | cal_des_dta,
21 | cal_dq,
22 | cal_Lob,
23 | cal_ra_obs,
24 | )
25 |
26 | from ._gs import (
27 | cal_rs_obs,
28 | cal_g_dq,
29 | cal_g_dq_noah,
30 | cal_g_kd,
31 | cal_g_kd_noah,
32 | cal_g_lai,
33 | cal_g_smd,
34 | cal_g_swc_noah,
35 | cal_g_ta,
36 | cal_g_ta_noah,
37 | cal_gs_suews,
38 | cal_gs_obs,
39 | calib_g,
40 | fit_g_kd,
41 | fit_g_smd,
42 | fit_g_ta,
43 | fit_g_dq,
44 | deriv_g_kd_noah,
45 | deriv_g_smd_noah,
46 | deriv_g_ta_noah,
47 | deriv_g_dq_noah,
48 | )
49 |
50 | from ._io import read_suews, read_forcing
51 |
52 | from ._wrf import extract_reclassification, plot_reclassification
53 |
54 | from ._roughness import cal_z0zd, cal_neutral
55 |
56 | from ._debug import diag_rsl, diag_rsl_prm, save_zip_debug
57 |
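# EXAMPLE (a minimal usage sketch of the public API re-exported above; the sample-data workflow is assumed):
# import supy
# df_state_init, df_forcing = supy.load_SampleData()
# ser_tair_filled = supy.util.fill_gap_all(df_forcing["Tair"])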
--------------------------------------------------------------------------------
/src/supy/util/_debug.py:
--------------------------------------------------------------------------------
1 | import zipfile
2 | import supy_driver as sd
3 | import pandas as pd
4 |
5 |
6 | def diag_rsl(df_forcing, df_state, df_output, include_rsl=False):
7 | """
8 | Diagnose near-surface meteorological variables using RSL scheme as in `suews_driver`.
9 |
10 | Parameters
11 | ----------
12 | df_forcing : pandas.DataFrame
13 | Forcing as used in the SuPy run.
14 | df_state : pandas.DataFrame
15 | Model states as used in the SuPy run.
16 | df_output : pandas.DataFrame
17 | Model output produced by the SuPy run.
18 | include_rsl : bool, optional
19 | Flag to determine if full RSL output at all levels should be included, by default False
20 |
21 | Returns
22 | -------
23 | df_sfc (if `include_rsl=False`) or (df_sfc, df_rsl)
24 | df_sfc: DataFrame with only near-surface level variables
25 | df_rsl: DataFrame with only RSL results at all levels
26 | """
27 | grid = df_state.index[0]
28 |
29 | # get SUEWS group from `df_output`
30 | try:
31 | df_suews = df_output.loc[grid, "SUEWS"]
32 | except Exception:
33 | df_suews = df_output
34 |
35 | sfr = df_state.sfr.values[0]
36 | zmeas = df_state.z.values[0]
37 | zh = df_state[["bldgh", "evetreeh", "dectreeh"]].dot(sfr[[1, 2, 3]])
38 | fai = df_state[["faibldg", "faievetree", "faidectree",]].dot(sfr[[1, 2, 3]])
39 | stabilitymethod = df_state.stabilitymethod.values[0]
40 | dict_sfc = {}
41 | dict_rsl = {}
42 | for idx in df_suews.index:
43 | z0m, zdm, l_mod, qh, qe = df_suews.loc[idx, ["z0m", "zdm", "Lob", "QH", "QE"]]
44 | temp_c, press_hpa, avrh, avu1 = df_forcing.loc[idx, ["Tair", "pres", "RH", "U"]]
45 | (
46 | lv_j_kg,
47 | lvs_j_kg,
48 | es_hpa,
49 | ea_hpa,
50 | vpd_hpa,
51 | vpd_pa,
52 | dq,
53 | dens_dry,
54 | avcp,
55 | avdens,
56 | ) = sd.atmmoiststab_module.cal_atmmoist(temp_c, press_hpa, avrh, 0.0)
57 | res_rsl_idx = sd.rsl_module.rslprofile(
58 | zh,
59 | z0m,
60 | zdm,
61 | l_mod,
62 | sfr,
63 | fai,
64 | stabilitymethod,
65 | avcp,
66 | lv_j_kg,
67 | avdens,
68 | avu1,
69 | temp_c,
70 | avrh,
71 | press_hpa,
72 | zmeas,
73 | qh,
74 | qe,
75 | )
76 | dict_sfc.update({idx.isoformat(): res_rsl_idx[:4]})
77 | dict_rsl.update({idx.isoformat(): res_rsl_idx[4:]})
78 |
79 | # post-process results
80 | df_sfc = pd.DataFrame.from_dict(
81 | dict_sfc, orient="index", columns=["T2", "q2", "U10", "RH2"]
82 | )
83 | df_sfc.index = pd.to_datetime(df_sfc.index)
84 | df_rsl = pd.DataFrame.from_dict(dict_rsl, orient="index")
85 | df_rsl.index = pd.to_datetime(df_rsl.index)
86 |
87 | if include_rsl:
88 | return df_sfc, df_rsl
89 | else:
90 | return df_sfc
91 |
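# EXAMPLE (hypothetical usage; assumes a completed SuPy run providing `df_forcing` and `df_state_init`):
# import supy
# df_output, df_state_final = supy.run_supy(df_forcing, df_state_init)
# df_sfc = diag_rsl(df_forcing, df_state_init, df_output)
# df_sfc, df_rsl = diag_rsl(df_forcing, df_state_init, df_output, include_rsl=True)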
92 |
93 | def diag_rsl_prm(df_state, df_output):
94 | """
95 | Calculate parameters used in RSL scheme.
96 |
97 | Parameters
98 | ----------
99 | df_state : pandas.DataFrame
100 | Model states as used in the SuPy run.
101 | df_output : pandas.DataFrame
102 | Model output produced by the SuPy run.
103 |
104 | Returns
105 | -------
106 | df_prm: pandas.DataFrame
107 | DataFrame of RSL-scheme parameters at each timestep:
108 | l_mod_rsl, zh_rsl, lc, beta, zd, z0, elm, scc, f, PAI.
109 | """
110 | grid = df_state.index[0]
111 |
112 | # get SUEWS group from `df_output`
113 | try:
114 | df_suews = df_output.loc[grid, "SUEWS"]
115 | except Exception:
116 | df_suews = df_output
117 |
118 | # print(df_suews.head())
119 |
120 | zh_min = 0.15
121 | sfr = df_state.loc[:, "sfr"]
122 | sfr_obj = sfr.iloc[:, 1:4].values
123 | zmeas = df_state.z.values
124 | fai = df_state.loc[:, ["faibldg", "faievetree", "faidectree"]].values
125 | h_obj = df_state.loc[:, ["bldgh", "evetreeh", "dectreeh"]].values
126 | zh = pd.Series(
127 | [pd.Series(h).dot(sfr) for h, sfr in zip(h_obj, sfr_obj)], index=df_state.index
128 | )
129 | fai = pd.Series(
130 | [pd.Series(fai).dot(sfr) for fai, sfr in zip(fai, sfr_obj)],
131 | index=df_state.index,
132 | )
133 | stabilitymethod = df_state.stabilitymethod.values
134 |
135 | dict_prm = {}
136 | zh = zh.iloc[0]
137 | fai = fai.iloc[0]
138 | sfr = sfr.iloc[0]
139 | # print(zh,fai,sfr)
140 | for idx in df_suews.index:
141 | # print(df_suews.loc[idx])
142 | # print(df_suews.loc[idx, ["z0m", "zdm", "Lob", "QH", "QE"]])
143 | z0m, zdm, l_mod, qh, qe = df_suews.loc[idx, ["z0m", "zdm", "Lob", "QH", "QE"]]
144 | (
145 | l_mod_rsl,
146 | zh_rsl,
147 | lc,
148 | beta,
149 | zd,
150 | z0,
151 | elm,
152 | scc,
153 | f,
154 | PAI,
155 | ) = sd.rsl_module.rsl_cal_prms(stabilitymethod, zh, l_mod, sfr, fai,)
156 | dict_prm.update(
157 | {idx.isoformat(): [l_mod_rsl, zh_rsl, lc, beta, zd, z0, elm, scc, f, PAI]}
158 | )
159 |
160 | # post-process results
161 | df_prm = pd.DataFrame.from_dict(
162 | dict_prm,
163 | orient="index",
164 | columns=[
165 | "l_mod_rsl",
166 | "zh_rsl",
167 | "lc",
168 | "beta",
169 | "zd",
170 | "z0",
171 | "elm",
172 | "scc",
173 | "f",
174 | "PAI",
175 | ],
176 | )
177 | df_prm.index = pd.to_datetime(df_prm.index)
178 |
179 | return df_prm
180 |
181 |
182 | def save_zip_debug(df_forcing,df_state_init):
183 | import tempfile
184 | from pathlib import Path
185 |
186 | from .._version import show_version
187 |
188 | path_dir_save = Path.cwd()
189 | path_json_version = path_dir_save / "supy_info.json"
190 | try:
191 | path_json_version.touch()
192 | except Exception:
193 | tempdir = tempfile.gettempdir()
194 | path_dir_save = Path(tempdir)
195 | path_json_version = path_dir_save / path_json_version.name
196 |
197 | # save version info
198 | show_version(as_json=path_json_version.as_posix())
199 |
200 | # save forcing data
201 | path_forcing = path_dir_save / "df_forcing.pkl"
202 | df_forcing.to_pickle(path_forcing)
203 |
204 | # save state data
205 | path_state_init = path_dir_save / "df_state_init.pkl"
206 | df_state_init.to_pickle(path_state_init)
207 |
208 | # derive a short hash of the save path as a unique identifier for this run
209 | import hashlib
210 | hash_run = hashlib.md5(str(path_dir_save).encode('utf-8')).hexdigest()[:8]
211 |
212 | # bundle all files in a zip file
213 | path_zip_debug = path_dir_save / f"supy_debug-{hash_run}.zip"
214 | with zipfile.ZipFile(path_zip_debug, 'w') as myzip:
215 | myzip.write(path_forcing.as_posix(), arcname=path_forcing.name)
216 | myzip.write(path_state_init.as_posix(), arcname=path_state_init.name)
217 | myzip.write(path_json_version.as_posix(), arcname=path_json_version.name)
218 |
219 | return path_zip_debug
220 |
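# EXAMPLE (hypothetical usage; bundles forcing, initial state and version info for a bug report):
# path_zip = save_zip_debug(df_forcing, df_state_init)
# print(path_zip)  # e.g. <cwd>/supy_debug-<hash>.zip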
221 |
--------------------------------------------------------------------------------
/src/supy/util/_gap_filler.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 |
4 |
5 | # locate the first position of period with in-between gaps
6 | def loc_gap(ser_test, freq="1D", pattern="010"):
7 | rsmp = ser_test.resample(freq)
8 | ser_TF_10 = rsmp.apply(lambda ser: ser.isna().any()) * 1
9 | n_gap = ser_TF_10.sum()
10 | if cal_ratio_info(ser_test) < 0.5:
11 | raise RuntimeError(
12 | f"too many gaps (n_gap/n_period={n_gap}/{ser_TF_10.size}) to proceed! please enlarge the series to introduce more valid periods."
13 | )
14 | str_TF_10 = ser_TF_10.astype(str).str.cat()
15 | pos_gap = str_TF_10.find(pattern)
16 | loc_ser = ser_TF_10.iloc[pos_gap : pos_gap + len(pattern)].index
17 | return loc_ser
18 |
19 |
20 | # fill gap with neighbouring days
21 | def fill_gap_one(ser_test, freq="1D", pattern="010"):
22 | # resample into daily periods
23 | rsmp = ser_test.resample(freq)
24 | # locate the gaps according to gap pattern: 0 for NO gap, 1 for gapped
25 | loc_ser = loc_gap(ser_test, freq, pattern)
26 |
27 | # generator over the groups located above
28 | ser_find = (rsmp.get_group(x) for x in loc_ser)
29 | if len(loc_ser) == 0:
30 | return ser_test
31 |
32 | # assign series:
33 | # ser_prev: series prior to gapped period
34 | # ser_gap: series with gaps
35 | # ser_post: series after gapped period
36 | if pattern == "010":
37 | ser_prev, ser_gap, ser_post = ser_find
38 | elif pattern == "01":
39 | ser_prev, ser_gap = ser_find
40 | ser_post = pd.Series([], dtype=float)
41 | elif pattern == "10":
42 | ser_gap, ser_post = ser_find
43 | ser_prev = pd.Series([], dtype=float)
44 |
45 | # base series for gap filling
46 | ser_fill_base = pd.concat([ser_prev, ser_post])
47 | ser_fill = (
48 | ser_fill_base.groupby(
49 | [
50 | ser_fill_base.index.hour.rename("hr"),
51 | ser_fill_base.index.minute.rename("min"),
52 | ]
53 | )
54 | .median()
55 | .reset_index(drop=True)
56 | )
57 | try:
58 | ser_fill.index = ser_gap.index
59 | except ValueError:
60 | print(ser_test)
61 | print(pattern)
62 | print(ser_fill_base)
63 | print(ser_gap)
64 |
65 | # calculate rescaling factor with enough values to robustly rescale
66 | if (pattern == "010") and (ser_gap.count() > len(ser_gap) / 2):
67 | scale_fill = (ser_fill / ser_gap).median()
68 | # correct scale_fill for edge cases
69 | scale_fill = 1 if abs(scale_fill) > 10 else scale_fill
70 | scale_fill = 1 if abs(scale_fill) < 0.1 else scale_fill
71 | scale_fill = 1 if np.isnan(scale_fill) else scale_fill
72 | else:
73 | scale_fill = 1
74 | # rescale fill based on median ratio of fill:orig at available timesteps
75 | ser_fill_gap = ser_fill / scale_fill
76 |
77 | # fill in gaps with rescaled values of the filling data
78 | ser_gap.loc[ser_gap.isna()] = ser_fill_gap.loc[ser_gap.isna()]
79 | ser_filled = pd.concat([ser_prev, ser_gap, ser_post])
80 |
81 | # fill the original gapped series
82 | ser_test_filled = ser_test.copy()
83 | ser_test_filled.loc[ser_filled.index] = ser_filled
84 | return ser_test_filled
85 |
86 |
87 | def cal_ratio_info(ser_to_fill, freq="1d"):
88 | rsmp = ser_to_fill.resample(freq)
89 | ser_TF_10 = rsmp.apply(lambda ser: ser.isna().any()) * 1
90 | return 1 - ser_TF_10.sum() / ser_TF_10.size
91 |
92 |
93 | # fill gaps iteratively
94 | def fill_gap_all_x(ser_to_fill: pd.Series, freq="1D", limit_fill=1) -> pd.Series:
95 | """Fill all gaps in a time series using data from neighbouring divisions of 'freq'
96 |
97 | Parameters
98 | ----------
99 | ser_to_fill : pd.Series
100 | Time series to gap-fill
101 | freq : str, optional
102 | Frequency to identify gapped divisions, by default '1D'
103 | limit_fill: int, optional
104 | Maximum number of consecutive NaNs to fill.
105 | Any number less than one means no pre-gap-filling interpolation will be done.
106 |
107 | Returns
108 | -------
109 | ser_test_filled: pd.Series
110 | Gap-filled time series.
111 |
112 | Patterns
113 | --------
114 | 010: missing data in division between others with no missing data
115 | 01: missing data in division after one with no missing data
116 | 10: division with missing data before one with no missing data
117 | """
118 |
119 | if limit_fill > 0:
120 | ser_test_filled = ser_to_fill.copy().interpolate(limit=limit_fill)
121 | else:
122 | ser_test_filled = ser_to_fill.copy()
123 |
124 | ptn_list = ["010", "01", "10"]
125 |
126 | # if ratio_info > 0.5:
127 | n_freq = ser_test_filled.resample(freq).size().size
128 | if n_freq > 4:
129 | n_chunk = int(pd.Timedelta(freq) / ser_test_filled.index.freq)
130 | n_freq = int(ser_test_filled.size / n_chunk)
131 | if n_chunk * n_freq < ser_test_filled.size:
132 | ser_residual = ser_test_filled.iloc[n_chunk * n_freq :]
133 | else:
134 | ser_residual = pd.Series([], dtype=float)
135 | ser_test_filled = ser_test_filled.iloc[: n_chunk * n_freq]
136 | # ser_test_filled
137 | # n_freq
138 | if n_freq > 4:
139 | n_sep = int(n_freq / 2) * n_chunk
140 | # offset = pd.Timedelta(freq) * n_sep
141 | # ind_sep1 = ser_test_filled.index[0] + offset
142 | # ind_sep2 = ser_test_filled.index[1] + offset
143 | ser_test_filled1 = ser_test_filled.iloc[:n_sep]
144 | ser_test_filled2 = ser_test_filled.iloc[n_sep:]
145 | rinfo1 = cal_ratio_info(ser_test_filled1)
146 | rinfo2 = cal_ratio_info(ser_test_filled2)
147 | rinfo_thresh = 0.2
148 | while (not (rinfo1 >= rinfo_thresh and rinfo2 >= rinfo_thresh)) and (
149 | n_sep < n_freq
150 | ):
151 | # print(rinfo1, rinfo2, n_sep)
152 | if rinfo1 < rinfo_thresh:
153 | n_sep += n_chunk
154 | if rinfo2 < rinfo_thresh:
155 | n_sep -= n_chunk
156 | ser_test_filled1 = ser_test_filled.iloc[:n_sep]
157 | ser_test_filled2 = ser_test_filled.iloc[n_sep:]
158 | rinfo1 = cal_ratio_info(ser_test_filled1)
159 | rinfo2 = cal_ratio_info(ser_test_filled2)
160 |
161 | # print(rinfo1, rinfo2, ser_test_filled1.size, ser_test_filled2.size)
162 | ser_test_filled1 = fill_gap_all_x(ser_test_filled1)
163 | ser_test_filled2 = fill_gap_all_x(ser_test_filled2)
164 | # print(ser_test_filled1.size)
165 | # print(ser_test_filled2.size)
166 | # print()
167 | ser_test_filled = pd.concat([ser_test_filled1, ser_test_filled2, ser_residual])
168 | else:
169 | while ser_test_filled.isna().any():
170 | # print("here")
171 | # try different gap patterns and fill gaps
172 | try:
173 | ptn_gap = next(
174 | ptn
175 | for ptn in ptn_list
176 | if len(loc_gap(ser_test_filled, freq, ptn)) == len(ptn)
177 | )
178 | # print(ser_test_filled,ptn_gap)
179 | ser_test_filled = fill_gap_one(ser_test_filled, freq, ptn_gap)
180 | except StopIteration:
181 | pass
182 | except RuntimeError:
183 | # too gapped to proceed: return the (partially) filled series as-is
184 | return ser_test_filled
185 |
186 | return ser_test_filled
187 |
188 |
189 | def fill_gap_all(
190 | ser_to_fill: pd.Series,
191 | freq="1D",
192 | limit_fill=1,
193 | thresh_ratio=0.8,
194 | ) -> pd.Series:
195 | """Fill all gaps in a time series using data from neighbouring divisions of 'freq'
196 |
197 | Parameters
198 | ----------
199 | ser_to_fill : pd.Series
200 | Time series to gap-fill
201 | freq : str, optional
202 | Frequency to identify gapped divisions, by default '1D'
203 | limit_fill: int, optional
204 | Maximum number of consecutive NaNs to fill.
205 | Any number less than one means no pre-gap-filling interpolation will be done.
206 |
207 | Returns
208 | -------
209 | ser_test_filled: pd.Series
210 | Gap-filled time series.
211 |
212 | Patterns
213 | --------
214 | 010: missing data in division between others with no missing data
215 | 01: missing data in division after one with no missing data
216 | 10: division with missing data before one with no missing data
217 | """
218 | ratio_info = cal_ratio_info(ser_to_fill)
219 | ser_test_filled = norm_ser_dt(ser_to_fill)
220 | if ratio_info < thresh_ratio:
221 | raise RuntimeError(
222 | f"input series is too gapped (valid data ratio = {ratio_info:.2%}) to proceed with the gap-filling method."
223 | )
224 | else:
225 | # normalise into a series with complete diurnal cycles
226 | ser_test_filled = fill_gap_all_x(ser_test_filled, freq, limit_fill)
227 | # i = 0
228 | while ser_test_filled.isna().any():
229 | # i += 1
230 | ser_test_filled = fill_gap_all_x(ser_test_filled, freq, limit_fill)
231 | ser_gap = ser_test_filled.groupby(ser_test_filled.index.year).apply(
232 | lambda x: x.isna().sum()
233 | )
234 | # print(i, ser_gap)
235 | ser_test_filled = ser_test_filled.loc[ser_to_fill.index]
236 | return ser_test_filled
237 |
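# EXAMPLE (a sketch with synthetic data; a real input series must have `index.freq` set):
# import numpy as np
# import pandas as pd
# idx = pd.date_range("2012-01-01", periods=24 * 30, freq="1h")
# ser_demo = pd.Series(10 + np.sin(np.arange(idx.size) * 2 * np.pi / 24), index=idx)
# ser_demo.iloc[100:120] = np.nan  # introduce a gap within one day
# ser_filled = fill_gap_all(ser_demo, freq="1D")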
238 |
239 | def norm_ser_dt(ser_dt):
240 | """
241 | normalise a time series into a time series with complete diurnal cycles.
242 |
243 | Parameters
244 | ----------
245 | ser_dt : pandas.Series
246 | datetime-indexed time series
247 |
248 | Returns
249 | -------
250 | pandas.Series
251 | time series with complete diurnal cycles; missing values may be inserted as np.nan.
252 | """
253 | freq = ser_dt.index.freq
254 | dt_start, dt_end = ser_dt.index.normalize()[[0, -1]]
255 | dt_end = dt_end + pd.Timedelta("1d")
256 |
257 | idx_norm = pd.date_range(dt_start, dt_end, freq=freq)[:-1]
258 | ser_norm = ser_dt.reindex(idx_norm)
259 | return ser_norm
260 |
--------------------------------------------------------------------------------
/src/supy/util/_io.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pathlib import Path
3 | import pandas as pd
4 | from .._load import (
5 | load_SUEWS_Forcing_met_df_pattern,
6 | resample_forcing_met,
7 | set_index_dt,
8 | )
9 |
10 |
11 | def read_suews(path_suews_file: str) -> pd.DataFrame:
12 | """read in SUEWS input/output file as datetime-aware DataFrame.
13 |
14 | Parameters
15 | ----------
16 | path_suews_file : str
17 | a string that can be converted into a valid path to SUEWS file.
18 |
19 | Returns
20 | -------
21 | pd.DataFrame
22 | datetime-aware DataFrame
23 | """
24 |
25 | path_suews_file = Path(path_suews_file).resolve()
26 | df_raw = pd.read_csv(
27 | path_suews_file,
28 | delim_whitespace=True,
29 | comment="!",
30 | on_bad_lines='error',
31 | )
32 | df_suews = set_index_dt(df_raw)
33 | return df_suews
34 |
35 |
36 | def read_forcing(path_suews_file: str, tstep_mod=300) -> pd.DataFrame:
37 | """read in SUEWS forcing files as DataFrame ready for SuPy simulation.
38 |
39 | Parameters
40 | ----------
41 | path_suews_file : str
42 | a string with a wildcard pattern that locates SUEWS forcing files; these should follow the SUEWS file-naming convention.
43 |
44 | tstep_mod: int or None, optional
45 | time step [s] for resampling, by default 300.
46 | If `None`, resampling will be skipped.
47 |
48 | Returns
49 | -------
50 | pd.DataFrame
51 | datetime-aware DataFrame
52 | """
53 |
54 | path_suews_file = Path(path_suews_file)
55 | path_input = path_suews_file.parent
56 | str_pattern = path_suews_file.name
57 |
58 | df_forcing_raw = load_SUEWS_Forcing_met_df_pattern(path_input, str_pattern)
59 | tstep_met_in = df_forcing_raw.index.to_series().diff().iloc[-1] / pd.Timedelta("1s")
60 | df_forcing_raw = df_forcing_raw.asfreq(f"{tstep_met_in:.0f}s")
61 |
62 | df_forcing = df_forcing_raw
63 |
64 | # resampling only when necessary
65 | if tstep_mod is not None:
66 | if tstep_mod < tstep_met_in:
67 | df_forcing = df_forcing_raw.replace(-999, np.nan)
68 | df_forcing = resample_forcing_met(
69 | df_forcing, tstep_met_in, tstep_mod, kdownzen=0
70 | )
71 | df_forcing = df_forcing.replace(np.nan, -999)
72 |
73 | return df_forcing
74 |
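# EXAMPLE (hypothetical paths following the SUEWS file-naming convention):
# df_out = read_suews("Output/SiteName_2012_SUEWS_60.txt")
# df_forcing = read_forcing("Input/SiteName_2012_data_60.txt", tstep_mod=300)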
--------------------------------------------------------------------------------
/src/supy/util/_ohm.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Dec 17 11:13:44 2018
4 |
5 | Authors:
6 | George Meachim,
7 | Ting Sun
8 | """
9 |
10 | import numpy as np
11 | import pandas as pd
12 |
13 | from ._plot import plot_comp, plot_day_clm
14 | from .._env import logger_supy
15 |
16 |
17 | # Linear fitting of QS, QN, deltaQN/dt (entire timeseries)
18 | def derive_ohm_coef(ser_QS, ser_QN):
19 | """
20 | A function to linearly fit two independent variables to a dependent one.
21 |
22 | Parameters
23 | ----------
24 | ser_QS : pd.Series
25 | The dependent variable QS (Surface heat storage).
26 | ser_QN : pd.Series
27 | The first independent variable (Net all wave radiation).
28 |
29 | Returns
30 | -------
31 | Tuple
32 | a1, a2 coefficients and a3 (intercept)
33 | """
34 |
35 |
36 |
37 | from sklearn.linear_model import LinearRegression
38 |
39 | # derive dt in hours
40 | dt_hr = ser_QN.index.freq / pd.Timedelta("1H")
41 |
42 | # Calculate difference between neighbouring QN values
43 | ser_delta_QN_dt = ser_QN.diff() / dt_hr
44 |
45 | # Drop NaNs and infinite values
46 | ser_QS = ser_QS.replace([np.inf, -np.inf], np.nan).dropna(how="all")
47 | ser_QN = ser_QN.loc[ser_QS.index]
48 | ser_delta_QN_dt = ser_delta_QN_dt.loc[ser_QS.index]
49 |
50 | # Create DataFrame with regression quantities and rename cols
51 | frames = [ser_QS, ser_QN, ser_delta_QN_dt]
52 | regression_df = pd.concat(frames, axis=1)
53 | regression_df.columns = ["QS", "QN", "delta_QN_dt"]
54 |
55 | # Reindex after dropping NaNs
56 | regression_df.reset_index(drop=True, inplace=True)
57 | regression_df.fillna(regression_df.mean(), inplace=True)
58 |
59 | feature_cols = ["QN", "delta_QN_dt"]
60 |
61 | X = regression_df[feature_cols].replace([np.inf, -np.inf], np.nan).dropna(how="all")
62 | y = regression_df.QS
63 |
64 | lm = LinearRegression()
65 | lm.fit(X, y)
66 |
67 | a1 = lm.coef_[0]
68 | a2 = lm.coef_[1]
69 | a3 = lm.intercept_
70 |
71 | return a1, a2, a3
72 |
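# EXAMPLE (hypothetical observed series sharing a freq-aware datetime index):
# a1, a2, a3 = derive_ohm_coef(ser_qs_obs, ser_qn_obs)
# print(f"a1={a1:.3f} [-], a2={a2:.3f} [h], a3={a3:.1f} [W m-2]")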
73 |
74 | def replace_ohm_coeffs(df_state, coefs, land_cover_type):
75 | """
76 | This function takes as input parameters the model initial state DataFrame,
77 | the new ohm coefficients as calculated by performing linear regression on
78 | AMF Obs and the land cover type for which they were calculated.
79 |
80 | Parameters
81 | ----------
82 | df_state : pandas.DataFrame
83 | Model state dataframe used in supy
84 | coefs : tuple
85 | new a1, a2, a3 coefficients to replace those in `df_state`;
86 | note:
87 | 1. the format should be (a1, a2, a3);
88 | 2. any of a1/a2/a3 can be either one numeric value or a list-like structure of four values for SW/SD/WW/WD conditions indicated by `ohm_threshsw` and `ohm_threshwd`.
89 | land_cover_type : str
90 | one of seven SUEWS land cover types, can be any of
91 | 1. {"Paved","Bldgs","EveTr","DecTr","Grass","BSoil","Water"} (string case does NOT matter); or
92 | 2. an integer between 0 and 6.
93 |
94 | Returns
95 | -------
96 | pandas.DataFrame
97 | a DataFrame with updated OHM coefficients.
98 | """
99 |
100 | land_cover_type_dict = {
101 | "paved": 0,
102 | "bldgs": 1,
103 | "evetr": 2,
104 | "dectr": 3,
105 | "grass": 4,
106 | "bsoil": 5,
107 | "water": 6,
108 | }
109 |
110 | try:
111 | lc_index = (
112 | land_cover_type_dict.get(land_cover_type.lower())
113 | if isinstance(land_cover_type, str)
114 | else land_cover_type
115 | )
116 | except Exception:
117 | list_lc = list(land_cover_type_dict.keys())
118 | logger_supy.error(
119 | f"land_cover_type must be one of {list_lc}, instead of {land_cover_type}"
120 | )
121 | else:
122 | # Instantiate 4x3 matrix of zeros to put old coeffs
123 | coef_matrix = np.zeros((4, 3))
124 | coef_matrix[:, 0] = coefs[0]
125 | coef_matrix[:, 1] = coefs[1]
126 | coef_matrix[:, 2] = coefs[2]
127 |
128 | # Copy ohm_coef part of df_state_init
129 | df_ohm = df_state.loc[:, "ohm_coef"].copy()
130 | # Reshape values into matrix form
131 | values_ohm = df_ohm.values.reshape((-1, 8, 4, 3))
132 | # Get ohm values corresponding to user specified land cover and assign to matrix
133 | values_ohm[:, lc_index] = coef_matrix
134 | # Place new ohm values into df_ohm
135 | df_ohm.loc[:, :] = values_ohm.reshape(df_ohm.shape)
136 | # Make copy of df_state_init
137 | df_state_init_copy = df_state.copy()
138 | # Replace ohm_coef part of df_state_init with new values
139 | df_state_init_copy.loc[:, "ohm_coef"] = df_ohm.values
140 |
141 | return df_state_init_copy
142 |
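# EXAMPLE (hypothetical usage; apply coefficients fitted above to the building land cover):
# df_state_new = replace_ohm_coeffs(df_state_init, coefs=(a1, a2, a3), land_cover_type="Bldgs")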
143 |
144 | def sim_ohm(ser_qn: pd.Series, a1: float, a2: float, a3: float) -> pd.Series:
145 | """Calculate QS using OHM (Objective Hysteresis Model).
146 |
147 | Parameters
148 | ----------
149 | ser_qn : pd.Series
150 | net all-wave radiation.
151 | a1 : float
152 | a1 of OHM coefficients.
153 | a2 : float
154 | a2 of OHM coefficients.
155 | a3 : float
156 | a3 of OHM coefficients.
157 |
158 | Returns
159 | -------
160 | pd.Series
161 | heat storage flux calculated by OHM.
162 | """
163 |
164 | # derive delta t in hour
165 | try:
166 | dt_hr = ser_qn.index.freq / pd.Timedelta("1h")
167 | except AttributeError as ex:
168 | raise RuntimeError("frequency info is missing from input `ser_qn`") from ex
169 |
170 | # Calculate rate of change of Net All-wave radiation
171 | ser_qn_dt = ser_qn.diff() / dt_hr
172 |
173 | # Derive QS from OBS quantities
174 | ser_qs = a1 * ser_qn + a2 * ser_qn_dt + a3
175 |
176 | return ser_qs
177 |
178 |
179 | def compare_heat_storage(ser_qn_obs, ser_qs_obs, a1, a2, a3):
180 | """This function compares the storage heat flux calculated with AMF
181 | QN and linear regression coefficients with that output by SUEWS.
182 | Input params: QN_Ser_Obs: A series of OBS net all-wave radiation.
183 | QS_Ser_SUEWS: A series of SUEWS storage heat flux values.
184 | a1, a2, a3: Linear regression coefficients from ohm_linregress_clean.
185 | Returns: MPL plot of diurnal comparison, MPL plot with 1:1 line and fitted line.
186 | """
187 |
188 | # calculate qs using OHM
189 | ser_qs_sim = sim_ohm(ser_qn_obs, a1, a2, a3).rename("Sim")
190 |
191 | # re-organise obs and sim info one dataframe
192 | ser_qs_obs = ser_qs_obs.rename("Obs")
193 | df_qs = pd.concat([ser_qs_sim, ser_qs_obs], axis=1)
194 | # Plotting
195 | plot1 = plot_day_clm(df_qs)
196 | plot2 = plot_comp(df_qs)
197 |
198 | return plot1, plot2
199 |
--------------------------------------------------------------------------------
/src/supy/util/_roughness.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from .._env import logger_supy
4 | from ._atm import cal_cp, cal_Lob
5 |
6 |
7 | def cal_neutral(
8 | ser_qh,
9 | ser_ustar,
10 | ser_ta_c,
11 | ser_rh_pct,
12 | ser_pres_hpa,
13 | ser_ws,
14 | z_meas,
15 | h_sfc,
16 | ):
17 | """Calculates the rows associated with neutral condition (threshold=0.01)
18 |
19 |
20 | Parameters
21 | ----------
ser_qh: pd.Series
23 | sensible heat flux [W/m^2]
24 | ser_ustar: pd.Series
25 | friction velocity [m/s]
26 | ser_ta_c: pd.Series
27 | air temperature [°C]
28 | ser_rh_pct: pd.Series
29 | relative humidity [%]
30 | ser_pres_hpa: pd.Series
31 | air pressure [hPa]
32 | ser_ws: pd.Series
33 | wind speed [m/s]
34 | z_meas
35 | measurement height [m]
36 | h_sfc
37 | vegetation height [m]
38 |
39 | Returns
40 | -------
41 | ser_ws_neutral: pd.Series
42 | observation time series of WS (Neutral conditions)
43 | ser_ustar_neutral: pd.Series
44 | observation time series of u* (Neutral conditions)
45 | """
46 |
47 | # calculate Obukhov length
48 | # ser_Lob = df_val.apply(
49 | # lambda ser: cal_Lob(ser.H, ser.USTAR, ser.TA, ser.RH, ser.PA * 10), axis=1
50 | # )
51 | ser_Lob = cal_Lob(ser_qh, ser_ustar, ser_ta_c, ser_rh_pct, ser_pres_hpa)
52 |
# zero-plane displacement: estimated using the rule of thumb `d = 0.7 * h_sfc`
54 |
55 | z_d = 0.7 * h_sfc
56 |
57 | if z_d >= z_meas:
58 | logger_supy.exception(
59 | "vegetation height is greater than measuring height. Please fix this before continuing ..."
60 | )
61 |
62 | # calculate stability scale
63 | ser_zL = (z_meas - z_d) / ser_Lob
64 |
65 | # determine periods under quasi-neutral conditions
66 | limit_neutral = 0.01
67 | ind_neutral = ser_zL.between(-limit_neutral, limit_neutral)
68 |
69 | ind_neutral = ind_neutral[ind_neutral].index
70 |
71 | # df_sel = df_val.loc[ind_neutral.index, ["WS", "USTAR"]].dropna()
72 | ser_ustar_neutral = ser_ustar.loc[ind_neutral]
73 | ser_ws_neutral = ser_ws.loc[ind_neutral]
74 |
75 | return ser_ws_neutral, ser_ustar_neutral
76 |
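# EXAMPLE (hypothetical tower observations; heights in metres):
# ser_ws_n, ser_ustar_n = cal_neutral(ser_qh, ser_ustar, ser_ta_c, ser_rh_pct,
#                                     ser_pres_hpa, ser_ws, z_meas=25.0, h_sfc=10.0)
# print(f"{ser_ws_n.size} near-neutral records selected")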
77 |
78 | # calculate z0 and zd using multi-objective optimisation
79 | def cal_z0zd_mo(
80 | ser_qh,
81 | ser_ustar,
82 | ser_ta_c,
83 | ser_rh_pct,
84 | ser_pres_hpa,
85 | ser_ws,
86 | z_meas,
87 | h_sfc,
88 | ):
89 | """Calculates surface roughness and zero plane displacement height using mutli-objective optimisation.
90 | Refer to https://suews-parameters-docs.readthedocs.io/en/latest/steps/roughness-SuPy.html for example
91 |
92 | Parameters
93 | ----------
94 | ser_qh: pd.Series
95 | sensible heat flux [W/m^2]
96 | ser_ustar: pd.Series
97 | friction velocity [m/s]
98 | ser_ta_c: pd.Series
99 | air temperature [°C]
100 | ser_rh_pct: pd.Series
101 | relative humidity [%]
102 | ser_pres_hpa: pd.Series
103 | air pressure [hPa]
104 | z_meas
105 | measurement height in m
106 | h_sfc
107 | vegetation height in m
108 |
109 | Returns
110 | -------
111 | z0
112 | surface roughness length for momentum
113 | zd
114 | zero displacement height
115 | ser_ws_neutral: pd.Series
116 | observation time series of WS (Neutral conditions)
117 | ser_ustar_neutral: pd.Series
118 | observation time series of u* (Neutral conditions)
119 | """
120 |
121 | from platypus.core import Problem
122 | from platypus.types import Real, random
123 | from platypus.algorithms import NSGAIII
124 |
125 | # Calculates rows related to neutral conditions
126 | ser_ws_neutral, ser_ustar_neutral = cal_neutral(
127 | ser_qh,
128 | ser_ustar,
129 | ser_ta_c,
130 | ser_rh_pct,
131 | ser_pres_hpa,
132 | ser_ws,
133 | z_meas,
134 | h_sfc,
135 | )
136 |
137 | # function to optimize
138 | def func_uz(params):
139 | z0 = params[0]
140 | d = params[1]
141 | z = z_meas
142 | k = 0.4
143 | uz = (ser_ustar_neutral / k) * np.log((z - d) / z0) # logarithmic law
144 |
145 | o1 = abs(1 - np.std(uz) / np.std(ser_ws_neutral)) # objective 1: normalized STD
146 | # objective 2: normalized MAE
147 | o2 = np.mean(abs(uz - ser_ws_neutral)) / (np.mean(ser_ws_neutral))
148 |
149 | return [o1, o2], [uz.min(), d - z0]
150 |
151 | problem = Problem(2, 2, 2)
152 | problem.types[0] = Real(0, 10) # bounds for first parameter (z0)
153 | problem.types[1] = Real(0, h_sfc) # bounds for second parameter (zd)
154 |
155 | problem.constraints[0] = ">=0" # constraint: uz.min() >= 0
156 | problem.constraints[1] = ">=0" # constraint: d - z0 >= 0
157 |
158 | problem.function = func_uz
159 | random.seed(12345)
160 | algorithm = NSGAIII(problem, divisions_outer=50)
161 | algorithm.run(30000)
162 |
163 | z0s = []
164 | ds = []
165 | os1 = []
166 | os2 = []
167 | # getting the solution variables
168 | for s in algorithm.result:
169 | z0s.append(s.variables[0])
170 | ds.append(s.variables[1])
171 | os1.append(s.objectives[0])
172 | os2.append(s.objectives[1])
173 | # select the solution whose objective 2 is closest to the mean of objective 2 (can be changed)
174 | idx = os2.index(min(os2, key=lambda x: abs(x - np.mean(os2))))
175 | z0 = z0s[idx]
176 | zd = ds[idx]
177 |
178 | return z0, zd
179 |
180 |
181 | # calculate z0 and d using curve fitting
182 | def cal_z0zd(
183 | ser_qh,
184 | ser_ustar,
185 | ser_ta_c,
186 | ser_rh_pct,
187 | ser_pres_hpa,
188 | ser_ws,
189 | z_meas,
190 | h_sfc,
191 | debug=False,
192 | ):
193 | """Calculates surface roughness and zero plane displacement height.
194 | Refer to https://suews-parameters-docs.readthedocs.io/en/latest/steps/roughness-SuPy.html for example
195 |
196 | Parameters
197 | ----------
198 | ser_qh: pd.Series
199 | sensible heat flux [W/m^2]
200 | ser_ustar: pd.Series
201 | friction velocity [m/s]
202 | ser_ta_c: pd.Series
203 | air temperature [°C]
204 | ser_rh_pct: pd.Series
205 | relative humidity [%]
206 | ser_pres_hpa: pd.Series
207 | air pressure [hPa]
208 | z_meas: number
209 | measurement height in m
210 | h_sfc: number
211 | vegetation height in m
212 | debug : bool, optional
213 | Option to output the final calibrated lmfit `ModelResult`, by default False
214 |
215 |
216 | Returns
217 | -------
218 | z0
219 | surface roughness length for momentum
220 | zd
221 | zero displacement height
222 | """
223 |
224 | from lmfit import Model, Parameter, Parameters
225 |
226 | # Calculates rows related to neutral conditions
227 | ser_ws_neutral, ser_ustar_neutral = cal_neutral(
228 | ser_qh,
229 | ser_ustar,
230 | ser_ta_c,
231 | ser_rh_pct,
232 | ser_pres_hpa,
233 | ser_ws,
234 | z_meas,
235 | h_sfc,
236 | )
237 |
238 | # function to optimize
239 | def cal_uz_neutral(ustar_ntrl, z0, zd, z=z_meas, k=0.4):
240 | # logarithmic law
241 | uz = (ustar_ntrl / k) * np.log((z - zd) / z0)
242 | return uz
243 |
244 | model_uz_neutral = Model(
245 | cal_uz_neutral,
246 | independent_vars=["ustar_ntrl"],
247 | param_names=["z0", "zd"],
248 | )
249 | prms = Parameters()
250 | prm_z0 = Parameter(
251 | "z0",
252 | h_sfc * 0.1,
253 | vary=True,
254 | min=0.01 * h_sfc,
255 | max=0.95 * h_sfc,
256 | )
257 | prm_zd = Parameter(
258 | "zd",
259 | h_sfc * 0.7,
260 | vary=True,
261 | min=0.01 * h_sfc,
262 | max=0.95 * h_sfc,
263 | )
264 | prms.add_many(prm_z0, prm_zd)
265 | try:
266 | res_fit = model_uz_neutral.fit(
267 | ser_ws_neutral,
268 | ustar_ntrl=ser_ustar_neutral,
269 | params=prms,
270 | )
271 | # provide full fitted model if debug == True otherwise only a dict with best fit parameters
272 | res = res_fit if debug else res_fit.best_values
273 | if isinstance(res, dict):
274 | return res["z0"], res["zd"]
275 |
276 | return res
277 | except Exception as e:
278 | print(e)
279 | print('Fitting failed! Using 0.1*h_sfc and 0.7*h_sfc for z0 and zd, respectively.')
280 | return h_sfc * 0.1, h_sfc * 0.7
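
# EXAMPLE (same hypothetical tower observations as for `cal_neutral` above):
# z0, zd = cal_z0zd(ser_qh, ser_ustar, ser_ta_c, ser_rh_pct, ser_pres_hpa, ser_ws,
#                   z_meas=25.0, h_sfc=10.0)
# # pass debug=True to obtain the full lmfit ModelResult instead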
--------------------------------------------------------------------------------
/src/supy/util/_wrf.py:
--------------------------------------------------------------------------------
1 | # WRF-SUEWS related utilities
2 | import pandas as pd
3 | import numpy as np
4 | from .._load import load_SUEWS_nml_simple
5 | from pathlib import Path
6 |
7 |
8 | dict_modis_20 = {
9 | 1: "Evergreen Needleleaf Forest",
10 | 2: "Evergreen Broadleaf Forest",
11 | 3: "Deciduous Needleleaf Forest",
12 | 4: "Deciduous Broadleaf Forest",
13 | 5: "Mixed Forests",
14 | 6: "Closed Shrublands",
15 | 7: "Open Shrublands",
16 | 8: "Woody Savannas",
17 | 9: "Savannas",
18 | 10: "Grasslands",
19 | 11: "Permanent Wetlands",
20 | 12: "Croplands",
21 | 13: "Urban and Built-Up",
22 | 14: "Cropland/Natural Vegetation Mosaic",
23 | 15: "Snow and Ice",
24 | 16: "Barren or Sparsely Vegetated",
25 | 17: "Water",
26 | 18: "Wooded Tundra",
27 | 19: "Mixed Tundra",
28 | 20: "Barren Tundra",
29 | }
30 |
31 | list_cat_suews = [
32 | # built-up
33 | "Paved",
34 | "Building",
35 | # vegetated
36 | "Evergreen Trees",
37 | "Deciduous Trees",
38 | "Grass",
39 | # soil
40 | "Bare Soil",
41 | # water
42 | "Water",
43 | # not-used
44 | "Extra",
45 | ]
46 |
47 |
48 | def extract_reclassification(path_nml: str) -> pd.DataFrame:
49 | """Extract reclassification info from `path_nml` as a DataFrame.
50 |
51 | Parameters
52 | ----------
53 | path_nml : str
54 | Path to `namelist.suews`
55 |
56 | Returns
57 | -------
58 | pd.DataFrame
59 | Reclassification DataFrame with rows for WRF land-cover classes and columns for SUEWS ones.
60 | """
61 | df_lc = load_SUEWS_nml_simple(path_nml).landuse
62 |
63 | ser_cat_suews = pd.Series(list_cat_suews, name="lc_suews")
64 | df_ind = pd.DataFrame(df_lc.loc["suews_cat_ind"], columns=ser_cat_suews)
65 | df_frac = pd.DataFrame(df_lc.loc["suews_cat_frac"], columns=ser_cat_suews)
66 |
67 | df_rcl = pd.concat([df_ind, df_frac], axis=1, keys=["lc_wrf", "frac"])
68 | df_rcl = df_rcl.stack(-1).reset_index("lc_suews")
69 | df_rcl = df_rcl.pivot_table(index="lc_wrf", columns="lc_suews", values="frac")
70 | df_rcl = df_rcl.drop(-999, axis=0)
71 | df_rcl = df_rcl.drop(list_cat_suews[-1], axis=1)
72 | df_rcl = df_rcl.replace(np.nan, 0)
73 | df_rcl = df_rcl.rename(dict_modis_20, axis=0)
74 | return df_rcl
75 |
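# EXAMPLE (hypothetical namelist path):
# df_rcl = extract_reclassification("path/to/namelist.suews")
# df_rcl.loc["Urban and Built-Up"]  # SUEWS fractions receiving this WRF class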
76 |
77 | def gen_df_sankey(path_nml: str):
78 | # load reclassification data
79 | df_rcl = extract_reclassification(path_nml)
80 |
81 | # create flow data
82 | df_flow = df_rcl.T.reset_index().melt(id_vars=["lc_suews"], value_name="frac")
83 |
84 | df_flow = df_flow.rename(
85 | {
86 | "lc_suews": "target",
87 | "lc_wrf": "source",
88 | "frac": "value",
89 | },
90 | axis=1,
91 | )
92 |
93 | # label conversion types
94 |
95 | def cat_type(x: str) -> str:
96 | if x in ["Building", "Paved"]:
97 | return "Built-up"
98 | elif x in ["Deciduous Trees", "Evergreen Trees", "Grass"]:
99 | return "Vegetated"
100 | else:
101 | return x
102 |
103 | df_flow["type"] = df_flow.target.apply(cat_type)
104 |
105 | # create process data
106 | df_process = df_flow.loc[df_flow.value > 0.1]
107 | df_process = pd.concat(
108 | [
109 | df_process[["target", "type"]].rename({"target": "id"}, axis=1),
110 | df_process[["source", "type"]].rename({"source": "id"}, axis=1),
111 | ],
112 | sort=False,
113 | )
114 | df_process = df_process.drop_duplicates().groupby("id").first()
115 | df_process["name"] = df_process.index
116 |
117 | return df_flow, df_process
118 |
119 |
120 | def in_ipynb():
121 | try:
122 | from IPython import get_ipython
123 |
124 | cfg = get_ipython().has_trait("kernel")
125 | if cfg:
126 | return True
127 | else:
128 | return False
129 | except NameError:
130 | return False
131 |
132 |
133 | def plot_reclassification(
134 | path_nml: str,
135 | path_save="LC-WRF-SUEWS.png",
136 | width=800,
137 | height=360,
138 | top=10,
139 | bottom=10,
140 | left=280,
141 | right=130,
142 | ):
143 | """Produce Sankey Diagram to visualise the reclassification specified in `path_nml`
144 |
145 | Parameters
146 | ----------
147 | path_nml : str
148 | Path to `namelist.suews`
149 | path_save : str, optional
150 | Path to save Sankey diagram, by default 'LC-WRF-SUEWS.png'
151 | width : int, optional
152 | Width of diagram, by default 800
153 | height : int, optional
154 | Height of diagram, by default 360
155 | top : int, optional
156 | Top margin of diagram, by default 10
157 | bottom : int, optional
158 | Bottom margin of diagram, by default 10
159 | left : int, optional
160 | Left margin of diagram, by default 280
161 | right : int, optional
162 | Right margin of diagram, by default 130
163 |
164 | Returns
165 | -------
166 | Sankey Diagram
167 | Sankey Diagram showing the reclassification.
168 | """
169 | try:
170 | from floweaver import (
171 | Bundle,
172 | Dataset,
173 | Partition,
174 | ProcessGroup,
175 | Waypoint,
176 | SankeyDefinition,
177 | weave,
178 | )
179 | except ImportError as ie:
180 | raise ImportError("Please install `floweaver` via `pip install floweaver`.") from ie
181 |
182 | # derive DataFrames required by Sankey
183 | df_flow, df_process = gen_df_sankey(path_nml)
184 |
185 | # set the default size to fit the documentation better.
186 | size = dict(width=width, height=height)
187 | margins = dict(top=top, bottom=bottom, left=left, right=right)
188 |
189 | # create Sankey data
190 | dataset = Dataset(df_flow, dim_process=df_process)
191 | # SUEWS LCs
192 | list_suews = df_flow.target.unique().tolist()
193 | # WRF LCs
194 | list_wrf = df_flow.source.unique().tolist()
195 | # LC types
196 | list_type = df_flow.type.unique()[[1, 2, 0, 3]].tolist()
197 | # print('list_type:', list_type)
198 | lc_by_type = Partition.Simple("type", list_type)
199 |
200 | nodes = {
201 | "SUEWS": ProcessGroup(list_suews),
202 | "WRF": ProcessGroup(list_wrf),
203 | }
204 | nodes["SUEWS"].partition = Partition.Simple("process", list_cat_suews)
205 | nodes["WRF"].partition = Partition.Simple("process", list_wrf)
206 | nodes["type"] = Waypoint(lc_by_type)
207 | ordering = [
208 | ["WRF"], # put "WRF" on the left...
209 | ["type"], # put "type" on the left...
210 | ["SUEWS"], # ... and "SUEWS" on the right.
211 | ]
212 | bundles = [
213 | Bundle("WRF", "SUEWS", waypoints=["type"]),
214 | ]
215 |
216 | # Set the colours for the labels in the partition.
217 | palette = {
218 | "Built-up": "slategrey",
219 | "Vegetated": "forestgreen",
220 | "Bare Soil": "tan",
221 | "Water": "royalblue",
222 | }
223 |
224 | sdd = SankeyDefinition(nodes, bundles, ordering, flow_partition=lc_by_type)
225 |
226 | data_sankey = weave(sdd, dataset, palette=palette)
227 | sankey = data_sankey.to_widget(**size, margins=margins)
228 | if in_ipynb():
229 | path_save = Path(path_save)
230 | suffix = path_save.suffix  # includes the leading dot, e.g. ".png"
231 | if suffix == ".png":
232 | print("Saving figure in png:")
233 | sankey.auto_save_png(path_save)
234 | elif suffix == ".svg":
235 | print("Saving figure in svg:")
236 | sankey.auto_save_svg(path_save)
237 | else:
238 | print("Saving figure in png: ")
239 | sankey.auto_save_png(path_save)
240 | print(path_save.resolve())
241 | else:
242 | print("Please run this function in Jupyter notebook for visualisation.")
243 |
244 | return sankey
245 |
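# EXAMPLE (hypothetical usage; run inside a Jupyter notebook so the figure can be saved):
# sankey = plot_reclassification("path/to/namelist.suews", path_save="LC-WRF-SUEWS.svg")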
--------------------------------------------------------------------------------