├── __init__.py ├── src ├── __init__.py ├── weathergen │ ├── install_rpackages.R │ ├── global.R │ ├── impose_climate_change.R │ └── generate_weather.R ├── metrics_definition.py ├── prepare_weagen_config.py ├── setup_reservoirs_lakes_glaciers.py ├── prepare_cst_parameters.py ├── setup_gauges_and_outputs.py ├── setup_time_horizon.py ├── downscale_climate_forcing.py ├── copy_config_files.py ├── get_region_preview.py ├── prepare_climate_data_catalog.py ├── get_change_climate_proj_summary.py ├── plot_map.py ├── extract_historical_climate.py ├── plot_map_forcing.py ├── get_stats_climate_proj.py └── plot_results.py ├── tests ├── __init__.py ├── test_cli.py ├── test_model_creation.py ├── conftest.py ├── wflow_build_model.yml ├── data │ └── tests_data_catalog.yml └── snake_config_model_test.yml ├── .gitattributes ├── docs ├── _images │ ├── WB_logo.png │ ├── CST_scheme.png │ ├── company_logos.png │ └── deltares_logo.png └── notebooks │ ├── Model building.ipynb │ └── Climate projections.ipynb ├── data └── observations │ └── output-locations-test.csv ├── blueearth_cst.Rproj ├── .github └── pull_request_template.md ├── config ├── snake_config_projections_isimip3.yml ├── snake_config_projections_isimip3_linux.yml ├── wflow_update_waterbodies.yml ├── snake_config_projections_cmip6_full.yml ├── snake_config_projections_cmip5_full_linux.yml ├── snake_config_projections_cmip5_full.yml ├── weathergen_config.yml ├── wflow_build_model.yml ├── wflow_sbm.toml ├── snake_config_model_test_linux.yml ├── snake_config_model_test.yml ├── deltares_data_climate_projections.yml ├── deltares_data_climate_projections_linux.yml └── cmip6_data.yml ├── environment.yml ├── environment_linux.yml ├── run_snake_docker.sh ├── run_snake_test.cmd ├── Dockerfile ├── pixi.toml ├── .gitignore ├── Snakefile_model_creation ├── Snakefile_climate_projections └── Snakefile_climate_experiment /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # GitHub syntax highlighting 2 | pixi.lock linguist-language=YAML linguist-generated=true 3 | -------------------------------------------------------------------------------- /docs/_images/WB_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Deltares/blueearth_cst/main/docs/_images/WB_logo.png -------------------------------------------------------------------------------- /docs/_images/CST_scheme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Deltares/blueearth_cst/main/docs/_images/CST_scheme.png -------------------------------------------------------------------------------- /docs/_images/company_logos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Deltares/blueearth_cst/main/docs/_images/company_logos.png -------------------------------------------------------------------------------- /docs/_images/deltares_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Deltares/blueearth_cst/main/docs/_images/deltares_logo.png -------------------------------------------------------------------------------- /data/observations/output-locations-test.csv: -------------------------------------------------------------------------------- 1 | wflow_id,station_name,x,y 2 | 2,station_2,9.759284327,0.399418254 3 | 3,station_3,9.7745323,0.399418254 4 | 4,station_4,9.784992262,0.398460652 5 | 5,station_5,9.79357385,0.391536452 6 | 6,station_6,9.816151163,0.387779705 7 | 7,station_7,9.763851352,0.446267098 8 | -------------------------------------------------------------------------------- /src/weathergen/install_rpackages.R: -------------------------------------------------------------------------------- 1 | # Install Rlang package from the correct repo 2 | install.packages("rlang", repos = "http://cran.rstudio.com", dependencies = TRUE) 3 | 4 | # Install weathergenr package, but not don't update the dependencies 5 | devtools::install_github("Deltares/weathergenr", ref="Dev", upgrade = "never") 6 | -------------------------------------------------------------------------------- /blueearth_cst.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: ISO8859-1 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | -------------------------------------------------------------------------------- /src/weathergen/global.R: -------------------------------------------------------------------------------- 1 | #################### GLOBAL R SETTINGS ######################################### 2 | 3 | # Set library location (prevents errors if another R.exe is already installed in the system) 4 | .libPaths(paste0(Sys.getenv("R_HOME"),"/library")) 5 | 6 | # General options 7 | options(warn = -1) # Disable warnings 8 | 9 | # Disable S3 method overwritten message 10 | Sys.setenv(`_R_S3_METHOD_REGISTRATION_NOTE_OVERWRITES_` = "false") 11 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Issue addressed 2 | Fixes # 3 | 4 | ## Explanation 5 | Explain how you addressed the bug/feature request, what choices you made and why. 6 | 7 | ## Checklist 8 | - [ ] Updated tests or added new tests 9 | - [ ] Branch is up to date with `main` 10 | - [ ] Tests pass locally 11 | - [ ] Black formatting pass locally 12 | - [ ] Files used by CST API and dashboard are not impacted by the changes 13 | 14 | ## Additional Notes (optional) 15 | Add any additional notes or information that may be helpful also if CST files were impacted. 16 | -------------------------------------------------------------------------------- /config/snake_config_projections_isimip3.yml: -------------------------------------------------------------------------------- 1 | # Path to where the wflow model will be stored (include the project name) 2 | project_name: Gabon 3 | project_dir: examples/Gabon 4 | 5 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 6 | data_sources: config/deltares_data_climate_projections.yml 7 | 8 | # climate models and scenarios 9 | clim_project: isimip3 10 | models: [gfdl] 11 | scenarios: [ssp370] 12 | members: [r1i1p1f1] 13 | 14 | variables: [precip, temp] 15 | 16 | # time horizons 17 | start_month_hyd_year: Oct 18 | historical: 1991-01-01, 2014-12-31 19 | future_horizons: 20 | near: 2021-01-01, 2050-12-31 21 | far: 2071-01-01, 2100-12-31 22 | -------------------------------------------------------------------------------- /config/snake_config_projections_isimip3_linux.yml: -------------------------------------------------------------------------------- 1 | # Path to where the wflow model will be stored (include the project name) 2 | project_name: Gabon 3 | project_dir: examples/Gabon 4 | 5 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 6 | data_sources: config/deltares_data_climate_projections_linux.yml 7 | 8 | # climate models and scenarios 9 | clim_project: isimip3 10 | models: [gfdl] 11 | scenarios: [ssp370] 12 | members: [r1i1p1f1] 13 | 14 | variables: [precip, temp] 15 | 16 | # time horizons 17 | start_month_hyd_year: Oct 18 | historical: 1991-01-01, 2014-12-31 19 | future_horizons: 20 | near: 2021-01-01, 2050-12-31 21 | far: 2071-01-01, 2100-12-31 22 | -------------------------------------------------------------------------------- /config/wflow_update_waterbodies.yml: -------------------------------------------------------------------------------- 1 | setup_reservoirs: 2 | reservoirs_fn: hydro_reservoirs # source for reservoirs based on GRAND: {hydro_reservoirs}; None to skip 3 | min_area: 1.0 # minimum lake area to consider [km2] 4 | timeseries_fn: jrc # if 'jrc' or 'gww' is used to calculate some reservoir attributes instead of the GRanD and HydroLAKES db. 5 | 6 | setup_lakes: 7 | lakes_fn: hydro_lakes # source for lakes based on hydroLAKES: {hydro_lakes}; None to skip 8 | min_area: 10.0 # minimum reservoir area to consider [km2] 9 | 10 | setup_glaciers: 11 | glaciers_fn: rgi # source for glaciers based on Randolph Glacier Inventory {rgi}; None to skip 12 | min_area: 1.0 # minimum glacier area to consider [km2] -------------------------------------------------------------------------------- /tests/test_cli.py: -------------------------------------------------------------------------------- 1 | """Test some snake command line interface (CLI) for validity of snakefiles.""" 2 | 3 | import os 4 | from os.path import join, dirname, realpath 5 | import subprocess 6 | import pytest 7 | 8 | TESTDIR = dirname(realpath(__file__)) 9 | SNAKEDIR = join(TESTDIR, "..") 10 | 11 | config_fn = join(TESTDIR, "snake_config_model_test.yml") 12 | 13 | _snakefiles = [ 14 | "Snakefile_model_creation", 15 | "Snakefile_climate_projections", 16 | "Snakefile_climate_experiment", 17 | ] 18 | 19 | 20 | @pytest.mark.parametrize("snakefile", _snakefiles) 21 | def test_snakefile_cli(snakefile): 22 | # Test if snake command line runs successfully 23 | # snakemake all -c 1 -s Snakefile_model_creation --configfile tests/snake_config_model_test.yml --dry-run 24 | # move to SNAKEDIR 25 | os.chdir(SNAKEDIR) 26 | cmd = f"snakemake all -c 1 -s {snakefile} --configfile {config_fn} --dry-run" 27 | result = subprocess.run(cmd, shell=True, capture_output=True) 28 | # Check the output of the subprocess command 29 | assert result.returncode == 0 30 | # assert result.stdout == b'Hello, world!\n' 31 | -------------------------------------------------------------------------------- /config/snake_config_projections_cmip6_full.yml: -------------------------------------------------------------------------------- 1 | # Path to where the wflow model will be stored (include the project name) 2 | project_name: Gabon 3 | project_dir: examples/Gabon 4 | 5 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 6 | data_sources: config/cmip6_data.yml 7 | 8 | # climate models and scenarios 9 | clim_project: cmip6 10 | models: ['IPSL/IPSL-CM6A-LR','SNU/SAM0-UNICON','NCAR/CESM2','NCAR/CESM2-WACCM','INM/INM-CM4-8','INM/INM-CM5-0', 11 | 'NOAA-GFDL/GFDL-ESM4','NCC/NorESM2-LM','NIMS-KMA/KACE-1-0-G','CAS/FGOALS-f3-L','CSIRO-ARCCSS/ACCESS-CM2', 12 | 'NCC/NorESM2-MM','CSIRO/ACCESS-ESM1-5','NCAR/CESM2-WACCM-FV2','NCAR/CESM2-FV2','CMCC/CMCC-CM2-SR5', 13 | 'AS-RCEC/TaiESM1','NCC/NorCPM1','IPSL/IPSL-CM5A2-INCA','CMCC/CMCC-CM2-HR4','CMCC/CMCC-ESM2','IPSL/IPSL-CM6A-LR-INCA', 14 | 'E3SM-Project/E3SM-1-0'] 15 | scenarios: [ssp126, ssp245, ssp370, ssp585] 16 | members: [r1i1p1f1] 17 | 18 | variables: [precip, temp] 19 | 20 | # time horizons 21 | start_month_hyd_year: Jan 22 | historical: 1970, 2010 23 | future_horizons: 24 | near: 2020, 2060 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /tests/test_model_creation.py: -------------------------------------------------------------------------------- 1 | """Test functions from the model creation workflow.""" 2 | 3 | import os 4 | from os.path import join, dirname, realpath 5 | import pytest 6 | 7 | from ..src import copy_config_files 8 | 9 | TESTDIR = dirname(realpath(__file__)) 10 | SNAKEDIR = join(TESTDIR, "..") 11 | 12 | config_fn = join(TESTDIR, "snake_config_model_test.yml") 13 | 14 | 15 | def test_copy_config(project_dir, data_sources, model_build_config): 16 | """Test if config files are copied to project_dir/config folder""" 17 | # Call the copy file function 18 | copy_config_files.copy_config_files( 19 | config=config_fn, 20 | output_dir=join(project_dir, "config"), 21 | config_out_name="snake_config_model_creation.yml", 22 | other_config_files=[data_sources, model_build_config], 23 | ) 24 | 25 | # Check if config files are copied to project_dir/config folder 26 | assert os.path.exists(f"{project_dir}/config/snake_config_model_creation.yml") 27 | assert os.path.exists(f"{project_dir}/config/wflow_build_model.yml") 28 | assert os.path.exists(f"{project_dir}/config/tests_data_catalog.yml") 29 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: cst 2 | 3 | channels: 4 | - conda-forge 5 | - r 6 | 7 | # notes R packages stats and utils were not found 8 | dependencies: 9 | - cartopy 10 | - datrie # for snakemake 11 | - dvc 12 | - descartes 13 | - flit>=3.2 14 | - gcsfs>=2023.12.1 15 | - gdal>=3.1 16 | - geopandas>=0.10 17 | - graphviz # for snakemake 18 | - hydromt>=0.9.4 19 | - hydromt_wflow>=0.4.1, <0.6 20 | - jupyter # to run examples 21 | - matplotlib 22 | - numpy 23 | - pandas>=2.0.0 24 | - pip 25 | - python>=3.9, <3.12 26 | - r-base 27 | - r-essentials 28 | - r-akima 29 | - r-patchwork 30 | - r-doparallel 31 | - r-devtools 32 | - r-dplyr 33 | - r-e1071 34 | - r-fitdistrplus 35 | - r-forecast 36 | - r-ggplot2 37 | - r-MASS 38 | - r-ncdf4 39 | - r-scales 40 | - r-sf 41 | - r-tibble 42 | - r-tidyr 43 | - r-r.utils 44 | - r-yaml 45 | - rpy2 46 | - rtools # only needed for windows 47 | - scipy 48 | - seaborn # plot 49 | - shapely>=2.0.0 50 | - tabulate==0.8.10 # snakemake dependency, bug in v0.9.0 51 | - xarray<=2024.3.0 52 | - xclim 53 | - zarr 54 | - pip: 55 | - hydroengine 56 | - gwwapi 57 | - snakemake -------------------------------------------------------------------------------- /environment_linux.yml: -------------------------------------------------------------------------------- 1 | name: cst 2 | 3 | channels: 4 | - conda-forge 5 | - r 6 | 7 | # notes R packages stats and utils were not found 8 | dependencies: 9 | - python>=3.9, <=3.12 10 | - cartopy 11 | - datrie # for snakemake 12 | - dvc 13 | - descartes 14 | - flit>=3.2 15 | - gcsfs>=2023.12.1 16 | - gdal>=3.1 17 | - geopandas>=0.10 18 | - graphviz # for snakemake 19 | - hydromt>=0.9.4, <=0.10 20 | - hydromt_wflow>=0.4.1, <0.6 21 | - jupyter # to run examples 22 | - matplotlib 23 | - numpy 24 | - pandas>=2.0.0 25 | - pip 26 | - proj 27 | - r-base 28 | - r-essentials 29 | - r-akima 30 | - r-patchwork 31 | - r-doparallel 32 | - r-devtools 33 | - r-dplyr 34 | - r-e1071 35 | - r-fitdistrplus 36 | - r-forecast 37 | - r-ggplot2 38 | - r-MASS 39 | - r-ncdf4 40 | - r-scales 41 | - r-sf 42 | - r-tibble 43 | - r-tidyr 44 | - r-r.utils 45 | - r-yaml 46 | - rpy2 47 | - scipy 48 | - seaborn # plot 49 | - shapely>=2.0.0 50 | - tabulate==0.8.10 # snakemake dependency, bug in v0.9.0 51 | - xarray<=2024.3.0 52 | - xclim 53 | - zarr 54 | - pip: 55 | - hydroengine 56 | - gwwapi 57 | - snakemake 58 | -------------------------------------------------------------------------------- /run_snake_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | workflow_image="cst-workflow:0.0.1" 3 | docker_root='/root/work' 4 | volumeargs=( 5 | "-v $(pwd)/config:${docker_root}/config" 6 | "-v $(pwd)/examples:${docker_root}/examples" 7 | "-v $(pwd)/data:${docker_root}/data" 8 | "-v /mnt/p/wflow_global/hydromt:/mnt/p/wflow_global/hydromt" 9 | "-v $(pwd)/.snakemake:${docker_root}/.snakemake" 10 | ) 11 | 12 | docker run \ 13 | $(echo ${volumeargs[@]}) \ 14 | --privileged \ 15 | --entrypoint='' \ 16 | ${workflow_image} \ 17 | snakemake all \ 18 | -F \ 19 | -c 4 \ 20 | -s ${docker_root}/Snakefile_model_creation \ 21 | --configfile ${docker_root}/config/snake_config_model_test_linux.yml 22 | 23 | docker run \ 24 | $(echo ${volumeargs[@]}) \ 25 | --privileged \ 26 | --entrypoint='' \ 27 | ${workflow_image} \ 28 | snakemake all \ 29 | -F \ 30 | -c 4 \ 31 | -s ${docker_root}/Snakefile_climate_experiment \ 32 | --configfile ${docker_root}/config/snake_config_model_test_linux.yml 33 | 34 | docker run \ 35 | $(echo ${volumeargs[@]}) \ 36 | --privileged \ 37 | --entrypoint='' \ 38 | ${workflow_image} \ 39 | snakemake all \ 40 | -F \ 41 | -c 4 \ 42 | -s ${docker_root}/Snakefile_climate_projections \ 43 | --configfile ${docker_root}/config/snake_config_model_test_linux.yml 44 | -------------------------------------------------------------------------------- /config/snake_config_projections_cmip5_full_linux.yml: -------------------------------------------------------------------------------- 1 | # Path to where the wflow model will be stored (include the project name) 2 | project_name: Gabon 3 | project_dir: examples/Gabon 4 | 5 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 6 | data_sources: config/deltares_data_climate_projections_linux.yml 7 | 8 | # climate models and scenarios 9 | clim_project: cmip5 10 | models: ['IPSL-CM5A-LR', 'ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 11 | 'CESM1-CAM5', 'CESM1-FASTCHEM', 'CESM1-WACCM', 'CMCC-CESM', 12 | 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CNRM-CM5-2', 'CSIRO-Mk3-6-0', 13 | 'CanCM4', 'CanESM2', 'EC-EARTH', 'FGOALS-g2', 'FIO-ESM', 14 | 'GFDL-CM2p1', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 15 | 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 16 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 17 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 18 | 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MPI-ESM-P', 19 | 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 20 | 'bcc-csm1-1-m', 'inmcm4'] 21 | scenarios: [rcp26, rcp45, rcp60, rcp85] 22 | members: [r1i1p1] 23 | 24 | variables: [precip, temp, temp_min, temp_max] 25 | 26 | # time horizons 27 | start_month_hyd_year: Jan 28 | historical: 1960, 2000 29 | future_horizons: 30 | near: 2020, 2060 31 | -------------------------------------------------------------------------------- /config/snake_config_projections_cmip5_full.yml: -------------------------------------------------------------------------------- 1 | # Path to where the wflow model will be stored (include the project name) 2 | project_name: Gabon 3 | project_dir: examples/Gabon 4 | 5 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 6 | data_sources: config/deltares_data_climate_projections.yml 7 | 8 | # climate models and scenarios 9 | clim_project: cmip5 10 | models: ['IPSL-CM5A-LR', 'ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 11 | 'CESM1-CAM5', 'CESM1-FASTCHEM', 'CESM1-WACCM', 'CMCC-CESM', 12 | 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CNRM-CM5-2', 'CSIRO-Mk3-6-0', 13 | 'CanCM4', 'CanESM2', 'EC-EARTH', 'FGOALS-g2', 'FIO-ESM', 14 | 'GFDL-CM2p1', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 15 | 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 16 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 17 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 18 | 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MPI-ESM-P', 19 | 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 20 | 'bcc-csm1-1-m', 'inmcm4'] 21 | scenarios: [rcp26, rcp45, rcp60, rcp85] 22 | members: [r1i1p1] 23 | 24 | variables: [precip, temp, temp_min, temp_max] 25 | 26 | # time horizons 27 | start_month_hyd_year: Jan 28 | historical: 1960, 2000 29 | future_horizons: 30 | near: 2020, 2060 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /run_snake_test.cmd: -------------------------------------------------------------------------------- 1 | call activate blueearth-cst 2 | 3 | rem Snakefile_model_creation 4 | snakemake -s Snakefile_model_creation --configfile config/snake_config_model_test.yml --dag | dot -Tpng > dag_model.png 5 | snakemake --unlock -s Snakefile_model_creation --configfile config/snake_config_model_test.yml 6 | snakemake all -c 1 -s Snakefile_model_creation --configfile config/snake_config_model_test.yml 7 | 8 | rem Snakefile climate_projections 9 | snakemake -s Snakefile_climate_projections --configfile config/snake_config_model_test.yml --dag | dot -Tpng > dag_projections.png 10 | snakemake --unlock -s Snakefile_climate_projections --configfile config/snake_config_model_test.yml 11 | snakemake all -c 1 -s Snakefile_climate_projections --configfile config/snake_config_model_test.yml --keep-going 12 | 13 | rem Snakefile_climate_experiment 14 | snakemake -s Snakefile_climate_experiment --configfile config/snake_config_model_test.yml --dag | dot -Tpng > dag_climate.png 15 | snakemake --unlock -s Snakefile_climate_experiment --configfile config/snake_config_model_test.yml 16 | snakemake all -c 1 -s Snakefile_climate_experiment --configfile config/snake_config_model_test.yml 17 | 18 | rem snakemake -s Snakefile_model_creation all -c 1 --keep-going --until add_gauges --report --dryrun 19 | rem keep going is when parallel runs to keep going parallel if one series goes wrong 20 | rem dryrun is to tell what it will be doing without actually running 21 | rem until - still the whole workflow but not all jobs 22 | rem --delete-temp-output - delete the temp files after the run 23 | pause 24 | -------------------------------------------------------------------------------- /config/weathergen_config.yml: -------------------------------------------------------------------------------- 1 | 2 | ########################################################################################### 3 | ################################## Weather Generator Advanced settings #################### 4 | 5 | 6 | general: 7 | variables: [precip, temp, temp_min, temp_max] 8 | 9 | generateWeatherSeries: 10 | warm.signif.level: 0.80 # Low-frequency signal statistical significance level (suggested: 0.80 - 0.95) 11 | warm.sample.num: 25000 # Number of initial annual precip series to be generated (suggested: 10,000 - 30,000) 12 | knn.sample.num: 100 # Number of repeated samples for each simulated year (suggested: 100-120) 13 | month.start: 1 # First month of the hydrological year (1 to 12) 14 | warm.variable: precip # Variable to be used for wavelet analysis & modeling (do not change) 15 | mc.wet.quantile: 0.2 # quantile threshold to define wet weather 16 | mc.extreme.quantile: 0.8 # quantile threshold to define very wet weather 17 | seed: 123 # Randomization seed (needs to be an integer value) 18 | evaluate.model: TRUE # Should performance plots be generated? 19 | evaluate.grid.num: 20 # Number of grids to be sampled for the comparison plots (for speed reasons, keep <50) 20 | compute.parallel: FALSE # Should the code run in parallel mode? 21 | dry.spell.change: [1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0] # monthly coefficient to adjust dry spell length 22 | wet.spell.change: [1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0] # monthly coefficient to adjust wet spell length 23 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # julia intermediate 2 | ARG julia_version=1.8.2 3 | FROM julia:${julia_version} AS jul 4 | 5 | FROM alpine:latest AS local_files 6 | 7 | WORKDIR /root/code 8 | ADD src src 9 | ADD Snakefile_model_creation Snakefile_model_creation 10 | ADD Snakefile_climate_experiment Snakefile_climate_experiment 11 | ADD Snakefile_climate_projections Snakefile_climate_projections 12 | 13 | # Python env 14 | FROM condaforge/mambaforge:4.14.0-0 15 | 16 | WORKDIR /root/work 17 | 18 | COPY environment_linux.yml environment.yaml 19 | RUN mamba env create -f environment.yaml -n snakemake -q \ 20 | && . activate snakemake \ 21 | && conda clean --all -y \ 22 | && echo "source activate snakemake" > ~/.bashrc \ 23 | && rm environment.yaml 24 | ENV PATH=/opt/conda/envs/snakemake/bin:${PATH} 25 | # to avoid "ERROR 1: PROJ: proj_create_from_database: Open of /opt/conda/share/proj failed" 26 | ENV PROJ_LIB=/opt/conda/envs/snakemake/share/proj 27 | 28 | # Julia wflow 29 | ENV DEBIAN_FRONTEND="noninteractive" TZ="Europe/Amsterdam" 30 | RUN apt-get update -y \ 31 | && apt-get install -y \ 32 | build-essential libatomic1 gfortran perl wget m4 cmake pkg-config curl \ 33 | git \ 34 | && rm -rf /var/lib/apt/lists/* 35 | 36 | COPY --from=jul /usr/local/julia /opt/julia 37 | 38 | ENV PATH=/opt/julia/bin:${PATH} 39 | 40 | RUN julia -e "import Pkg; Pkg.add(\"Wflow\"); using Wflow;" 41 | 42 | COPY src/weathergen/install_rpackages.R /tmp/install_rpackages.R 43 | 44 | RUN Rscript -e "install.packages('devtools',repos = 'http://cran.us.r-project.org')" 45 | RUN Rscript /tmp/install_rpackages.R 46 | 47 | COPY --from=local_files /root/code /root/work/ 48 | -------------------------------------------------------------------------------- /config/wflow_build_model.yml: -------------------------------------------------------------------------------- 1 | read_config: # read user-defined config template in build mode instead of the default 2 | config_fn: "./wflow_sbm.toml" 3 | 4 | setup_basemaps: 5 | hydrography_fn: merit_hydro_ihu # source hydrography data {merit_hydro, merit_hydro_1k} 6 | basin_index_fn: merit_hydro_index # source of basin index corresponding to hydrography_fn 7 | upscale_method: ihu # upscaling method for flow direction data, by default 'ihu' 8 | 9 | setup_rivers: 10 | hydrography_fn: merit_hydro_ihu # source hydrography data, should match basemaps source 11 | river_geom_fn: rivers_lin2019_v1 # river source data with river width and bankfull discharge 12 | river_upa: 32 # minimum upstream area threshold for the river map [km2] 13 | slope_len: 2000 # length over which tp calculate river slope [m] 14 | rivdph_method: powlaw # method to estimate depth {'powlaw', 'manning', 'gvf'} 15 | min_rivdph: 0.5 # minimum river depth [m] 16 | min_rivwth: 30 # minimum river width [m] 17 | smooth_len: 5000 # length over which to smooth river depth and river width [m] 18 | 19 | setup_lulcmaps: 20 | lulc_fn: vito # source for lulc maps: {globcover, vito, corine} 21 | 22 | setup_laimaps: 23 | lai_fn: modis_lai # source for LAI: {modis_lai} 24 | 25 | setup_soilmaps: 26 | soil_fn: soilgrids # source for soilmaps: {soilgrids} 27 | ptf_ksatver: brakensiek # pedotransfer function to calculate hydraulic conductivity: {brakensiek, cosby} 28 | 29 | setup_constant_pars: 30 | KsatHorFrac: 100 31 | Cfmax: 3.75653 32 | cf_soil: 0.038 33 | EoverR: 0.11 34 | InfiltCapPath: 5 35 | InfiltCapSoil: 600 36 | MaxLeakage: 0 37 | rootdistpar: -500 38 | TT: 0 39 | TTI: 2 40 | TTM: 0 41 | WHC: 0.1 42 | G_Cfmax: 5.3 43 | G_SIfrac: 0.002 44 | G_TT: 1.3 -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """Global test attributes and fixtures""" 2 | 3 | import os 4 | from os.path import join, dirname, realpath 5 | import yaml 6 | import pytest 7 | 8 | TESTDIR = dirname(realpath(__file__)) 9 | SNAKEDIR = join(TESTDIR, "..") 10 | 11 | config_fn = join(TESTDIR, "snake_config_model_test.yml") 12 | 13 | 14 | # Function to get argument from config file and return default value if not found 15 | def get_config(config, arg, default=None, optional=True): 16 | """ 17 | Function to get argument from config file and return default value if not found 18 | 19 | Parameters 20 | ---------- 21 | config : dict 22 | config file 23 | arg : str 24 | argument to get from config file 25 | default : str/int/float/list, optional 26 | default value if argument not found, by default None 27 | optional : bool, optional 28 | if True, argument is optional, by default True 29 | """ 30 | if arg in config: 31 | return config[arg] 32 | elif optional: 33 | return default 34 | else: 35 | raise ValueError(f"Argument {arg} not found in config file") 36 | 37 | 38 | @pytest.fixture() 39 | def config(): 40 | """Return config dictionary""" 41 | with open(config_fn, "rb") as f: 42 | cfdict = yaml.safe_load(f) 43 | return cfdict 44 | 45 | 46 | @pytest.fixture() 47 | def project_dir(config): 48 | """Return project directory""" 49 | project_dir = get_config(config, "project_dir", optional=False) 50 | project_dir = join(SNAKEDIR, project_dir) 51 | return project_dir 52 | 53 | 54 | @pytest.fixture() 55 | def data_sources(config): 56 | """Return data sources""" 57 | data_sources = get_config(config, "data_sources", optional=False) 58 | data_sources = join(SNAKEDIR, data_sources) 59 | return data_sources 60 | 61 | 62 | @pytest.fixture() 63 | def model_build_config(config): 64 | """Return model build config""" 65 | model_build_config = get_config(config, "model_build_config", optional=False) 66 | model_build_config = join(SNAKEDIR, model_build_config) 67 | return model_build_config 68 | -------------------------------------------------------------------------------- /src/weathergen/impose_climate_change.R: -------------------------------------------------------------------------------- 1 | 2 | 3 | # GENERAL STRESS TEST PARAMETERS ############################################### 4 | 5 | # General R settings and prequisites 6 | source("./src/weathergen/global.R") 7 | 8 | args <- commandArgs(trailingOnly = TRUE) 9 | 10 | # Pass command line options 11 | # Config file 12 | yaml <- yaml::read_yaml(args[2]) 13 | # Stochastic weather realization to be perturbed 14 | rlz_fn <- args[1] 15 | rlz_input <- weathergenr::readNetcdf(rlz_fn, leap.days = FALSE) 16 | # Climate stress file 17 | cst_data <- read.csv(args[3]) 18 | 19 | 20 | # General stress test parameters 21 | output_path <- yaml$imposeClimateChanges$output.path 22 | nc_file_prefix <- yaml$imposeClimateChanges$nc.file.prefix 23 | nc_file_suffix <- yaml$imposeClimateChanges$nc.file.suffix 24 | 25 | # temp_change_type/precip_change_type [string] 26 | temp_change_transient = yaml$temp$transient_change 27 | precip_change_transient = yaml$precip$transient_change 28 | 29 | 30 | # PARAMETERS CHANGING PER RUN ################################################## 31 | 32 | # Apply climate changes to baseline weather data stored in the nc file 33 | rlz_future <- weathergenr::imposeClimateChanges( 34 | climate.data = rlz_input$data, 35 | climate.grid = rlz_input$grid, 36 | sim.dates = rlz_input$date, 37 | change.factor.precip.mean = cst_data$precip_mean , 38 | change.factor.precip.variance = cst_data$precip_variance, 39 | change.factor.temp.mean = cst_data$temp_mean, 40 | transient.temp.change = temp_change_transient, 41 | transient.precip.change = precip_change_transient, 42 | calculate.pet = TRUE, 43 | compute.parallel = FALSE, 44 | num.cores = NULL, 45 | fit.method = "mme" 46 | ) 47 | 48 | # Save to netcdf file 49 | weathergenr::writeNetcdf( 50 | data = rlz_future, 51 | coord.grid = rlz_input$grid, 52 | output.path = output_path, 53 | origin.date = rlz_input$date[1], 54 | calendar.type = "noleap", 55 | nc.template.file = rlz_fn, 56 | nc.compression = 4, 57 | nc.spatial.ref = "spatial_ref", 58 | nc.file.prefix = nc_file_prefix, 59 | nc.file.suffix = nc_file_suffix 60 | ) 61 | 62 | 63 | ################################################################################ 64 | 65 | -------------------------------------------------------------------------------- /src/metrics_definition.py: -------------------------------------------------------------------------------- 1 | import xarray as xr 2 | from xclim.indices.stats import frequency_analysis 3 | 4 | 5 | ## High flows 6 | def returninterval(df, T): 7 | ds = xr.Dataset.from_dataframe(df) 8 | ds.attrs["units"] = "m3/d" 9 | Q_interval = frequency_analysis(ds, t=T, dist="genextreme", mode="max", freq="YS") 10 | df_interval = xr.Dataset.to_dataframe(Q_interval) 11 | return df_interval.transpose().iloc[:, 0] 12 | 13 | 14 | def returnintervalmulti(df): 15 | ds = xr.Dataset.from_dataframe(df) 16 | ds.attrs["units"] = "m3/d" 17 | all_T = [2, 5, 10, 20, 50, 100, 200] 18 | Q_rps = frequency_analysis(ds, t=all_T, dist="genextreme", mode="max", freq="YS") 19 | return Q_rps 20 | 21 | 22 | def Q7d_maxyear(df): 23 | return df.rolling(7).mean().resample("YE").max().mean() 24 | 25 | 26 | def Q7d_total(df): 27 | return df.rolling(7).mean() 28 | 29 | 30 | def highpulse(df): 31 | return df[df > df.quantile(0.75)].resample("YE").count().mean() 32 | 33 | 34 | def wetmonth_mean(df): 35 | monthlysum = df.groupby(df.index.month).sum() 36 | wetmonth = monthlysum.idxmax().iloc[0] 37 | df_wetmonth = df[df.index.month == wetmonth] 38 | return df_wetmonth.resample("YE").mean().mean() 39 | 40 | 41 | ## Low flows 42 | def returninterval_Q7d(df, T): 43 | df7D = df.rolling(7).mean() 44 | ds = xr.Dataset.from_dataframe(df7D) 45 | ds.attrs["units"] = "m3/d" 46 | Q_interval = frequency_analysis(ds, t=T, dist="genextreme", mode="min", freq="YS") 47 | df_interval = xr.Dataset.to_dataframe(Q_interval) 48 | return df_interval.transpose().iloc[:, 0] 49 | 50 | 51 | def Q7d_min(df): 52 | return df.rolling(7).mean().resample("YE").min().mean() 53 | 54 | 55 | def lowpulse(df): 56 | return df[df < df.quantile(0.25)].resample("YE").count().mean() 57 | 58 | 59 | def drymonth_mean(df): 60 | monthlysum = df.groupby(df.index.month).sum() 61 | drymonth = monthlysum.idxmin().iloc[0] 62 | df_drymonth = df[df.index.month == drymonth] 63 | return df_drymonth.resample("YE").mean().mean() 64 | 65 | 66 | def BFI(df): 67 | Q7d = df.rolling(7).mean().resample("YE").min() 68 | annmean = df.resample("YE").mean() 69 | return (Q7d / annmean).mean() 70 | -------------------------------------------------------------------------------- /src/prepare_weagen_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import math 3 | import yaml 4 | 5 | # Snakemake config file 6 | yml_snake = snakemake.params.snake_config 7 | weagen_config = snakemake.output.weagen_config 8 | cftype = snakemake.params.cftype 9 | 10 | 11 | def read_yml(yml_fn): 12 | """ "Read yml file and return a dictionnary""" 13 | with open(yml_fn, "r") as stream: 14 | yml = yaml.load(stream, Loader=yaml.FullLoader) 15 | return yml 16 | 17 | 18 | print(f"Preparing and writting the weather generator config file {weagen_config}") 19 | 20 | # Read existing config file 21 | yml_snake = read_yml(yml_snake) 22 | 23 | if cftype == "generate": 24 | # Get the simulation years 25 | middle_year = snakemake.params.middle_year 26 | wflow_run_length = snakemake.params.sim_years 27 | # Compute number of years needed based on the wflow run length and horizon and end of historical period in 2010 28 | nr_years_weagen = math.ceil((middle_year + wflow_run_length / 2) - 2010 + 2) 29 | 30 | # arguments from the default weagen config file 31 | yml_dict = read_yml(snakemake.params.default_config) 32 | # add new arguments from snakemake and yml_snake 33 | yml_add = { 34 | "output.path": snakemake.params.output_path, 35 | "sim.year.start": 2010, 36 | "sim.year.num": nr_years_weagen, 37 | "nc.file.prefix": snakemake.params.nc_file_prefix, 38 | "realizations_num": yml_snake["realizations_num"], 39 | } 40 | for k, v in yml_add.items(): 41 | yml_dict["generateWeatherSeries"][k] = v 42 | 43 | else: # stress test 44 | # new arguments 45 | yml_dict = { 46 | "imposeClimateChanges": { 47 | "output.path": snakemake.params.output_path, 48 | "nc.file.prefix": snakemake.params.nc_file_prefix, 49 | "nc.file.suffix": snakemake.params.nc_file_suffix, 50 | } 51 | } 52 | # arguments from yml_snake 53 | yml_dict["temp"] = yml_snake["temp"] 54 | yml_dict["precip"] = yml_snake["precip"] 55 | 56 | # Write the new weagen config 57 | if not os.path.isdir(os.path.dirname(weagen_config)): 58 | os.makedirs(os.path.dirname(weagen_config)) 59 | with open(weagen_config, "w") as f: 60 | yaml.dump(yml_dict, f, default_flow_style=False, sort_keys=False) 61 | -------------------------------------------------------------------------------- /pixi.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | authors = ["Sam Vente "] 3 | channels = ["conda-forge", "r"] 4 | description = "Add a short description here" 5 | name = "cst" 6 | platforms = ["win-64", "linux-64"] 7 | version = "0.1.0" 8 | 9 | [tasks] 10 | model-creation = { cmd = ["snakemake","all","-c","1","-s","Snakefile_model_creation","--configfile","config/snake_config_model_test.yml"]} 11 | climate-projections = {cmd = ["snakemake","all","-c","1","-s","Snakefile_climate_projections","--configfile","config/snake_config_model_test.yml","--keep-going"]} 12 | climate-experiment = {cmd = ["snakemake","all","-c","1","-s","Snakefile_climate_experiment","--configfile","config/snake_config_model_test.yml"]} 13 | 14 | docker-build = {cmd = ["docker", "build", "-t", "cst-workflow:$CST_WORKFLOW_VERSION"], env = {CST_WORKFLOW_VERSION="0.0.1"}} 15 | docker-push = {cmd = [" docker push https://containers.deltares.nl/CST/cst_workflows:$CST_WORKFLOW_TAG"], env = {CST_WORKFLOW_TAG = "latest"}, depends-on = ["docker-tag"]} 16 | docker-publish = {depends-on = ["docker-push"]} 17 | docker-tag = {cmd = ["docker", "tag", "cst-workflow:0.0.1", "https://containers.deltares.nl/CST/cst_workflows:$CST_WORKFLOW_TAG"], env = {CST_WORKFLOW_TAG = "latest"}, depends-on = ["docker-build"]} 18 | 19 | [target.win-64.dependencies] 20 | rtools = "*" 21 | 22 | [dependencies] 23 | cartopy = "*" 24 | datrie = "*" 25 | dvc = "*" 26 | descartes = "*" 27 | flit = ">=3.2" 28 | gcsfs = ">=2023.12.1" 29 | gdal = ">=3.1" 30 | geopandas = ">=0.10" 31 | graphviz = "*" 32 | hydromt = ">=0.9.4,<1.0" 33 | hydromt_wflow = ">=0.4.1,<0.6" 34 | jupyter = "*" 35 | matplotlib = "*" 36 | numpy = "*" 37 | pandas = ">=2.0.0" 38 | pip = "*" 39 | python = ">=3.9,<3.12" 40 | r-base = "*" 41 | r-essentials = "*" 42 | r-akima = "*" 43 | r-patchwork = "*" 44 | r-doparallel = "*" 45 | r-devtools = "*" 46 | r-dplyr = "*" 47 | r-e1071 = "*" 48 | r-fitdistrplus = "*" 49 | r-forecast = "*" 50 | r-ggplot2 = "*" 51 | r-mass = "*" 52 | r-ncdf4 = "*" 53 | r-scales = "*" 54 | r-sf = "*" 55 | r-tibble = "*" 56 | r-tidyr = "*" 57 | "r-r.utils" = "*" 58 | r-yaml = "*" 59 | rpy2 = "*" 60 | scipy = "*" 61 | seaborn = "*" 62 | shapely = ">=2.0.0" 63 | tabulate = "==0.8.10" 64 | xarray = "<=2024.3.0" 65 | xclim = "*" 66 | zarr = "*" 67 | 68 | [pypi-dependencies] 69 | hydroengine = "*" 70 | gwwapi = "*" 71 | snakemake = "*" 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # file based on github/gitignore 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | config/archieve/ 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | #*.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | .pytest_cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | .hypothesis/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | .static_storage/ 60 | .media/ 61 | local_settings.py 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | docs/generated/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | .spyproject 101 | 102 | # VScode 103 | .vscode 104 | 105 | # PyCharm 106 | .idea 107 | 108 | # Rope project settings 109 | .ropeproject 110 | 111 | # mkdocs documentation 112 | /site 113 | 114 | # mypy 115 | .mypy_cache/ 116 | 117 | # dask 118 | dask-worker-space/ 119 | 120 | # snakemake 121 | .snakemake/ 122 | 123 | # Examples 124 | examples/ 125 | .Rproj.user 126 | 127 | # R-related files 128 | /config/reticulate_config.R 129 | 130 | # Images 131 | *.png 132 | !/docs/_images/*.png 133 | 134 | # Tests 135 | tests/test_project 136 | 137 | # pixi environments 138 | .pixi 139 | *.egg-info 140 | -------------------------------------------------------------------------------- /tests/wflow_build_model.yml: -------------------------------------------------------------------------------- 1 | read_config: # read user-defined config template in build mode instead of the default 2 | config_fn: "../config/wflow_sbm.toml" 3 | 4 | setup_basemaps: 5 | hydrography_fn: merit_hydro_ihu # source hydrography data {merit_hydro, merit_hydro_1k} 6 | basin_index_fn: merit_hydro_index # source of basin index corresponding to hydrography_fn 7 | upscale_method: ihu # upscaling method for flow direction data, by default 'ihu' 8 | 9 | setup_rivers: 10 | hydrography_fn: merit_hydro_ihu # source hydrography data, should match basemaps source 11 | river_geom_fn: rivers_lin2019_v1 # river source data with river width and bankfull discharge 12 | river_upa: 32 # minimum upstream area threshold for the river map [km2] 13 | slope_len: 2000 # length over which tp calculate river slope [m] 14 | rivdph_method: powlaw # method to estimate depth {'powlaw', 'manning', 'gvf'} 15 | min_rivdph: 0.5 # minimum river depth [m] 16 | min_rivwth: 30 # minimum river width [m] 17 | smooth_len: 5000 # length over which to smooth river depth and river width [m] 18 | 19 | #setup_reservoirs: 20 | # reservoirs_fn: hydro_reservoirs # source for reservoirs based on GRAND: {hydro_reservoirs}; None to skip 21 | # min_area: 1.0 # minimum lake area to consider [km2] 22 | # timeseries_fn: hydroengine # if hydroengine is used to calculate some reservoir attributes instead of the GRanD and HydroLAKES db. 23 | 24 | #setup_lakes: 25 | # lakes_fn: hydro_lakes # source for lakes based on hydroLAKES: {hydro_lakes}; None to skip 26 | # min_area: 10.0 # minimum reservoir area to consider [km2] 27 | 28 | #setup_glaciers: 29 | # glaciers_fn: rgi # source for glaciers based on Randolph Glacier Inventory {rgi}; None to skip 30 | # min_area: 1.0 # minimum glacier area to consider [km2] 31 | 32 | setup_lulcmaps: 33 | lulc_fn: vito # source for lulc maps: {globcover, vito, corine} 34 | 35 | setup_laimaps: 36 | lai_fn: modis_lai # source for LAI: {modis_lai} 37 | 38 | setup_soilmaps: 39 | soil_fn: soilgrids # source for soilmaps: {soilgrids} 40 | ptf_ksatver: brakensiek # pedotransfer function to calculate hydraulic conductivity: {brakensiek, cosby} 41 | 42 | setup_constant_pars: 43 | KsatHorFrac: 100 44 | Cfmax: 3.75653 45 | cf_soil: 0.038 46 | EoverR: 0.11 47 | InfiltCapPath: 5 48 | InfiltCapSoil: 600 49 | MaxLeakage: 0 50 | rootdistpar: -500 51 | TT: 0 52 | TTI: 2 53 | TTM: 0 54 | WHC: 0.1 55 | G_Cfmax: 5.3 56 | G_SIfrac: 0.002 57 | G_TT: 1.3 -------------------------------------------------------------------------------- /src/setup_reservoirs_lakes_glaciers.py: -------------------------------------------------------------------------------- 1 | """Function to update a wflow model and add gauges and outputs""" 2 | from hydromt_wflow import WflowModel 3 | from hydromt.exceptions import NoDataException 4 | from hydromt.cli.cli_utils import parse_config 5 | 6 | import os 7 | from os.path import join 8 | from pathlib import Path 9 | from typing import Union 10 | 11 | 12 | def update_wflow_waterbodies_glaciers( 13 | wflow_root: Union[str, Path], 14 | config_fn: Union[str, Path], 15 | data_catalog: Union[str, Path] = "deltares_data", 16 | ): 17 | """ 18 | Update wflow model with reservoirs, lakes and glaciers. 19 | 20 | Write a file when everything is done for snakemake tracking. 21 | 22 | Parameters 23 | ---------- 24 | wflow_root : Union[str, Path] 25 | Path to the wflow model root folder 26 | config_fn : Union[str, Path] 27 | Path to the config file for setup of reservoirs, lakes and glaciers 28 | data_catalog : str 29 | Name of the data catalog to use 30 | """ 31 | 32 | # Instantiate wflow model 33 | mod = WflowModel(wflow_root, mode="r+", data_libs=data_catalog) 34 | 35 | # Read the config file 36 | config = parse_config(config_fn) 37 | 38 | # List of methods that ran successfully 39 | successful_methods = [] 40 | # List of methods that failed 41 | failed_methods = [] 42 | reasons = [] 43 | 44 | # Loop over reservoirs, lakes and glaciers methods 45 | for method in config: 46 | kwargs = {} if config[method] is None else config[method] 47 | try: 48 | mod._run_log_method(method, **kwargs) 49 | successful_methods.append(method) 50 | except (NoDataException, FileNotFoundError) as error: 51 | failed_methods.append(method) 52 | reasons.append(error) 53 | 54 | # Write model if there is any data to add 55 | if len(successful_methods) > 0: 56 | mod.write() 57 | 58 | # Write a file when everything is done for snakemake tracking 59 | text_out = join(wflow_root, "staticgeoms", "reservoirs_lakes_glaciers.txt") 60 | with open(text_out, "w") as f: 61 | f.write(f"Successful methods: {successful_methods}\n") 62 | f.write(f"Failed methods: {failed_methods}\n") 63 | f.write(f"Reasons: {reasons}\n") 64 | 65 | 66 | if __name__ == "__main__": 67 | if "snakemake" in globals(): 68 | sm = globals()["snakemake"] 69 | update_wflow_waterbodies_glaciers( 70 | wflow_root=os.path.dirname(sm.input.basin_nc), 71 | data_catalog=sm.params.data_catalog, 72 | config_fn=sm.params.config, 73 | ) 74 | else: 75 | update_wflow_waterbodies_glaciers( 76 | wflow_root=join(os.getcwd(), "examples", "my_project", "hydrology_model"), 77 | data_catalog="deltares_data", 78 | config_fn=join(os.getcwd(), "config", "wflow_update_waterbodies.yml"), 79 | ) 80 | -------------------------------------------------------------------------------- /config/wflow_sbm.toml: -------------------------------------------------------------------------------- 1 | casename = "wflow_sbm" 2 | calendar = "proleptic_gregorian" 3 | starttime = 2010-02-01T00:00:00 4 | endtime = 2010-02-10T00:00:00 5 | time_units = "days since 1900-01-01 00:00:00" 6 | timestepsecs = 86400 7 | 8 | [state] 9 | path_input = "instate/instates.nc" 10 | path_output = "run_default/outstate/outstates.nc" 11 | 12 | [input] 13 | path_forcing = "inmaps.nc" 14 | path_static = "staticmaps.nc" 15 | gauges = "wflow_gauges" 16 | ldd = "wflow_ldd" 17 | river_location = "wflow_river" 18 | subcatchment = "wflow_subcatch" 19 | forcing = [ "vertical.precipitation", "vertical.temperature", "vertical.potential_evaporation",] 20 | cyclic = [ "vertical.leaf_area_index",] 21 | 22 | [model] 23 | type = "sbm" 24 | masswasting = true 25 | snow = true 26 | reinit = true 27 | reservoirs = false 28 | lakes = false 29 | glacier = false 30 | kin_wave_iteration = true 31 | kw_river_tstep = 900 32 | kw_land_tstep = 3600 33 | thicknesslayers = [ 100, 300, 800,] 34 | 35 | 36 | [csv] 37 | path = "run_default/output.csv" 38 | 39 | [[csv.column]] 40 | header = "Q" 41 | map = "gauges" 42 | parameter = "lateral.river.q_av" 43 | 44 | [[csv.column]] 45 | header = "P" 46 | map = "subcatchment" 47 | parameter = "vertical.precipitation" 48 | reducer = "mean" 49 | 50 | [[csv.column]] 51 | header = "EP" 52 | map = "subcatchment" 53 | parameter = "vertical.potential_evaporation" 54 | reducer = "mean" 55 | 56 | [[csv.column]] 57 | header = "T" 58 | map = "subcatchment" 59 | parameter = "vertical.temperature" 60 | reducer = "mean" 61 | 62 | [state.vertical] 63 | satwaterdepth = "satwaterdepth" 64 | snow = "snow" 65 | tsoil = "tsoil" 66 | ustorelayerdepth = "ustorelayerdepth" 67 | snowwater = "snowwater" 68 | canopystorage = "canopystorage" 69 | 70 | [input.vertical] 71 | altitude = "wflow_dem" 72 | c = "c" 73 | cf_soil = "cf_soil" 74 | cfmax = "Cfmax" 75 | e_r = "EoverR" 76 | f = "f" 77 | infiltcappath = "InfiltCapPath" 78 | infiltcapsoil = "InfiltCapSoil" 79 | kext = "Kext" 80 | "kv₀" = "KsatVer" 81 | leaf_area_index = "LAI" 82 | m = "M_" 83 | maxleakage = "MaxLeakage" 84 | pathfrac = "PathFrac" 85 | potential_evaporation = "pet" 86 | precipitation = "precip" 87 | rootdistpar = "rootdistpar" 88 | rootingdepth = "RootingDepth" 89 | soilminthickness = "SoilMinThickness" 90 | soilthickness = "SoilThickness" 91 | specific_leaf = "Sl" 92 | storage_wood = "Swood" 93 | temperature = "temp" 94 | tt = "TT" 95 | tti = "TTI" 96 | ttm = "TTM" 97 | water_holding_capacity = "WHC" 98 | waterfrac = "WaterFrac" 99 | "θᵣ" = "thetaR" 100 | "θₛ" = "thetaS" 101 | 102 | [state.lateral.river] 103 | q = "q_river" 104 | h = "h_river" 105 | h_av = "h_av_river" 106 | 107 | [state.lateral.subsurface] 108 | ssf = "ssf" 109 | 110 | [state.lateral.land] 111 | q = "q_land" 112 | h = "h_land" 113 | h_av = "h_av_land" 114 | 115 | [input.lateral.river] 116 | length = "wflow_riverlength" 117 | n = "N_River" 118 | slope = "RiverSlope" 119 | width = "wflow_riverwidth" 120 | 121 | [input.lateral.subsurface] 122 | ksathorfrac.value = 100 123 | 124 | [input.lateral.land] 125 | n = "N" 126 | slope = "Slope" 127 | -------------------------------------------------------------------------------- /src/weathergen/generate_weather.R: -------------------------------------------------------------------------------- 1 | # General R settings and prequisites 2 | source("./src/weathergen/global.R") 3 | 4 | # Install required packages -- ONLY ONCE! 5 | # weathergen is assumed to be installed in R-environment 6 | library(yaml) 7 | 8 | args <- commandArgs(trailingOnly = TRUE) 9 | 10 | # Pass command line options 11 | yaml <- yaml::read_yaml(args[2]) 12 | weathergen_input_ncfile <- args[1] 13 | 14 | # Parse global parameters from the yaml configuration file 15 | historical_realizations_num <- yaml$generateWeatherSeries$realizations_num 16 | weathergen_output_path <- yaml$generateWeatherSeries$output.path 17 | 18 | # Step 1) Read weather data from the netcdf file 19 | ncdata <- weathergenr::readNetcdf(weathergen_input_ncfile) 20 | 21 | # Step 2) Generate new weather realizations 22 | stochastic_weather <- weathergenr::generateWeatherSeries( 23 | weather.data = ncdata$data, 24 | weather.grid = ncdata$grid, 25 | weather.date = ncdata$date, 26 | variable.names = yaml$general$variables, 27 | sim.year.num = yaml$generateWeatherSeries$sim.year.num, 28 | sim.year.start = yaml$generateWeatherSeries$sim.year.start, 29 | month.start = yaml$generateWeatherSeries$month.start, 30 | realization.num = historical_realizations_num, 31 | warm.variable = yaml$generateWeatherSeries$warm.variable, 32 | warm.signif.level = yaml$generateWeatherSeries$warm.signif.level, 33 | warm.sample.num = yaml$generateWeatherSeries$warm.sample.num, 34 | # warm.subset.criteria = yaml$generateWeatherSeries$warm.subset.criteria, #not needeed 35 | knn.sample.num = yaml$generateWeatherSeries$knn.sample.num, 36 | mc.wet.quantile = yaml$generateWeatherSeries$mc.wet.quantile, 37 | mc.extreme.quantile = yaml$generateWeatherSeries$mc.extreme.quantile, 38 | dry.spell.change = yaml$generateWeatherSeries$dry.spell.change, 39 | wet.spell.change = yaml$generateWeatherSeries$wet.spell.change, 40 | evaluate.model = yaml$generateWeatherSeries$evaluate.model, 41 | evaluate.grid.num = yaml$generateWeatherSeries$evaluate.grid.num, 42 | output.path = weathergen_output_path, 43 | seed = yaml$generateWeatherSeries$seed, 44 | compute.parallel = yaml$generateWeatherSeries$compute.parallel 45 | ) 46 | 47 | # STEP 3) Save each stochastic realization back to a netcdf file 48 | for (n in 1:historical_realizations_num) { 49 | 50 | # Resample order 51 | day_order <- match(stochastic_weather$resampled[[n]], ncdata$date) 52 | 53 | # Obtain stochastic series by re-ordering historical data 54 | stochastic_rlz <- lapply(ncdata$data, function(x) x[day_order,]) 55 | 56 | # save to netcdf 57 | weathergenr::writeNetcdf( 58 | data = stochastic_rlz, 59 | coord.grid = ncdata$grid, 60 | output.path = paste0(weathergen_output_path,"realization_",n,"/"), 61 | origin.date = stochastic_weather$dates[1], 62 | calendar.type = "noleap", 63 | nc.template.file = weathergen_input_ncfile, 64 | nc.compression = 4, 65 | nc.spatial.ref = "spatial_ref", 66 | nc.file.prefix = yaml$generateWeatherSeries$nc.file.prefix, 67 | nc.file.suffix = paste0(n,"_cst_0") 68 | ) 69 | 70 | } 71 | -------------------------------------------------------------------------------- /src/prepare_cst_parameters.py: -------------------------------------------------------------------------------- 1 | import os 2 | from os.path import join 3 | from pathlib import Path 4 | import pandas as pd 5 | import numpy as np 6 | import yaml 7 | 8 | from typing import Union, List 9 | 10 | 11 | def prep_cst_parameters( 12 | config_fn: Union[str, Path], 13 | csv_fns: List[Union[str, Path]], 14 | ): 15 | """ 16 | Prepare a csv file for each stress test scenario. 17 | 18 | Parameters 19 | ---------- 20 | config_fn : str, Path 21 | Path to the config file 22 | csv_fns : List[str, Path] 23 | List of paths to the output csv files. If None saves in same directory as 24 | config_fn and names from stress test parameters. 25 | """ 26 | 27 | # Read the yaml config 28 | with open(config_fn, "r") as stream: 29 | yml = yaml.load(stream, Loader=yaml.FullLoader) 30 | 31 | # Temperature change attributes 32 | delta_temp_mean_min = yml["temp"]["mean"]["min"] 33 | delta_temp_mean_max = yml["temp"]["mean"]["max"] 34 | temp_step_num = yml["temp"]["step_num"] + 1 35 | 36 | # Precip change attributes 37 | delta_precip_mean_min = yml["precip"]["mean"]["min"] 38 | delta_precip_mean_max = yml["precip"]["mean"]["max"] 39 | delta_precip_variance_min = yml["precip"]["variance"]["min"] 40 | delta_precip_variance_max = yml["precip"]["variance"]["min"] 41 | precip_step_num = yml["precip"]["step_num"] + 1 42 | 43 | # Number of stress tests 44 | ST_NUM = temp_step_num * precip_step_num 45 | # Stress test values per variables 46 | temp_values = np.linspace( 47 | delta_temp_mean_min, delta_temp_mean_max, temp_step_num, axis=1 48 | ) 49 | precip_values = np.linspace( 50 | delta_precip_mean_min, delta_precip_mean_max, precip_step_num, axis=1 51 | ) 52 | precip_var_values = np.linspace( 53 | delta_precip_variance_min, delta_precip_variance_max, precip_step_num, axis=1 54 | ) 55 | 56 | # Generate csv file for each stress test scenario 57 | i = 0 58 | for j in range(temp_step_num): 59 | temp_j = temp_values[:, j] 60 | for k in range(precip_step_num): 61 | precip_k = precip_values[:, k] 62 | precip_var_k = precip_var_values[:, k] 63 | 64 | # Create df and save to csv 65 | data = { 66 | "temp_mean": temp_j, 67 | "precip_mean": precip_k, 68 | "precip_variance": precip_var_k, 69 | } 70 | df = pd.DataFrame(data=data, dtype=np.float32, index=np.arange(1, 13)) 71 | df.index.name = "month" 72 | if csv_fns is None: 73 | csv_fn = join( 74 | os.path.dirname(config_fn), 75 | f"cst_{i+1}.csv", 76 | ) 77 | else: 78 | csv_fn = csv_fns[i] 79 | df.to_csv(csv_fn) 80 | 81 | i += 1 82 | 83 | 84 | if __name__ == "__main__": 85 | if "snakemake" in globals(): 86 | sm = globals()["snakemake"] 87 | prep_cst_parameters( 88 | config_fn=sm.input.config, 89 | csv_fns=sm.output.st_csv_fns, 90 | ) 91 | else: 92 | prep_cst_parameters( 93 | config_fn=join(os.getcwd(), "config", "snake_config_model_test.yml"), 94 | ) 95 | -------------------------------------------------------------------------------- /src/setup_gauges_and_outputs.py: -------------------------------------------------------------------------------- 1 | """Function to update a wflow model and add gauges and outputs""" 2 | from hydromt_wflow import WflowModel 3 | import os 4 | from os.path import join 5 | from pathlib import Path 6 | from typing import Union, List 7 | 8 | 9 | # Supported wflow outputs 10 | WFLOW_VARS = { 11 | "river discharge": "lateral.river.q_av", 12 | "precipitation": "vertical.precipitation", 13 | "overland flow": "lateral.land.q_av", 14 | "actual evapotranspiration": "vertical.actevap", 15 | "groundwater recharge": "vertical.recharge", 16 | "snow": "vertical.snowwater", 17 | } 18 | 19 | 20 | def update_wflow_gauges_outputs( 21 | wflow_root: Union[str, Path], 22 | data_catalog: Union[str, Path] = "deltares_data", 23 | gauges_fn: Union[str, Path, None] = None, 24 | outputs: List[str] = ["river discharge"], 25 | ): 26 | """ 27 | Update wflow model with output and optionnally gauges locations 28 | 29 | Parameters 30 | ---------- 31 | wflow_root : Union[str, Path] 32 | Path to the wflow model root folder 33 | data_catalog : str 34 | Name of the data catalog to use 35 | gauges_fn : Union[str, Path, None], optional 36 | Path to the gauges locations file, by default None 37 | outputs : List[str], optional 38 | List of outputs to add to the model, by default ["river discharge"] 39 | Available outputs are: 40 | - "river discharge" 41 | - "precipitation" 42 | - "overland flow" 43 | - "actual evapotranspiration" 44 | - "groundwater recharge" 45 | - "snow" 46 | """ 47 | 48 | # Instantiate wflow model 49 | mod = WflowModel(wflow_root, mode="r+", data_libs=data_catalog) 50 | 51 | # Add outlets 52 | mod.setup_outlets( 53 | river_only=True, 54 | gauge_toml_header=["Q"], 55 | gauge_toml_param=["lateral.river.q_av"], 56 | ) 57 | 58 | # Add gauges 59 | if gauges_fn is not None and os.path.isfile(gauges_fn): 60 | mod.setup_gauges( 61 | gauges_fn=gauges_fn, 62 | snap_to_river=True, 63 | derive_subcatch=True, 64 | toml_output="csv", 65 | gauge_toml_header=["Q", "P"], 66 | gauge_toml_param=["lateral.river.q_av", "vertical.precipitation"], 67 | ) 68 | 69 | # Add additional outputs to the config 70 | # For now assumes basin-average timeseries apart for river.q_av which is saved by default for all outlets and gauges 71 | if "river discharge" in outputs: 72 | outputs.remove("river discharge") 73 | 74 | for var in outputs: 75 | if var in WFLOW_VARS: 76 | mod.config["csv"]["column"].append( 77 | { 78 | "header": f"{var}_basavg", 79 | "reducer": "mean", 80 | "parameter": WFLOW_VARS[var], 81 | } 82 | ) 83 | 84 | mod.write() 85 | 86 | 87 | if __name__ == "__main__": 88 | if "snakemake" in globals(): 89 | sm = globals()["snakemake"] 90 | update_wflow_gauges_outputs( 91 | wflow_root=os.path.dirname(sm.input.basin_nc), 92 | data_catalog=sm.params.data_catalog, 93 | gauges_fn=sm.params.output_locs, 94 | outputs=sm.params.outputs, 95 | ) 96 | else: 97 | update_wflow_gauges_outputs( 98 | wflow_root=join(os.getcwd(), "examples", "my_project", "hydrology_model"), 99 | data_catalog="deltares_data", 100 | gauges_fn=None, 101 | outputs=["river discharge"], 102 | ) 103 | -------------------------------------------------------------------------------- /src/setup_time_horizon.py: -------------------------------------------------------------------------------- 1 | """Prepare a hydromt config file to be able to add forcing to a wflow model""" 2 | 3 | import hydromt 4 | from hydromt_wflow import WflowModel 5 | from pathlib import Path 6 | from typing import Union, Optional 7 | 8 | 9 | def prep_hydromt_update_forcing_config( 10 | starttime: str, 11 | endtime: str, 12 | fn_yml: Union[str, Path] = "wflow_build_forcing_historical.yml", 13 | precip_source: str = "era5", 14 | wflow_root: Optional[Union[str, Path]] = None, 15 | ): 16 | """Prepare a hydromt config file to be able to add forcing to a wflow model 17 | 18 | Parameters 19 | ---------- 20 | starttime : str 21 | Start time of the forcing, format YYYY-MM-DDTHH:MM:SS 22 | endtime : str 23 | End time of the forcing, format YYYY-MM-DDTHH:MM:SS 24 | fn_yml : str, Path 25 | Path to the output hydromt config file 26 | precip_source : str 27 | Name of the precipitation source to use 28 | wflow_root : str, Path 29 | Path to the wflow model root directory, if provided reads the model 30 | and adjust the forcing computation chunksizes depending on model size. 31 | """ 32 | # Check precip source and set options accordingly 33 | if precip_source == "eobs": 34 | clim_source = "eobs" 35 | oro_source = "eobs_orography" 36 | pet_method = "makkink" 37 | else: # (chirps is precip only) 38 | clim_source = "era5" 39 | oro_source = "era5_orography" 40 | pet_method = "debruin" 41 | 42 | # Check if wflow_root is provided and adjust the forcing computation chunksizes 43 | if wflow_root is not None: 44 | mod = WflowModel(root=wflow_root, mode="r") 45 | size = mod.grid.raster.size 46 | if size > 1e6: 47 | chunksize = 1 48 | elif size > 2.5e5: 49 | chunksize = 30 50 | elif size > 1e5: 51 | chunksize = 100 52 | else: 53 | chunksize = 365 54 | else: 55 | chunksize = 30 56 | 57 | forcing_options = { 58 | "setup_config": { 59 | "starttime": starttime, 60 | "endtime": endtime, 61 | "timestepsecs": 86400, 62 | "input.path_forcing": "../climate_historical/wflow_data/inmaps_historical.nc", 63 | }, 64 | "setup_precip_forcing": { 65 | "precip_fn": precip_source, 66 | "chunksize": chunksize, 67 | }, 68 | "setup_temp_pet_forcing": { 69 | "temp_pet_fn": clim_source, 70 | "press_correction": True, 71 | "temp_correction": True, 72 | "dem_forcing_fn": oro_source, 73 | "pet_method": pet_method, 74 | "skip_pet": False, 75 | "chunksize": chunksize, 76 | }, 77 | "write_config": {}, 78 | "write_forcing": {}, 79 | } 80 | 81 | # Save it to a hydroMT ini file 82 | hydromt.config.configwrite(fn_yml, forcing_options) 83 | 84 | 85 | if __name__ == "__main__": 86 | if "snakemake" in globals(): 87 | sm = globals()["snakemake"] 88 | prep_hydromt_update_forcing_config( 89 | starttime=sm.params.starttime, 90 | endtime=sm.params.endtime, 91 | fn_yml=sm.output.forcing_yml, 92 | precip_source=sm.params.clim_source, 93 | wflow_root=sm.params.basin_dir, 94 | ) 95 | else: 96 | prep_hydromt_update_forcing_config( 97 | starttime="2010-01-01T00:00:00", 98 | endtime="2010-12-31T00:00:00", 99 | fn_yml="wflow_build_forcing_historical.yml", 100 | precip_source="era5", 101 | ) 102 | -------------------------------------------------------------------------------- /src/downscale_climate_forcing.py: -------------------------------------------------------------------------------- 1 | from hydromt_wflow import WflowModel 2 | from pathlib import Path 3 | import os 4 | import numpy as np 5 | 6 | # Snakemake parameters 7 | config_out_fn = snakemake.output.toml 8 | fn_out = Path(snakemake.output.nc) 9 | fn_in = snakemake.input.nc 10 | data_libs = snakemake.input.data_sources 11 | model_root = snakemake.params.model_dir 12 | 13 | precip_source = snakemake.params.clim_source 14 | 15 | # Time horizon climate experiment and number of hydrological model run 16 | horizontime_climate = snakemake.params.horizontime_climate 17 | wflow_run_length = snakemake.params.run_length 18 | # Get start and end year 19 | startyear = int(horizontime_climate - np.ceil(wflow_run_length / 2)) 20 | endyear = int(horizontime_climate + np.round(wflow_run_length / 2)) 21 | starttime = f"{startyear}-01-01T00:00:00" 22 | endtime = f"{endyear}-12-31T00:00:00" 23 | 24 | oro_source = f"{precip_source}_orography" 25 | if precip_source == "eobs": 26 | pet_method = "makkink" 27 | else: # (chirps is precip only so combined with era5) 28 | pet_method = "debruin" 29 | 30 | # Get name of climate scenario (rlz_*_cst_*) 31 | fn_in_path = Path(fn_in, resolve_path=True) 32 | climate_name = os.path.basename(fn_in_path).split(".")[0] 33 | 34 | # Get options for toml file name 35 | config_out_fn = Path(config_out_fn) 36 | config_out_root = os.path.dirname(config_out_fn) 37 | config_out_name = os.path.basename(config_out_fn) 38 | 39 | # Instantiate model 40 | mod = WflowModel(root=model_root, mode="r+", data_libs=data_libs) 41 | 42 | # For large / small model domains adjust chunksize to compute forcing 43 | size = mod.grid.raster.size 44 | if size > 1e6: 45 | chunksize = 1 46 | elif size > 2.5e5: 47 | chunksize = 30 48 | elif size > 1e5: 49 | chunksize = 100 50 | else: 51 | chunksize = 365 52 | 53 | # Hydromt ini dictionnaries for update options 54 | update_options = { 55 | "setup_config": { 56 | "calendar": "noleap", 57 | "starttime": starttime, 58 | "endtime": endtime, 59 | "timestepsecs": 86400, 60 | "state.path_input": os.path.join("..", "instate", "instates.nc"), 61 | "state.path_output": f"outstates_{climate_name}.nc", 62 | "input.path_static": os.path.join("..", "staticmaps.nc"), 63 | "input.path_forcing": os.path.join("..", "..", "..", "..", fn_out), 64 | "csv.path": f"output_{climate_name}.csv", 65 | }, 66 | "set_root": { 67 | "root": config_out_root, 68 | "mode": "r+", 69 | }, 70 | "setup_precip_forcing": { 71 | "precip_fn": climate_name, 72 | "precip_clim_fn": None, 73 | "chunksize": chunksize, 74 | }, 75 | "setup_temp_pet_forcing": { 76 | "temp_pet_fn": climate_name, 77 | "press_correction": True, 78 | "temp_correction": True, 79 | "dem_forcing_fn": oro_source, 80 | "pet_method": pet_method, 81 | "chunksize": chunksize, 82 | }, 83 | # "write_forcing": {}, 84 | "write_config": { 85 | "config_name": config_out_name, 86 | "config_root": config_out_root, 87 | }, 88 | } 89 | 90 | ### Run Hydromt update using update_options dict ### 91 | # Update 92 | mod.update(opt=update_options) 93 | 94 | # The slicing of DateTimeNoLeap is not so well done by hydromt 95 | # Implement here 96 | for var in mod.forcing.keys(): 97 | da = mod.forcing[var] 98 | da = da.sel(time=slice(starttime, endtime)) 99 | mod.forcing[var] = da 100 | 101 | # Write forcing 102 | mod.write_forcing() 103 | 104 | # Weagen has strange timestamps, update in the wflow config 105 | start = da.time.values[0].strftime(format="%Y-%m-%dT%H:%M:%S") 106 | end = da.time.values[-1].strftime(format="%Y-%m-%dT%H:%M:%S") 107 | 108 | mod.set_config("starttime", start) 109 | mod.set_config("endtime", end) 110 | mod.write_config(config_name=config_out_name, config_root=config_out_root) 111 | -------------------------------------------------------------------------------- /src/copy_config_files.py: -------------------------------------------------------------------------------- 1 | """Copy snake config and other config files to the output directory.""" 2 | import os 3 | from os.path import join, dirname 4 | from pathlib import Path 5 | from typing import Union, List 6 | 7 | 8 | def copy_config_files( 9 | config: Union[str, Path], 10 | output_dir: Union[str, Path], 11 | config_out_name: str = None, 12 | other_config_files: List[Union[str, Path]] = [], 13 | ): 14 | """ 15 | Copy snake config and other config files to the output directory. 16 | 17 | If config_out_name is provided, the name of the output config will be changed. 18 | 19 | Parameters 20 | ---------- 21 | config : Union[str, Path] 22 | path to the snake config file 23 | output_dir : Union[str, Path] 24 | path to the output directory 25 | config_out_name : str, optional 26 | name of the output snake config file, by default None to use the same name 27 | as the input config 28 | other_config_files : List[Union[str, Path]], optional 29 | list of paths to other config files to copy, by default [] 30 | 31 | """ 32 | # Create output directory if it does not exist 33 | if not os.path.exists(output_dir): 34 | os.makedirs(output_dir) 35 | 36 | # Get the name of the output snake config file 37 | if config_out_name is None: 38 | config_out_name = os.path.basename(config) 39 | # Copy the snake config file to the output directory 40 | print(f"Copying {config_out_name} to {output_dir}") 41 | with open(config, "r") as f: 42 | snake_config = f.read() 43 | with open(join(output_dir, config_out_name), "w") as f: 44 | f.write(snake_config) 45 | 46 | # Copy other config files to the output directory 47 | for config_file in other_config_files: 48 | # Check if the file does exist 49 | # (eg predefined catalogs of hydromt do not have a path) 50 | if os.path.isfile(config_file): 51 | with open(config_file, "r") as f: 52 | config = f.read() 53 | config_name = os.path.basename(config_file) 54 | print(f"Copying {config_name} to {output_dir}") 55 | with open(join(output_dir, config_name), "w") as f: 56 | f.write(config) 57 | 58 | 59 | if __name__ == "__main__": 60 | if "snakemake" in globals(): 61 | sm = globals()["snakemake"] 62 | # Get the in and out path of the snake (main) config file 63 | config_snake = sm.input.config_snake 64 | config_snake_out = sm.output.config_snake_out 65 | 66 | # Derive output dir from the output path of the snake config file 67 | output_dir = dirname(config_snake_out) 68 | # Get new file name for the snake config file from config_snake_out 69 | config_snake_out_name = os.path.basename(config_snake_out) 70 | 71 | # Get other config files to copy based on workflow name 72 | workflow_name = sm.params.workflow_name 73 | other_config_files = [] 74 | if workflow_name == "model_creation": 75 | # Get the in and out path of the model build config file 76 | config_build = sm.input.config_build 77 | config_wb = sm.input.config_waterbodies 78 | data_sources = sm.params.data_catalogs 79 | other_config_files.extend([config_build, config_wb, data_sources]) 80 | elif ( 81 | workflow_name == "climate_projections" 82 | or workflow_name == "climate_experiment" 83 | ): 84 | data_sources = sm.params.data_catalogs 85 | other_config_files.extend([data_sources]) 86 | 87 | # Call the main function 88 | copy_config_files( 89 | config=config_snake, 90 | output_dir=output_dir, 91 | config_out_name=config_snake_out_name, 92 | other_config_files=other_config_files, 93 | ) 94 | 95 | else: 96 | copy_config_files( 97 | config="config/snake_config_model_test.yml", 98 | output_dir="examples/test/config", 99 | config_out_name=None, 100 | other_config_files=[], 101 | ) 102 | -------------------------------------------------------------------------------- /tests/data/tests_data_catalog.yml: -------------------------------------------------------------------------------- 1 | era5: 2 | data_type: RasterDataset 3 | path: era5.nc 4 | driver: netcdf 5 | filesystem: local 6 | meta: 7 | category: meteo 8 | history: Extracted from Copernicus Climate Data Store; resampled by Deltares to 9 | daily frequency 10 | paper_doi: 10.1002/qj.3803 11 | paper_ref: Hersbach et al. (2019) 12 | source_license: https://cds.climate.copernicus.eu/cdsapp/#!/terms/licence-to-use-copernicus-products 13 | source_url: https://doi.org/10.24381/cds.bd0915c6 14 | source_version: ERA5 daily data on pressure levels 15 | crs: 4326 16 | era5_orography: 17 | data_type: RasterDataset 18 | path: era5_orography.nc 19 | driver: netcdf 20 | filesystem: local 21 | meta: 22 | category: meteo 23 | history: Extracted from Copernicus Climate Data Store 24 | paper_doi: 10.1002/qj.3803 25 | paper_ref: Hersbach et al. (2019) 26 | source_license: https://cds.climate.copernicus.eu/cdsapp/#!/terms/licence-to-use-copernicus-products 27 | source_url: https://doi.org/10.24381/cds.bd0915c6 28 | source_version: ERA5 hourly data on pressure levels 29 | crs: 4326 30 | merit_hydro_ihu: 31 | data_type: RasterDataset 32 | path: merit_hydro_ihu/{variable}.tif 33 | driver: raster 34 | filesystem: local 35 | meta: 36 | category: topography 37 | paper_doi: 10.5281/zenodo.5166932 38 | paper_ref: Eilander et al. (2020) 39 | source_license: ODC-By 1.0 40 | source_url: https://zenodo.org/record/5166932#.YVbxJ5pByUk 41 | source_version: 1.0 42 | crs: 4326 43 | merit_hydro_index: 44 | data_type: GeoDataFrame 45 | path: merit_hydro_index.gpkg 46 | driver: vector 47 | filesystem: local 48 | meta: 49 | category: topography 50 | paper_doi: 10.5194/hess-2020-582 51 | paper_ref: Eilander et al. (in review) 52 | source_license: CC-BY-NC 4.0 53 | crs: 4326 54 | modis_lai: 55 | data_type: RasterDataset 56 | path: modis_lai.nc 57 | driver: netcdf 58 | filesystem: local 59 | meta: 60 | category: landuse 61 | notes: this dataset has been extracted from GEE ('MODIS/006/MCD15A3H') for the 62 | period '2003-01-01', '2017-12-31' 63 | paper_doi: 10.5067/MODIS/MCD15A3H.006 64 | paper_ref: Myneni et al (2015) 65 | source_license: https://lpdaac.usgs.gov/data/data-citation-and-policies/ 66 | source_url: https://lpdaac.usgs.gov/products/mcd15a3hv006/ 67 | source_version: MCD15A3H V006 68 | crs: 4326 69 | rivers_lin2019_v1: 70 | data_type: GeoDataFrame 71 | path: rivers_lin2019_v1.gpkg 72 | driver: vector 73 | filesystem: local 74 | meta: 75 | category: hydrography 76 | paper_doi: 10.5281/zenodo.3552776 77 | paper_ref: Lin et al. (2019) 78 | source_license: CC-BY-NC 4.0 79 | source_url: https://zenodo.org/record/3552776#.YVbOrppByUk 80 | source_version: 1 81 | soilgrids: 82 | data_type: RasterDataset 83 | path: soilgrids/{variable}.tif 84 | driver: raster 85 | filesystem: local 86 | meta: 87 | category: soil 88 | notes: "soilthickness is based on 1) soilgrids (global) and 2) dataset for Eurasia\ 89 | \ (ESDAC, 2004; Panagos et al., 2012): ESDAC, 2004. The european soil database\ 90 | \ distribution version 2.0, european commission and the European soil bureau\ 91 | \ network. esdac.jrc.ec.europa.eu, accessed: 2017-11-17. Panagos, P., Van\ 92 | \ Liedekerke, M., Jones, A., Montanarella, L., 2012. European soil data centre:\ 93 | \ Response to european policy support and public data requirements. Land Use\ 94 | \ Policy 29 (2), 329\xE2\u20AC\u201C338. \n" 95 | paper_doi: 10.1371/journal.pone.0169748 96 | paper_ref: Hengl et al. (2017) 97 | source_license: ODbL 98 | source_url: https://www.isric.org/explore/soilgrids/faq-soilgrids-2017 99 | source_version: 2017 100 | crs: 4326 101 | vito: 102 | data_type: RasterDataset 103 | path: vito.tif 104 | driver: raster 105 | filesystem: local 106 | meta: 107 | category: landuse 108 | paper_doi: 10.5281/zenodo.3939038 109 | paper_ref: Buchhorn et al (2020) 110 | source_url: https://land.copernicus.eu/global/products/lc 111 | source_version: v2.0.2 112 | crs: 4326 113 | -------------------------------------------------------------------------------- /src/get_region_preview.py: -------------------------------------------------------------------------------- 1 | """A command line script for delineating basins and subbasin with river geometries. 2 | The script expects a region string in the following format: 3 | -r "{'basin': [x,y]}" 4 | 5 | A data catalog file path: 6 | -d path/to/datacatalog 7 | 8 | A path to an output directory for the region GeoJSON 9 | -p path/to/output/dir 10 | 11 | There is also the option to use different hydrography and river files: 12 | -h , by default merit_hydro_ihu 13 | -n , by default rivers_atlas_v10 14 | 15 | The resulting (sub)basin is written as a GeoJSON to the given output directory 16 | in a folder named region 17 | """ 18 | 19 | from json import loads 20 | import argparse 21 | import logging 22 | from typing import List 23 | import os 24 | 25 | import hydromt 26 | from hydromt.cli.api import get_region 27 | import geopandas as gpd 28 | import pandas as pd 29 | 30 | from dask import config as dask_config 31 | 32 | dask_config.set(scheduler="single-threaded") 33 | 34 | 35 | logger = logging.getLogger(__name__) 36 | 37 | 38 | def get_basin_preview( 39 | region: dict, datacatalog_fn: str | List, hydrography_fn: str = "merit_hydro_ihu" 40 | ) -> dict | None: 41 | try: 42 | logger.info("Generating region with hydromt") 43 | region_geojson = get_region( 44 | region, datacatalog_fn, hydrography_fn=hydrography_fn 45 | ) 46 | 47 | region_geojson = loads(region_geojson) 48 | region_geom = gpd.GeoDataFrame.from_features(region_geojson, crs=4326) 49 | region_geom.drop(columns="value", inplace=True) 50 | return region_geom 51 | except IndexError as e: 52 | logger.warning(f"Region out of index, see following error: {e}") 53 | return None 54 | 55 | 56 | def get_river_preview( 57 | region: gpd.GeoDataFrame, 58 | datacatalog_fn: str | List, 59 | rivers_fn: str = "river_atlas_v10", 60 | ) -> gpd.GeoDataFrame | None: 61 | datacatalog = hydromt.DataCatalog(data_libs=datacatalog_fn) 62 | surface_water_source = datacatalog.get_source(source=rivers_fn) 63 | try: 64 | surface_water_data = surface_water_source.get_data(geom=region.geometry) 65 | surface_water_data = surface_water_data.clip( 66 | region 67 | ) # clip off geometries outside region 68 | return surface_water_data 69 | except IndexError as e: 70 | logger.warning(f"River geometry out of index, see following error{e}") 71 | return None 72 | 73 | 74 | if __name__ == "__main__": 75 | parser = argparse.ArgumentParser( 76 | "Get preview of region and rivers for a given geometry" 77 | ) 78 | parser.add_argument( 79 | "-r", 80 | "--region", 81 | help="Geometry of interest for which a basin/subbasin needs to be delineated.", 82 | ) 83 | parser.add_argument("-d", "--datacatalog", help="Path to data catalog") 84 | parser.add_argument("-p", "--path", help="Path to save the region geojson file to") 85 | parser.add_argument( 86 | "-f", 87 | "--hydrography_fn", 88 | help="hydrography file name for delineating (sub)basins", 89 | required=False, 90 | default="merit_hydro_ihu", 91 | ) 92 | parser.add_argument( 93 | "-n", 94 | "--rivers_fn", 95 | help="file name of rivers dataset to use", 96 | required=False, 97 | default="rivers_lin2019_v1", 98 | ) 99 | args = parser.parse_args() 100 | if not os.path.exists(args.path): 101 | raise ValueError(f"Directory '{args.path}' does not exist") 102 | region_json = args.region.replace("'", '"') 103 | region = loads(region_json) 104 | region_geom = get_basin_preview( 105 | region=region, 106 | datacatalog_fn=args.datacatalog, 107 | hydrography_fn=args.hydrography_fn, 108 | ) 109 | river_geom = get_river_preview( 110 | region=region_geom, 111 | datacatalog_fn=args.datacatalog, 112 | rivers_fn=args.rivers_fn, 113 | ) 114 | river_geom = river_geom[["geometry"]] 115 | region = gpd.GeoDataFrame(pd.concat([region_geom, river_geom])) 116 | file_path = os.path.join(args.path, "region.geojson") 117 | region_geojson = region.to_file(filename=file_path, driver="GeoJSON") 118 | -------------------------------------------------------------------------------- /src/prepare_climate_data_catalog.py: -------------------------------------------------------------------------------- 1 | import os 2 | import hydromt 3 | from pathlib import Path 4 | from typing import Union, List 5 | 6 | 7 | def prepare_clim_data_catalog( 8 | fns: List[Union[str, Path]], 9 | data_libs_like: Union[str, Path], 10 | source_like: str, 11 | fn_out: Union[str, Path] = None, 12 | ): 13 | """ 14 | Prepares a data catalog for files path listed in fns using the same attributes as source_like 15 | in data_libs_like. 16 | If fn_out is provided writes the data catalog to that path. 17 | 18 | Parameters 19 | ---------- 20 | fns: list(path) 21 | Path to the new data sources files. 22 | data_libs_like: str or list(str) 23 | Path to the existing data catalog where source_like is stored. 24 | source_like: str 25 | Data sources with the same attributes as the new sources in fns. 26 | fn_out: str, Optional 27 | If provided, writes the new data catalog to the corresponding path. 28 | 29 | Returns 30 | ------- 31 | climate_data_catalog: hydromt.DataCatalog 32 | Data catalog of the new sources in fns. 33 | """ 34 | 35 | data_catalog = hydromt.DataCatalog(data_libs=data_libs_like) 36 | dc_like = data_catalog[source_like].to_dict() 37 | 38 | climate_data_catalog = hydromt.DataCatalog() 39 | climate_data_dict = dict() 40 | 41 | for fn in fns: 42 | fn = Path(fn, resolve_path=True) 43 | name = os.path.basename(fn).split(".")[0] 44 | dc_fn = dc_like.copy() 45 | dc_fn["path"] = fn 46 | dc_fn["driver"] = "netcdf" 47 | if "driver_kwargs" not in dc_fn: 48 | dc_fn["driver_kwargs"] = dict() 49 | dc_fn["driver_kwargs"]["preprocess"] = "transpose_dims" 50 | dc_fn["driver_kwargs"]["lock"] = False 51 | if source_like == "chirps" or source_like == "chirps_global": # precip only 52 | dc_fn["meta"][ 53 | "processing" 54 | ] = f"Climate data generated from {source_like} for precipitation and era5 using Deltares/weathergenr" 55 | else: 56 | dc_fn["meta"][ 57 | "processing" 58 | ] = f"Climate data generated from {source_like} using Deltares/weathergenr" 59 | # remove entries that have already been processed while reading in the data: 60 | for v in ["unit_mult", "unit_add", "rename"]: 61 | if v in dc_fn: 62 | dc_fn.pop(v) 63 | climate_data_dict[name] = dc_fn 64 | 65 | # Add local orography for chirps resolution 66 | if source_like == "chirps" or source_like == "chirps_global": 67 | fn_oro = Path(fns[0], resolve_path=True) 68 | fn_oro = os.path.join( 69 | os.path.dirname(fn_oro), 70 | "..", 71 | "..", 72 | "climate_historical", 73 | "raw_data", 74 | f"{source_like}_orography.nc", 75 | ) 76 | fn_oro = Path(fn_oro, resolve_path=True) 77 | dc_oro = { 78 | "crs": 4326, 79 | "data_type": "RasterDataset", 80 | "driver": "netcdf", 81 | "kwargs": { 82 | "chunks": { 83 | "latitude": 100, 84 | "longitude": 100, 85 | }, 86 | "lock": False, 87 | }, 88 | "meta": { 89 | "category": "topography", 90 | "processing": f"Resampled DEM from MERIT Hydro to the resolution of {source_like}", 91 | }, 92 | "path": fn_oro, 93 | } 94 | climate_data_dict[f"{source_like}_orography"] = dc_oro 95 | 96 | climate_data_catalog.from_dict(climate_data_dict) 97 | if fn_out is not None: 98 | climate_data_catalog.to_yml(fn_out) 99 | 100 | 101 | if __name__ == "__main__": 102 | if "snakemake" in globals(): 103 | sm = globals()["snakemake"] 104 | # Read the two list of nc files and combine 105 | nc_fns = sm.input.cst_nc 106 | nc_fns2 = sm.input.rlz_nc 107 | nc_fns.extend(nc_fns2) 108 | 109 | prepare_clim_data_catalog( 110 | fns=nc_fns, 111 | data_libs_like=sm.params.data_sources, 112 | source_like=sm.params.clim_source, 113 | fn_out=sm.output.clim_data, 114 | ) 115 | else: 116 | raise ValueError("This script should be run from a snakemake environment") 117 | -------------------------------------------------------------------------------- /config/snake_config_model_test_linux.yml: -------------------------------------------------------------------------------- 1 | 2 | ########################################################################################### 3 | #################################### General setting ###################################### 4 | 5 | # Path to where the wflow model will be stored (include the project name) 6 | project_dir: examples/Gabon 7 | static_dir: config 8 | 9 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 10 | data_sources: config/deltares_data_linux.yml 11 | # # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) TODO: REMOVE 12 | # data_sources_climate: config/deltares_data_climate_projections_linux.yml 13 | data_sources_climate: config/cmip6_data.yml 14 | 15 | # Info on the historical climate data is used in the historic model run + reference data for the weather generator 16 | # Time horizon of the historical period 17 | starttime: "2000-01-01T00:00:00" 18 | endtime: "2020-12-31T00:00:00" 19 | # Historical climate data source (name as available in the data_sources catalog file). Either [era5, chirps_global, chirps, eobs]. 20 | clim_historical: era5 21 | 22 | ########################################################################################### 23 | #################################### Model building ####################################### 24 | 25 | # hydromt "region" for building the model, coordinates are in EPSG 4326 26 | # for more info on how to fill this, you can see the hydromt documentation: 27 | # https://deltares.github.io/hydromt/latest/user_guide/cli.html#region-options 28 | # For wflow please use either 'basin' or 'subbasin' keywords 29 | model_region: "{'subbasin': [9.666, 0.4476], 'uparea': 100}" 30 | # Grid resolution of the model in degrees 31 | model_resolution: 0.0062475 32 | 33 | # If available, please here put the path to the available observation stations locations files 34 | # csv file containing three columns: station_ID, x, y 35 | output_locations: "data/observations/output-locations-test.csv" # If set to None, will default to the wflow outlets 36 | # Path to the file with observed discharge timeseries per station 37 | observations_timeseries: "data/observations/observations_timeseries_test.csv" 38 | 39 | ########################################################################################### 40 | ################################## Climate projections #################################### 41 | # climate models and scenarios 42 | clim_project: cmip6 43 | #models: ['NOAA-GFDL/GFDL-ESM4', 'INM/INM-CM4-8' ,'INM/INM-CM5-0','NIMS-KMA/KACE-1-0-G','NCC/NorESM2-MM','NCC/NorESM2-LM','CMCC/CMCC-CM2-SR5','CMCC/CMCC-ESM2'] 44 | models: ['NOAA-GFDL/GFDL-ESM4', 'CMCC/CMCC-ESM2', 'INM/INM-CM5-0'] 45 | #scenarios: [ssp126, ssp245, ssp370, ssp585] 46 | scenarios: [ssp245, ssp585] 47 | members: [r1i1p1f1] 48 | 49 | variables: [precip, temp] 50 | 51 | # time horizons of the climate projections 52 | start_month_hyd_year: Jan 53 | historical: 1980, 2010 54 | future_horizons: 55 | near: 2030, 2060 56 | far: 2070, 2100 57 | 58 | ########################################################################################### 59 | ################################## Climate experiment ##################################### 60 | # Name of the experiment 61 | experiment_name: experiment 62 | # Number of climate realizations 63 | realizations_num: 2 64 | # Future Time horizon for the climate experiment 65 | horizontime_climate: 2050 66 | # Length of future run in years 67 | run_length: 20 68 | # Run historical realization 69 | run_historical: FALSE 70 | 71 | # Climate stress test parameters 72 | # Temperature change factors 73 | # including number of steps and min, max change 74 | temp: 75 | step_num: 1 76 | transient_change: TRUE 77 | # Monthly coefficients to set the minimum and maximum bounds of mean temp changes 78 | mean: 79 | min: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] 80 | max: [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0] 81 | 82 | # Precipitation change factors 83 | # including number of steps and min, max change 84 | precip: 85 | step_num: 2 86 | transient_change: TRUE 87 | # Monthly coefficients to set the minimum and maximum bounds of mean precip changes 88 | mean: 89 | min: [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7] 90 | max: [1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3] 91 | # Monthly coefficients to set the minimum and maximum bounds of precip variability 92 | variance: 93 | min: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 94 | max: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 95 | 96 | # Drought return period 97 | Tlow: 2 98 | # Flood return period 99 | Tpeak: 10 100 | # Aggregate realizations before computing statistics 101 | aggregate_rlz: TRUE -------------------------------------------------------------------------------- /tests/snake_config_model_test.yml: -------------------------------------------------------------------------------- 1 | 2 | ########################################################################################### 3 | #################################### General settings ###################################### 4 | 5 | # Path to where the wflow model will be stored (include the project name) 6 | project_dir: tests/test_project 7 | 8 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 9 | data_sources: tests/data/tests_data_catalog.yml 10 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 11 | #data_sources_climate: config/deltares_data_climate_projections.yml 12 | data_sources_climate: config/cmip6_data.yml 13 | 14 | # Info on the historical climate data is used in the historic model run + reference data for the weather generator 15 | # Time horizon of the historical period 16 | starttime: "2015-01-01T00:00:00" 17 | endtime: "2020-12-31T00:00:00" 18 | 19 | # Historical climate data source (name as available in the data_sources catalog file). Either [era5, chirps_global, chirps, eobs]. 20 | clim_historical: era5 21 | 22 | # List of wflow output variables to save 23 | wflow_outvars: ['river discharge', 'precipitation', 'overland flow', 'actual evapotranspiration', 'groundwater recharge', 'snow'] 24 | 25 | ########################################################################################### 26 | #################################### Model building ####################################### 27 | 28 | # hydromt "region" for building the model, coordinates are in EPSG 4326 29 | # for more info on how to fill this, you can see the hydromt documentation: 30 | # https://deltares.github.io/hydromt/latest/user_guide/cli.html#region-options 31 | # For wflow please use either 'basin' or 'subbasin' keywords 32 | model_region: "{'subbasin': [9.738, 0.4212], 'uparea': 70}" 33 | # Grid resolution of the model in degrees 34 | model_resolution: 0.0062475 35 | 36 | # If available, please here put the path to the available observation stations locations files 37 | # csv file containing three columns: station_ID, x, y 38 | output_locations: "tests/data/observations/output-locations-test.csv" # If set to None, will default to the wflow outlets 39 | # Path to the file with observed discharge timeseries per station 40 | observations_timeseries: "tests/data/observations/observations_timeseries_test.csv" 41 | 42 | ########################################################################################### 43 | ################################## Climate projections #################################### 44 | # climate models and scenarios 45 | clim_project: cmip6 46 | #models: ['NOAA-GFDL/GFDL-ESM4', 'INM/INM-CM4-8' ,'INM/INM-CM5-0','NIMS-KMA/KACE-1-0-G','NCC/NorESM2-MM','NCC/NorESM2-LM','CMCC/CMCC-CM2-SR5','CMCC/CMCC-ESM2'] 47 | models: ['NOAA-GFDL/GFDL-ESM4', 'INM/INM-CM5-0'] 48 | #scenarios: [ssp126, ssp245, ssp370, ssp585] 49 | scenarios: [ssp245, ssp585] 50 | members: [r1i1p1f1] 51 | 52 | variables: [precip, temp] 53 | 54 | # time horizons of the climate projections 55 | start_month_hyd_year: Jan 56 | historical: 2000, 2010 57 | future_horizons: 58 | near: 2050, 2060 59 | far: 2090, 2100 60 | 61 | # Additional gridded outputs (else only basin average) 62 | save_grids: FALSE 63 | 64 | ########################################################################################### 65 | ################################## Stress Test Experiment ################################# 66 | 67 | # Name of the experiment 68 | experiment_name: experiment 69 | # Number of climate realizations 70 | realizations_num: 2 71 | # Future Time horizon for the climate experiment 72 | horizontime_climate: 2050 73 | # Length of future run in years 74 | run_length: 10 75 | # Run historical realization 76 | run_historical: FALSE 77 | 78 | # Climate stress test parameters 79 | # Temperature change factors 80 | # including number of steps and min, max change 81 | temp: 82 | step_num: 1 83 | transient_change: TRUE 84 | # Monthly coefficients to set the minimum and maximum bounds of mean temp changes 85 | mean: 86 | min: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] 87 | max: [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0] 88 | 89 | # Precipitation change factors 90 | # including number of steps and min, max change 91 | precip: 92 | step_num: 1 93 | transient_change: TRUE 94 | # Monthly coefficients to set the minimum and maximum bounds of mean precip changes 95 | mean: 96 | min: [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7] 97 | max: [1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3] 98 | # Monthly coefficients to set the minimum and maximum bounds of precip variability 99 | variance: 100 | min: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 101 | max: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 102 | 103 | # Runoff statistics 104 | # Drought return period 105 | Tlow: 2 106 | # Flood return period 107 | Tpeak: 10 108 | # Aggregate realizations before computing statistics 109 | aggregate_rlz: TRUE -------------------------------------------------------------------------------- /config/snake_config_model_test.yml: -------------------------------------------------------------------------------- 1 | 2 | ########################################################################################### 3 | #################################### General settings ###################################### 4 | 5 | # Path to where the wflow model will be stored (include the project name) 6 | project_dir: examples/test 7 | static_dir: config 8 | 9 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 10 | data_sources: config/deltares_data.yml 11 | # Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other) 12 | #data_sources_climate: config/deltares_data_climate_projections.yml 13 | data_sources_climate: config/cmip6_data.yml 14 | 15 | # Info on the historical climate data is used in the historic model run + reference data for the weather generator 16 | # Time horizon of the historical period 17 | starttime: "2000-01-01T00:00:00" 18 | endtime: "2020-12-31T00:00:00" 19 | 20 | # Historical climate data source (name as available in the data_sources catalog file). Either [era5, chirps_global, chirps, eobs]. 21 | clim_historical: era5 22 | 23 | # List of wflow output variables to save 24 | wflow_outvars: ['river discharge', 'precipitation', 'overland flow', 'actual evapotranspiration', 'groundwater recharge', 'snow'] 25 | 26 | ########################################################################################### 27 | #################################### Model building ####################################### 28 | 29 | # hydromt "region" for building the model, coordinates are in EPSG 4326 30 | # for more info on how to fill this, you can see the hydromt documentation: 31 | # https://deltares.github.io/hydromt/latest/user_guide/cli.html#region-options 32 | # For wflow please use either 'basin' or 'subbasin' keywords 33 | model_region: "{'subbasin': [9.666, 0.4476], 'uparea': 100}" 34 | # Grid resolution of the model in degrees 35 | model_resolution: 0.0062475 36 | # Path to model build configuration 37 | model_build_config: "config/wflow_build_model.yml" 38 | 39 | # If available, please here put the path to the available observation stations locations files 40 | # csv file containing three columns: station_ID, x, y 41 | output_locations: "data/observations/output-locations-test.csv" # If set to None, will default to the wflow outlets 42 | # Path to the file with observed discharge timeseries per station 43 | observations_timeseries: "data/observations/observations_timeseries_test.csv" 44 | 45 | ########################################################################################### 46 | ################################## Climate projections #################################### 47 | # climate models and scenarios 48 | clim_project: cmip6 49 | #models: ['NOAA-GFDL/GFDL-ESM4', 'INM/INM-CM4-8' ,'INM/INM-CM5-0','NIMS-KMA/KACE-1-0-G','NCC/NorESM2-MM','NCC/NorESM2-LM','CMCC/CMCC-CM2-SR5','CMCC/CMCC-ESM2'] 50 | models: ['NOAA-GFDL/GFDL-ESM4', 'CMCC/CMCC-ESM2', 'INM/INM-CM5-0'] 51 | #scenarios: [ssp126, ssp245, ssp370, ssp585] 52 | scenarios: [ssp245, ssp585] 53 | members: [r1i1p1f1] 54 | 55 | variables: [precip, temp] 56 | 57 | # time horizons of the climate projections 58 | start_month_hyd_year: Jan 59 | historical: 1980, 2010 60 | future_horizons: 61 | near: 2030, 2060 62 | far: 2070, 2100 63 | 64 | # Additional gridded outputs (else only basin average) 65 | save_grids: FALSE 66 | 67 | ########################################################################################### 68 | ################################## Stress Test Experiment ################################# 69 | # Name of the experiment 70 | experiment_name: experiment 71 | # Number of climate realizations 72 | realizations_num: 2 73 | # Future Time horizon for the climate experiment 74 | horizontime_climate: 2050 75 | # Length of future run in years 76 | run_length: 20 77 | # Run historical realization 78 | run_historical: FALSE 79 | 80 | # Climate stress test parameters 81 | # Temperature change factors 82 | # including number of steps and min, max change 83 | temp: 84 | step_num: 1 85 | transient_change: TRUE 86 | # Monthly coefficients to set the minimum and maximum bounds of mean temp changes 87 | mean: 88 | min: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] 89 | max: [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0] 90 | 91 | # Precipitation change factors 92 | # including number of steps and min, max change 93 | precip: 94 | step_num: 2 95 | transient_change: TRUE 96 | # Monthly coefficients to set the minimum and maximum bounds of mean precip changes 97 | mean: 98 | min: [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7] 99 | max: [1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3] 100 | # Monthly coefficients to set the minimum and maximum bounds of precip variability 101 | variance: 102 | min: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 103 | max: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 104 | 105 | # Runoff statistics 106 | # Drought return period 107 | Tlow: 2 108 | # Flood return period 109 | Tpeak: 10 110 | # Aggregate realizations before computing statistics 111 | aggregate_rlz: TRUE -------------------------------------------------------------------------------- /src/get_change_climate_proj_summary.py: -------------------------------------------------------------------------------- 1 | """ 2 | Open monthly change files for all models/scenarios/horizon and compute/plot statistics 3 | """ 4 | 5 | import hydromt 6 | import os 7 | from pathlib import Path 8 | import seaborn as sns 9 | import xarray as xr 10 | import numpy as np 11 | 12 | from typing import Union, List, Dict 13 | 14 | 15 | def preprocess_coords(ds: xr.Dataset) -> xr.Dataset: 16 | """Preprocess function to remove unwanted coords""" 17 | coords_to_remove = ["height"] 18 | for coord in coords_to_remove: 19 | if coord in ds.coords: 20 | ds = ds.drop_vars(coord) 21 | return ds 22 | 23 | 24 | def summary_climate_proj( 25 | clim_dir: Union[Path, str], 26 | clim_files: List[Union[Path, str]], 27 | horizons: Dict, 28 | ): 29 | """ 30 | Compute climate change statitistics for all models/scenario/horizons. 31 | 32 | Also prepare response surface plot. 33 | 34 | Output in ``clim_dir``: 35 | - annual_change_scalar_stats_summary.nc/.csv: all change statistics (netcdf or csv) 36 | - annual_change_scalar_stats_summary_mean.csv: only mean change 37 | - plots/projected_climate_statistics.png: surface response plot 38 | 39 | Parameters 40 | ---------- 41 | clim_dir: Path 42 | Path to the projected climate directory of the project 43 | clim_files: List[Path, str] 44 | Path to the netcdf files of results per climate model / scenario / horizons 45 | horizons: Dict 46 | Time horizon names and start and end year separated with a comma. 47 | E.g {"far": "2070, 2100", "near": "2030, 2060"} 48 | """ 49 | # merge summary maps across models, scnearios and horizons. 50 | prefix = "annual_change_scalar_stats" 51 | # for prefix in prefixes: 52 | print(f"merging netcdf files {prefix}") 53 | # open annual scalar summary and merge 54 | list_files_not_empty = [] 55 | for file in clim_files: 56 | ds_f = xr.open_dataset(file, lock=False) 57 | # don't read in the dummy datasets 58 | if len(ds_f) > 0: 59 | list_files_not_empty.append(file) 60 | ds = xr.open_mfdataset( 61 | list_files_not_empty, coords="minimal", preprocess=preprocess_coords, lock=False 62 | ) 63 | dvars = ds.raster.vars 64 | name_nc_out = f"{prefix}_summary.nc" 65 | ds.to_netcdf( 66 | os.path.join(clim_dir, name_nc_out), 67 | encoding={k: {"zlib": True} for k in dvars}, 68 | ) 69 | 70 | # write as a csv 71 | ds.to_dataframe().to_csv( 72 | os.path.join(clim_dir, "annual_change_scalar_stats_summary.csv") 73 | ) 74 | 75 | # just keep mean for temp and precip for response surface plots 76 | df = ds.sel(stats="mean").to_dataframe() 77 | df.to_csv(os.path.join(clim_dir, "annual_change_scalar_stats_summary_mean.csv")) 78 | 79 | # plot change 80 | if not os.path.exists(os.path.join(clim_dir, "plots")): 81 | os.mkdir(os.path.join(clim_dir, "plots")) 82 | 83 | # Rename horizon names to the middle year of the period 84 | hz_list = df.index.levels[df.index.names.index("horizon")].tolist() 85 | for hz in horizons: 86 | # Get start and end year 87 | period = horizons[hz].split(",") 88 | period = [int(i) for i in period] 89 | horizon_year = int((period[0] + period[1]) / 2) 90 | # Replace hz values by horizon_year in hz_list 91 | hz_list = [horizon_year if h == hz else h for h in hz_list] 92 | 93 | # Set new values in multiindex dataframe 94 | df.index = df.index.set_levels(hz_list, level="horizon") 95 | 96 | scenarios = np.unique(df.index.get_level_values("scenario")) 97 | clrs = [] 98 | for s in scenarios: 99 | if s == "ssp126": 100 | clrs.append("#003466") 101 | if s == "ssp245": 102 | clrs.append("#f69320") 103 | if s == "ssp370": 104 | clrs.append("#df0000") 105 | elif s == "ssp585": 106 | clrs.append("#980002") 107 | g = sns.JointGrid( 108 | data=df, 109 | x="precip", 110 | y="temp", 111 | hue="scenario", 112 | ) 113 | g.plot_joint( 114 | sns.scatterplot, s=100, alpha=0.5, data=df, style="horizon", palette=clrs 115 | ) 116 | g.plot_marginals(sns.kdeplot, palette=clrs) 117 | g.set_axis_labels( 118 | xlabel="Change in mean precipitation (%)", 119 | ylabel="Change in mean temperature (degC)", 120 | ) 121 | g.ax_joint.grid() 122 | g.ax_joint.legend(loc="right", bbox_to_anchor=(1.5, 0.5)) 123 | g.savefig(os.path.join(clim_dir, "plots", "projected_climate_statistics.png")) 124 | 125 | 126 | if __name__ == "__main__": 127 | if "snakemake" in globals(): 128 | sm = globals()["snakemake"] 129 | # Snakemake options 130 | clim_project_dir = sm.params.clim_project_dir 131 | list_files = sm.input.stats_nc_change 132 | horizons = sm.params.horizons 133 | 134 | # Call the main function 135 | summary_climate_proj( 136 | clim_dir=clim_project_dir, 137 | clim_files=list_files, 138 | horizons=horizons, 139 | ) 140 | else: 141 | raise ValueError("This script should be run from a snakemake environment") 142 | -------------------------------------------------------------------------------- /src/plot_map.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Jan 13 16:23:11 2022 4 | 5 | @author: bouaziz 6 | """ 7 | 8 | # %% plot map 9 | 10 | import xarray as xr 11 | import numpy as np 12 | from os.path import basename 13 | import os 14 | import matplotlib.pyplot as plt 15 | from matplotlib import colors 16 | import matplotlib.patheffects as pe 17 | 18 | # plot maps dependencies 19 | import matplotlib.patches as mpatches 20 | import cartopy.crs as ccrs 21 | 22 | # import descartes # required to plot polygons 23 | import cartopy.io.img_tiles as cimgt 24 | 25 | from hydromt_wflow import WflowModel 26 | 27 | project_dir = snakemake.params.project_dir 28 | gauges_fn = snakemake.params.output_locations 29 | if gauges_fn is not None: 30 | gauges_name = f'gauges_{basename(gauges_fn).split(".")[0]}' 31 | else: 32 | gauges_name = None 33 | 34 | Folder_plots = f"{project_dir}/plots/wflow_model_performance" 35 | root = f"{project_dir}/hydrology_model" 36 | 37 | 38 | mod = WflowModel(root, mode="r") 39 | 40 | # read and mask the model elevation 41 | da = mod.grid["wflow_dem"].raster.mask_nodata() 42 | da.attrs.update(long_name="elevation", units="m") 43 | # read/derive river geometries 44 | gdf_riv = mod.rivers 45 | # read/derive model basin boundary 46 | gdf_bas = mod.basins 47 | plt.style.use("seaborn-v0_8-whitegrid") # set nice style 48 | # we assume the model maps are in the geographic CRS EPSG:4326 49 | proj = ccrs.PlateCarree() 50 | # adjust zoomlevel and figure size to your basis size & aspect 51 | zoom_level = 10 52 | figsize = (10, 8) 53 | shaded = False # shaded elevation (looks nicer with more pixels (e.g.: larger basins))! 54 | 55 | 56 | # initialize image with geoaxes 57 | fig = plt.figure(figsize=figsize) 58 | ax = fig.add_subplot(projection=proj) 59 | extent = np.array(da.raster.box.buffer(0.02).total_bounds)[[0, 2, 1, 3]] 60 | ax.set_extent(extent, crs=proj) 61 | 62 | # add sat background image 63 | ax.add_image(cimgt.QuadtreeTiles(), zoom_level, alpha=0.5) 64 | 65 | ## plot elevation\ 66 | # create nice colormap 67 | vmin, vmax = da.quantile([0.0, 0.98]).compute() 68 | c_dem = plt.cm.terrain(np.linspace(0.25, 1, 256)) 69 | cmap = colors.LinearSegmentedColormap.from_list("dem", c_dem) 70 | norm = colors.Normalize(vmin=vmin, vmax=vmax) 71 | kwargs = dict(cmap=cmap, norm=norm) 72 | # plot 'normal' elevation 73 | da.plot( 74 | transform=proj, ax=ax, zorder=1, cbar_kwargs=dict(aspect=30, shrink=0.8), **kwargs 75 | ) 76 | # plot elevation with shades 77 | if shaded: 78 | ls = colors.LightSource(azdeg=315, altdeg=45) 79 | dx, dy = da.raster.res 80 | _rgb = ls.shade( 81 | da.fillna(0).values, 82 | norm=kwargs["norm"], 83 | cmap=kwargs["cmap"], 84 | blend_mode="soft", 85 | dx=dx, 86 | dy=dy, 87 | vert_exag=200, 88 | ) 89 | rgb = xr.DataArray(dims=("y", "x", "rgb"), data=_rgb, coords=da.raster.coords) 90 | rgb = xr.where(np.isnan(da), np.nan, rgb) 91 | rgb.plot.imshow(transform=proj, ax=ax, zorder=2) 92 | 93 | # plot rivers with increasing width with stream order 94 | gdf_riv.plot( 95 | ax=ax, linewidth=gdf_riv["strord"] / 2, color="blue", zorder=3, label="river" 96 | ) 97 | # plot the basin boundary 98 | gdf_bas.boundary.plot(ax=ax, color="k", linewidth=0.3) 99 | # plot various vector layers if present 100 | if "gauges" in mod.geoms: 101 | mod.geoms["gauges"].plot( 102 | ax=ax, marker="d", markersize=25, facecolor="k", zorder=5, label="gauges" 103 | ) 104 | if gauges_name is not None and gauges_name in mod.geoms: 105 | mod.geoms[gauges_name].plot( 106 | ax=ax, 107 | marker="d", 108 | markersize=25, 109 | facecolor="blue", 110 | zorder=5, 111 | label="output locs", 112 | ) 113 | if "station_name" in mod.geoms[gauges_name].columns: 114 | mod.geoms[gauges_name].apply( 115 | lambda x: ax.annotate( 116 | text=x["station_name"], 117 | xy=x.geometry.coords[0], 118 | xytext=(2.0, 2.0), 119 | textcoords="offset points", 120 | # ha='left', 121 | # va = 'top', 122 | fontsize=5, 123 | fontweight="bold", 124 | color="black", 125 | path_effects=[pe.withStroke(linewidth=2, foreground="white")], 126 | ), 127 | axis=1, 128 | ) 129 | 130 | patches = ( 131 | [] 132 | ) # manual patches for legend, see https://github.com/geopandas/geopandas/issues/660 133 | if "lakes" in mod.geoms: 134 | kwargs = dict(facecolor="lightblue", edgecolor="black", linewidth=1, label="lakes") 135 | mod.geoms["lakes"].plot(ax=ax, zorder=4, **kwargs) 136 | patches.append(mpatches.Patch(**kwargs)) 137 | if "reservoirs" in mod.geoms: 138 | kwargs = dict(facecolor="blue", edgecolor="black", linewidth=1, label="reservoirs") 139 | mod.geoms["reservoirs"].plot(ax=ax, zorder=4, **kwargs) 140 | patches.append(mpatches.Patch(**kwargs)) 141 | if "glaciers" in mod.geoms: 142 | kwargs = dict(facecolor="grey", edgecolor="grey", linewidth=1, label="glaciers") 143 | mod.geoms["glaciers"].plot(ax=ax, zorder=4, **kwargs) 144 | patches.append(mpatches.Patch(**kwargs)) 145 | 146 | ax.xaxis.set_visible(True) 147 | ax.yaxis.set_visible(True) 148 | ax.set_ylabel(f"latitude [degree north]") 149 | ax.set_xlabel(f"longitude [degree east]") 150 | _ = ax.set_title(f"") 151 | legend = ax.legend( 152 | handles=[*ax.get_legend_handles_labels()[0], *patches], 153 | title="Legend", 154 | loc="lower right", 155 | frameon=True, 156 | framealpha=0.7, 157 | edgecolor="k", 158 | facecolor="white", 159 | ) 160 | 161 | # save figure 162 | # NOTE create figs folder in model root if it does not exist 163 | # fn_out = join(mod.root, "figs", "basemap.png") 164 | plt.savefig(os.path.join(Folder_plots, "basin_area.png"), dpi=300, bbox_inches="tight") 165 | -------------------------------------------------------------------------------- /src/extract_historical_climate.py: -------------------------------------------------------------------------------- 1 | """Extract historical climate data for a given region and time period.""" 2 | 3 | import os 4 | from os.path import join 5 | from pathlib import Path 6 | import geopandas as gpd 7 | import hydromt 8 | 9 | from typing import Union 10 | 11 | from dask.diagnostics import ProgressBar 12 | from hydromt.workflows.forcing import temp 13 | 14 | 15 | def prep_historical_climate( 16 | region_fn: Union[str, Path], 17 | fn_out: Union[str, Path], 18 | data_libs: Union[str, Path] = "deltares_data", 19 | clim_source: str = "era5", 20 | starttime: str = "1980-01-01T00:00:00", 21 | endtime: str = "2010-12-31T00:00:00", 22 | ): 23 | """ 24 | Extract historical climate data for a given region and time period. 25 | 26 | If clim_source is chirps or chirps_global, then only precip is extracted and will be 27 | combined with other climate data from era5. 28 | 29 | Parameters 30 | ---------- 31 | region_fn : str, Path 32 | Path to the region geojson file 33 | fn_out : str, Path 34 | Path to the output netcdf file 35 | data_libs : str, Path 36 | Path to the data catalogs yaml file or pre-defined catalogs 37 | clim_source : str 38 | Name of the climate source to use 39 | starttime : str 40 | Start time of the forcing, format YYYY-MM-DDTHH:MM:SS 41 | endtime : str 42 | End time of the forcing, format YYYY-MM-DDTHH:MM:SS 43 | """ 44 | # Read region 45 | region = gpd.read_file(region_fn) 46 | # Read data catalog 47 | data_catalog = hydromt.DataCatalog(data_libs=data_libs) 48 | 49 | # Extract climate data 50 | print("Extracting historical climate grid") 51 | if clim_source == "chirps" or clim_source == "chirps_global": # precip only 52 | print( 53 | f"{clim_source} only contains precipitation data. Combining with climate data from era5" 54 | ) 55 | # Get precip first 56 | ds = data_catalog.get_rasterdataset( 57 | clim_source, 58 | bbox=region.geometry.total_bounds, 59 | time_tuple=(starttime, endtime), 60 | buffer=1, 61 | variables=["precip"], 62 | ).to_dataset() 63 | # Get clim 64 | ds_clim = data_catalog.get_rasterdataset( 65 | "era5", 66 | bbox=region.geometry.total_bounds, 67 | time_tuple=(starttime, endtime), 68 | buffer=1, 69 | variables=["temp", "temp_min", "temp_max", "kin", "kout", "press_msl"], 70 | ) 71 | # Prepare orography data corresponding to chirps from merit hydro DEM 72 | # (needed for downscaling of climate variables) 73 | print( 74 | f"Preparing orography data for {clim_source} to downscale climate variables." 75 | ) 76 | dem = data_catalog.get_rasterdataset( 77 | "merit_hydro", 78 | bbox=region.geometry.total_bounds, 79 | time_tuple=(starttime, endtime), 80 | buffer=1, 81 | variables=["elevtn"], 82 | ) 83 | dem = dem.raster.reproject_like(ds, method="average") 84 | # Resample other variables and add to ds_precip 85 | print(f"Downscaling era5 variables to the resolution of {clim_source}") 86 | for var in ["press_msl", "kin", "kout"]: 87 | ds[var] = ds_clim[var].raster.reproject_like(ds, method="nearest_index") 88 | 89 | # Read era5 dem for temp downscaling 90 | dem_era5 = data_catalog.get_rasterdataset( 91 | "era5_orography", 92 | geom=ds.raster.box, # clip dem with forcing bbox for full coverage 93 | buffer=2, 94 | variables=["elevtn"], 95 | ).squeeze() 96 | for var in ["temp", "temp_min", "temp_max"]: 97 | ds[var] = temp( 98 | ds_clim[var], 99 | dem, 100 | dem_forcing=dem_era5, 101 | lapse_correction=True, 102 | freq=None, 103 | reproj_method="nearest_index", 104 | lapse_rate=-0.0065, 105 | ) 106 | # Save dem grid to netcdf 107 | fn_dem = os.path.join(os.path.dirname(fn_out), f"{clim_source}_orography.nc") 108 | dem.to_netcdf(fn_dem, mode="w") 109 | 110 | else: 111 | # Here we can afford larger chunks as we only extract and save 112 | data_catalog_temp = data_catalog.to_dict() 113 | if "driver_kwargs" not in data_catalog_temp[clim_source]: 114 | data_catalog_temp[clim_source]["driver_kwargs"] = {"chunks": "auto"} 115 | else: 116 | data_catalog_temp[clim_source]["driver_kwargs"]["chunks"] = "auto" 117 | data_catalog = hydromt.DataCatalog().from_dict(data_catalog_temp) 118 | 119 | ds = data_catalog.get_rasterdataset( 120 | clim_source, 121 | bbox=region.geometry.total_bounds, 122 | time_tuple=(starttime, endtime), 123 | buffer=1, 124 | variables=[ 125 | "precip", 126 | "temp", 127 | "temp_min", 128 | "temp_max", 129 | "kin", 130 | "kout", 131 | "press_msl", 132 | ], 133 | ) 134 | 135 | dvars = ds.raster.vars 136 | encoding = {k: {"zlib": True} for k in dvars} 137 | 138 | print("Saving to netcdf") 139 | delayed_obj = ds.to_netcdf(fn_out, encoding=encoding, mode="w", compute=False) 140 | with ProgressBar(): 141 | delayed_obj.compute() 142 | 143 | 144 | if __name__ == "__main__": 145 | if "snakemake" in globals(): 146 | sm = globals()["snakemake"] 147 | prep_historical_climate( 148 | region_fn=sm.input.prj_region, 149 | fn_out=sm.output.climate_nc, 150 | data_libs=sm.params.data_sources, 151 | clim_source=sm.params.clim_source, 152 | starttime="1980-01-01T00:00:00", 153 | endtime="2010-12-31T00:00:00", 154 | ) 155 | else: 156 | prep_historical_climate( 157 | region_fn=join( 158 | os.getcwd(), 159 | "examples", 160 | "my_project", 161 | "hydrology_model", 162 | "staticgeoms", 163 | "region.geojson", 164 | ), 165 | fn_out=join( 166 | os.getcwd(), 167 | "examples", 168 | "my_project", 169 | "climate_historical", 170 | "raw_data", 171 | "extract_historical.nc", 172 | ), 173 | data_libs="deltares_data", 174 | clim_source="era5", 175 | starttime="1980-01-01T00:00:00", 176 | endtime="2010-12-31T00:00:00", 177 | ) 178 | -------------------------------------------------------------------------------- /Snakefile_model_creation: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | # Get the snake_config file from the command line 4 | args = sys.argv 5 | config_path = args[args.index("--configfile") + 1] 6 | 7 | # Parsing the Snakemake config file (options for basins to build, data catalog, model output directory) 8 | # Function to get argument from config file and return default value if not found 9 | def get_config(config, arg, default=None, optional=True): 10 | """ 11 | Function to get argument from config file and return default value if not found 12 | 13 | Parameters 14 | ---------- 15 | config : dict 16 | config file 17 | arg : str 18 | argument to get from config file 19 | default : str/int/float/list, optional 20 | default value if argument not found, by default None 21 | optional : bool, optional 22 | if True, argument is optional, by default True 23 | """ 24 | if arg in config: 25 | return config[arg] 26 | elif optional: 27 | return default 28 | else: 29 | raise ValueError(f"Argument {arg} not found in config file") 30 | 31 | project_dir = get_config(config, 'project_dir', optional=False) 32 | static_dir = get_config(config, 'static_dir', optional=False) 33 | model_region = get_config(config, 'model_region', optional=False) 34 | model_resolution = get_config(config, 'model_resolution', 0.00833333) 35 | model_build_config = get_config(config, 'model_build_config', f'{static_dir}/wflow_build_model.yml') 36 | waterbodies_config = get_config(config, 'waterbodies_config', f'{static_dir}/wflow_update_waterbodies.yml') 37 | DATA_SOURCES = get_config(config, "data_sources", optional=False) 38 | 39 | output_locations = get_config(config, "output_locations", None) 40 | observations_timeseries = get_config(config, "observations_timeseries", None) 41 | 42 | wflow_outvars = get_config(config, "wflow_outvars", ['river discharge']) 43 | 44 | basin_dir = f"{project_dir}/hydrology_model" 45 | 46 | # Master rule: end with all model run and analysed with saving a output plot 47 | rule all: 48 | input: 49 | f"{project_dir}/plots/wflow_model_performance/hydro_wflow_1.png", 50 | f"{project_dir}/plots/wflow_model_performance/basin_area.png", 51 | f"{project_dir}/plots/wflow_model_performance/precip.png", 52 | f"{project_dir}/config/snake_config_model_creation.yml", 53 | 54 | # Rule to copy config files to the project_dir/config folder 55 | rule copy_config: 56 | input: 57 | config_build = model_build_config, 58 | config_snake = config_path, 59 | config_waterbodies = waterbodies_config, 60 | params: 61 | data_catalogs = DATA_SOURCES, 62 | workflow_name = "model_creation", 63 | output: 64 | config_snake_out = f"{project_dir}/config/snake_config_model_creation.yml", 65 | script: 66 | "src/copy_config_files.py" 67 | 68 | # Rule to build model hydromt build wflow 69 | rule create_model: 70 | input: 71 | hydromt_ini = model_build_config, 72 | output: 73 | basin_nc = f"{basin_dir}/staticmaps.nc", 74 | shell: 75 | """hydromt build wflow "{basin_dir}" --region "{model_region}" --opt setup_basemaps.res="{model_resolution}" -i "{input.hydromt_ini}" -d "{DATA_SOURCES}" --fo -vv""" 76 | 77 | # Rule to add reservoirs, lakes and glaciers to the built model (temporary hydromt fix) 78 | # Can be moved back to create_model rule when hydromt is updated 79 | rule add_reservoirs_lakes_glaciers: 80 | input: 81 | basin_nc = ancient(f"{basin_dir}/staticmaps.nc") 82 | output: 83 | text_out = f"{basin_dir}/staticgeoms/reservoirs_lakes_glaciers.txt" 84 | params: 85 | data_catalog = DATA_SOURCES, 86 | config = waterbodies_config, 87 | script: 88 | "src/setup_reservoirs_lakes_glaciers.py" 89 | 90 | # Rule to add gauges to the built model 91 | rule add_gauges_and_outputs: 92 | input: 93 | basin_nc = ancient(f"{basin_dir}/staticmaps.nc"), 94 | text = f"{basin_dir}/staticgeoms/reservoirs_lakes_glaciers.txt" 95 | output: 96 | gauges_fid = f"{basin_dir}/staticgeoms/gauges.geojson" 97 | params: 98 | output_locs = output_locations, 99 | outputs = wflow_outvars, 100 | data_catalog = DATA_SOURCES 101 | script: 102 | "src/setup_gauges_and_outputs.py" 103 | 104 | # Rule to prepare the time horizon 105 | rule setup_runtime: 106 | input: 107 | gauges_fid = f"{basin_dir}/staticgeoms/gauges.geojson" 108 | output: 109 | forcing_yml = f"{project_dir}/config/wflow_build_forcing_historical.yml" 110 | params: 111 | starttime = get_config(config, "starttime", optional=False), 112 | endtime = get_config(config, "endtime", optional=False), 113 | clim_source = get_config(config, "clim_historical", optional=False), 114 | basin_dir = basin_dir, 115 | script: "src/setup_time_horizon.py" 116 | 117 | # Rule to add forcing to the updated model 118 | rule add_forcing: 119 | input: 120 | forcing_ini = f"{project_dir}/config/wflow_build_forcing_historical.yml" 121 | output: 122 | forcing_fid = f"{project_dir}/climate_historical/wflow_data/inmaps_historical.nc" 123 | shell: 124 | """hydromt update wflow "{basin_dir}" -i "{input.forcing_ini}" -d "{DATA_SOURCES}" -vv""" 125 | 126 | # Rule to run the wflow model 127 | rule run_wflow: 128 | input: 129 | forcing_fid = f"{project_dir}/climate_historical/wflow_data/inmaps_historical.nc" 130 | output: 131 | csv_file = f"{basin_dir}/run_default/output.csv" 132 | params: 133 | toml_fid = f"{basin_dir}/wflow_sbm.toml" 134 | shell: 135 | """ julia --threads 4 -e "using Wflow; Wflow.run()" "{params.toml_fid}" """ 136 | 137 | # Rule to analyse and plot wflow model run results --> final output 138 | rule plot_results: 139 | input: 140 | csv_file = f"{basin_dir}/run_default/output.csv", 141 | script = "src/plot_results.py" 142 | output: 143 | output_png = f"{project_dir}/plots/wflow_model_performance/hydro_wflow_1.png", 144 | params: 145 | project_dir = f"{project_dir}", 146 | observations_file = observations_timeseries, 147 | gauges_output_fid = output_locations, 148 | script: "src/plot_results.py" 149 | 150 | # Rule to plot the wflow basin, rivers, gauges and DEM on a map 151 | rule plot_map: 152 | input: 153 | gauges_fid = f"{basin_dir}/staticgeoms/gauges.geojson" 154 | output: 155 | output_map_png = f"{project_dir}/plots/wflow_model_performance/basin_area.png", 156 | params: 157 | project_dir = f"{project_dir}", 158 | output_locations = output_locations, 159 | script: "src/plot_map.py" 160 | 161 | # Rule to plot the forcing on a map 162 | rule plot_forcing: 163 | input: 164 | forcing_fid = f"{project_dir}/climate_historical/wflow_data/inmaps_historical.nc", 165 | output: 166 | output_forcing_map = f"{project_dir}/plots/wflow_model_performance/precip.png", 167 | params: 168 | project_dir = f"{project_dir}", 169 | gauges_fid = f"{basin_dir}/staticgeoms/gauges.geojson" 170 | script: "src/plot_map_forcing.py" 171 | -------------------------------------------------------------------------------- /src/plot_map_forcing.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Jan 13 16:23:11 2022 4 | 5 | @author: bouaziz 6 | """ 7 | 8 | # plot map 9 | 10 | import xarray as xr 11 | import numpy as np 12 | from os.path import basename, join 13 | import os 14 | from pathlib import Path 15 | from typing import Union 16 | import matplotlib.pyplot as plt 17 | from matplotlib import colors 18 | 19 | # plot maps dependencies 20 | import matplotlib.patches as mpatches 21 | import cartopy.crs as ccrs 22 | 23 | # import descartes # required to plot polygons 24 | import cartopy.io.img_tiles as cimgt 25 | 26 | from hydromt_wflow import WflowModel 27 | 28 | 29 | def plot_map_model(mod, da, figname, gauges_name): 30 | # read/derive river geometries 31 | gdf_riv = mod.rivers 32 | # read/derive model basin boundary 33 | gdf_bas = mod.basins 34 | plt.style.use("seaborn-v0_8-whitegrid") # set nice style 35 | # we assume the model maps are in the geographic CRS EPSG:4326 36 | proj = ccrs.PlateCarree() 37 | # adjust zoomlevel and figure size to your basis size & aspect 38 | zoom_level = 10 39 | figsize = (10, 8) 40 | shaded = ( 41 | False # shaded elevation (looks nicer with more pixels (e.g.: larger basins))! 42 | ) 43 | 44 | # initialize image with geoaxes 45 | fig = plt.figure(figsize=figsize) 46 | ax = fig.add_subplot(projection=proj) 47 | extent = np.array(da.raster.box.buffer(0.02).total_bounds)[[0, 2, 1, 3]] 48 | ax.set_extent(extent, crs=proj) 49 | 50 | # add sat background image 51 | ax.add_image(cimgt.QuadtreeTiles(), zoom_level, alpha=0.5) 52 | 53 | # plot da variables. 54 | da.plot( 55 | transform=proj, 56 | ax=ax, 57 | zorder=1, 58 | cbar_kwargs=dict(aspect=30, shrink=0.8), 59 | ) # **kwargs) 60 | # plot elevation with shades 61 | if shaded: 62 | ls = colors.LightSource(azdeg=315, altdeg=45) 63 | dx, dy = da.raster.res 64 | _rgb = ls.shade( 65 | da.fillna(0).values, 66 | norm=kwargs["norm"], 67 | cmap=kwargs["cmap"], 68 | blend_mode="soft", 69 | dx=dx, 70 | dy=dy, 71 | vert_exag=200, 72 | ) 73 | rgb = xr.DataArray(dims=("y", "x", "rgb"), data=_rgb, coords=da.raster.coords) 74 | rgb = xr.where(np.isnan(da), np.nan, rgb) 75 | rgb.plot.imshow(transform=proj, ax=ax, zorder=2) 76 | 77 | # plot rivers with increasing width with stream order 78 | gdf_riv.plot( 79 | ax=ax, linewidth=gdf_riv["strord"] / 2, color="blue", zorder=3, label="river" 80 | ) 81 | # plot the basin boundary 82 | gdf_bas.boundary.plot(ax=ax, color="k", linewidth=0.3) 83 | # plot various vector layers if present 84 | if "gauges" in mod.geoms: 85 | mod.geoms["gauges"].plot( 86 | ax=ax, marker="d", markersize=25, facecolor="k", zorder=5, label="gauges" 87 | ) 88 | if gauges_name in mod.geoms: 89 | mod.geoms[gauges_name].plot( 90 | ax=ax, 91 | marker="d", 92 | markersize=25, 93 | facecolor="blue", 94 | zorder=5, 95 | label="output locs", 96 | ) 97 | patches = ( 98 | [] 99 | ) # manual patches for legend, see https://github.com/geopandas/geopandas/issues/660 100 | if "lakes" in mod.geoms: 101 | kwargs = dict( 102 | facecolor="lightblue", edgecolor="black", linewidth=1, label="lakes" 103 | ) 104 | mod.geoms["lakes"].plot(ax=ax, zorder=4, **kwargs) 105 | patches.append(mpatches.Patch(**kwargs)) 106 | if "reservoirs" in mod.geoms: 107 | kwargs = dict( 108 | facecolor="blue", edgecolor="black", linewidth=1, label="reservoirs" 109 | ) 110 | mod.geoms["reservoirs"].plot(ax=ax, zorder=4, **kwargs) 111 | patches.append(mpatches.Patch(**kwargs)) 112 | if "glaciers" in mod.geoms: 113 | kwargs = dict(facecolor="grey", edgecolor="grey", linewidth=1, label="glaciers") 114 | mod.geoms["glaciers"].plot(ax=ax, zorder=4, **kwargs) 115 | patches.append(mpatches.Patch(**kwargs)) 116 | 117 | ax.xaxis.set_visible(True) 118 | ax.yaxis.set_visible(True) 119 | ax.set_ylabel(f"latitude [degree north]") 120 | ax.set_xlabel(f"longitude [degree east]") 121 | _ = ax.set_title(f"wflow base map") 122 | legend = ax.legend( 123 | handles=[*ax.get_legend_handles_labels()[0], *patches], 124 | title="Legend", 125 | loc="lower right", 126 | frameon=True, 127 | framealpha=0.7, 128 | edgecolor="k", 129 | facecolor="white", 130 | ) 131 | 132 | # save figure 133 | plt.savefig( 134 | os.path.join(Folder_plots, f"{figname}.png"), dpi=300, bbox_inches="tight" 135 | ) 136 | 137 | 138 | def plot_forcing( 139 | wflow_root: Union[str, Path], 140 | plot_dir=None, 141 | gauges_name: str = None, 142 | ): 143 | """ 144 | Plot the wflow forcing in separate maps. 145 | 146 | Parameters 147 | ---------- 148 | wflow_root : Union[str, Path] 149 | Path to the wflow model root folder 150 | plot_dir : str, optional 151 | Path to the output folder. If None (default), create a folder "plots" 152 | in the wflow_root folder. 153 | gauges_name : str, optional 154 | Name of the gauges to plot. If None (default), no gauges are plot. 155 | """ 156 | mod = WflowModel(wflow_root, mode="r") 157 | 158 | # If plotting dir is None, create 159 | if plot_dir is None: 160 | plot_dir = os.path.join(wflow_root, "plots") 161 | if not os.path.exists(plot_dir): 162 | os.makedirs(plot_dir) 163 | 164 | # Forcing variables to plot 165 | forcing_vars = { 166 | "precip": {"long_name": "precipitation", "unit": "mm y$^{-1}$"}, 167 | "pet": {"long_name": "potential evap.", "unit": "mm y$^{-1}$"}, 168 | "temp": {"long_name": "temperature", "unit": "degC"}, 169 | } 170 | 171 | # plot mean annual precip temp and potential evap. 172 | for forcing_var, forcing_char in forcing_vars.items(): 173 | print(forcing_var, forcing_char) 174 | if forcing_var == "temp": 175 | da = mod.forcing[forcing_var].resample(time="A").mean("time").mean("time") 176 | else: 177 | da = mod.forcing[forcing_var].resample(time="A").sum("time").mean("time") 178 | da = da.where(da > 0) 179 | da = da.where(mod.grid["wflow_subcatch"] >= 0) 180 | da.attrs.update(long_name=forcing_char["long_name"], units=forcing_char["unit"]) 181 | figname = f"{forcing_var}" 182 | plot_map_model(mod, da, figname, gauges_name) 183 | 184 | 185 | if __name__ == "__main__": 186 | if "snakemake" in globals(): 187 | sm = globals()["snakemake"] 188 | 189 | # Parse snake options 190 | project_dir = sm.params.project_dir 191 | gauges_fn = sm.params.gauges_fid 192 | gauges_name = basename(gauges_fn).split(".")[0] 193 | 194 | Folder_plots = f"{project_dir}/plots/wflow_model_performance" 195 | root = f"{project_dir}/hydrology_model" 196 | 197 | plot_forcing( 198 | wflow_root=root, 199 | plot_dir=Folder_plots, 200 | gauges_name=gauges_name, 201 | ) 202 | else: 203 | plot_forcing( 204 | wflow_root=join(os.getcwd(), "examples", "my_project", "hydrology_model") 205 | ) 206 | -------------------------------------------------------------------------------- /Snakefile_climate_projections: -------------------------------------------------------------------------------- 1 | import itertools 2 | import sys 3 | 4 | # read path of the config file to give to the weagen scripts 5 | args = sys.argv 6 | config_path = args[args.index("--configfile") + 1] 7 | 8 | # Parsing the Snakemake config file (options for basins to build, data catalog, model output directory) 9 | # Function to get argument from config file and return default value if not found 10 | def get_config(config, arg, default=None, optional=True): 11 | """ 12 | Function to get argument from config file and return default value if not found 13 | 14 | Parameters 15 | ---------- 16 | config : dict 17 | config file 18 | arg : str 19 | argument to get from config file 20 | default : str/int/float/list, optional 21 | default value if argument not found, by default None 22 | optional : bool, optional 23 | if True, argument is optional, by default True 24 | """ 25 | if arg in config: 26 | return config[arg] 27 | elif optional: 28 | return default 29 | else: 30 | raise ValueError(f"Argument {arg} not found in config file") 31 | 32 | project_dir = get_config(config, 'project_dir', optional=False) 33 | DATA_SOURCES = get_config(config, "data_sources_climate", optional=False) 34 | 35 | clim_project = get_config(config, "clim_project", optional=False) 36 | models = get_config(config, "models", optional=False) 37 | scenarios = get_config(config, "scenarios", optional=False) 38 | members = get_config(config, "members", optional=False) 39 | variables = get_config(config, "variables", optional=False) 40 | 41 | start_month_hyd_year = get_config(config, "start_month_hyd_year", "Jan") 42 | time_horizon_hist = get_config(config, "historical", optional=False) 43 | future_horizons = get_config(config, "future_horizons", optional=False) 44 | 45 | save_grids = get_config(config, "save_grids", False) 46 | 47 | basin_dir = f"{project_dir}/hydrology_model" 48 | clim_project_dir = f"{project_dir}/climate_projections/{clim_project}" 49 | 50 | ### Dictionary elements from the config based on wildcards 51 | def get_horizon(wildcards): 52 | return config["future_horizons"][wildcards.horizon] 53 | 54 | # Master rule: end with csv with change factors for each scenario and model 55 | rule all: 56 | input: 57 | (clim_project_dir + "/annual_change_scalar_stats_summary.nc"), 58 | (clim_project_dir + "/annual_change_scalar_stats_summary.csv"), 59 | (clim_project_dir + "/annual_change_scalar_stats_summary_mean.csv"), 60 | stats_change_plt = (clim_project_dir + "/plots/projected_climate_statistics.png"), 61 | precip_plt = (clim_project_dir + "/plots/precipitation_anomaly_projections_abs.png"), 62 | temp_plt = (clim_project_dir + "/plots/temperature_anomaly_projections_abs.png"), 63 | snake_config = f"{project_dir}/config/snake_config_climate_projections.yml", 64 | 65 | ruleorder: monthly_stats_hist > monthly_stats_fut > monthly_change > monthly_change_scalar_merge 66 | 67 | # Rule to copy config files to the project_dir/config folder 68 | rule copy_config: 69 | input: 70 | config_snake = config_path, 71 | params: 72 | data_catalogs = DATA_SOURCES, 73 | workflow_name = "climate_projections", 74 | output: 75 | config_snake_out = f"{project_dir}/config/snake_config_climate_projections.yml", 76 | script: 77 | "src/copy_config_files.py" 78 | 79 | # Rule to calculate mean monthly statistics for historical and future scenarios - grid saved to netcdf 80 | # also calculate monthly time series averaged over the grid. 81 | rule monthly_stats_hist: 82 | input: 83 | region_fid = ancient(f"{basin_dir}/staticgeoms/region.geojson"), 84 | output: 85 | #stats_nc_hist = (clim_project_dir + "/historical_stats_{model}.nc"), 86 | stats_time_nc_hist = temp(clim_project_dir + "/historical_stats_time_{model}.nc"), 87 | params: 88 | yml_fid = DATA_SOURCES, 89 | project_dir = f"{project_dir}", 90 | name_scenario = "historical", 91 | name_members = members, 92 | name_model = "{model}", 93 | name_clim_project = clim_project, 94 | variables = variables, 95 | save_grids = save_grids, 96 | script: "src/get_stats_climate_proj.py" 97 | 98 | # Rule to calculate mean monthly statistics for historical and future scenarios - grid saved to netcdf 99 | # also calculate monthly time series averaged over the grid. 100 | rule monthly_stats_fut: 101 | input: 102 | region_fid = ancient(f"{basin_dir}/staticgeoms/region.geojson"), 103 | stats_time_nc_hist = (clim_project_dir + "/historical_stats_time_{model}.nc"), #make sure starts with previous job 104 | output: 105 | stats_time_nc = temp(clim_project_dir + "/stats_time-{model}_{scenario}.nc"), 106 | params: 107 | yml_fid = DATA_SOURCES, 108 | project_dir = f"{project_dir}", 109 | name_scenario = "{scenario}", 110 | name_members = members, 111 | name_model = "{model}", 112 | name_clim_project = clim_project, 113 | variables = variables, 114 | save_grids = save_grids, 115 | script: "src/get_stats_climate_proj.py" 116 | 117 | # Rule to calculate change stats over the grid 118 | rule monthly_change: 119 | input: 120 | stats_time_nc_hist = ancient(clim_project_dir + "/historical_stats_time_{model}.nc"), 121 | stats_time_nc = ancient(clim_project_dir + "/stats_time-{model}_{scenario}.nc"), 122 | output: 123 | stats_nc_change = temp(clim_project_dir + "/annual_change_scalar_stats-{model}_{scenario}_{horizon}.nc"), 124 | params: 125 | clim_project_dir = f"{clim_project_dir}", 126 | start_month_hyd_year = start_month_hyd_year, 127 | name_model = "{model}", 128 | name_scenario = "{scenario}", 129 | name_horizon = "{horizon}", 130 | time_horizon_hist = time_horizon_hist, 131 | time_horizon_fut = get_horizon, 132 | save_grids = save_grids, 133 | stats_nc_hist = (clim_project_dir + "/historical_stats_{model}.nc"), 134 | stats_nc = (clim_project_dir + "/stats-{model}_{scenario}.nc"), 135 | script: "src/get_change_climate_proj.py" 136 | 137 | #rule to merge results in one netcdf / todo: add plotting 138 | rule monthly_change_scalar_merge: 139 | input: 140 | stats_nc_change = ancient(expand((clim_project_dir + "/annual_change_scalar_stats-{model}_{scenario}_{horizon}.nc"), model = models, scenario = scenarios, horizon = future_horizons)), 141 | output: 142 | stats_change_summary = (clim_project_dir + "/annual_change_scalar_stats_summary.nc"), 143 | stats_change_summary_csv = (clim_project_dir + "/annual_change_scalar_stats_summary.csv"), 144 | stats_change_summary_csv_mean = (clim_project_dir + "/annual_change_scalar_stats_summary_mean.csv"), 145 | stats_change_plt = (clim_project_dir + "/plots/projected_climate_statistics.png"), 146 | params: 147 | clim_project_dir = f"{clim_project_dir}", 148 | horizons = future_horizons, 149 | script: "src/get_change_climate_proj_summary.py" 150 | 151 | #rule to plot timeseries 152 | rule plot_climate_proj_timeseries: 153 | input: 154 | stats_change_summary = (clim_project_dir + "/annual_change_scalar_stats_summary.nc"), 155 | stats_time_nc_hist =[(clim_project_dir + f"/historical_stats_time_{mod}.nc") for mod in models], 156 | stats_time_nc = expand((clim_project_dir + "/stats_time-{model}_{scenario}.nc"), model = models, scenario = scenarios), 157 | params: 158 | clim_project_dir = f"{clim_project_dir}", 159 | scenarios = scenarios, 160 | horizons = future_horizons, 161 | save_grids = save_grids, 162 | change_grids = [(clim_project_dir + f"/monthly_change_mean_grid-{mod}_{sc}_{hz}.nc") for mod,sc,hz in list(itertools.product(models,scenarios,future_horizons))], 163 | output: 164 | precip_plt = (clim_project_dir + "/plots/precipitation_anomaly_projections_abs.png"), 165 | temp_plt = (clim_project_dir + "/plots/temperature_anomaly_projections_abs.png"), 166 | timeseries_csv = (clim_project_dir + "/gcm_timeseries.nc"), 167 | script: "src/plot_proj_timeseries.py" 168 | -------------------------------------------------------------------------------- /Snakefile_climate_experiment: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | 4 | # Parsing the Snakemake config file (options for basins to build, data catalog, model output directory) 5 | #configfile: "config/snake_config_test.yml" 6 | # read path of the config file to give to the weagen scripts 7 | args = sys.argv 8 | config_path = args[args.index("--configfile") + 1] 9 | 10 | # Function to get argument from config file and return default value if not found 11 | def get_config(config, arg, default=None, optional=True): 12 | """ 13 | Function to get argument from config file and return default value if not found 14 | 15 | Parameters 16 | ---------- 17 | config : dict 18 | config file 19 | arg : str 20 | argument to get from config file 21 | default : str/int/float/list, optional 22 | default value if argument not found, by default None 23 | optional : bool, optional 24 | if True, argument is optional, by default True 25 | """ 26 | if arg in config: 27 | return config[arg] 28 | elif optional: 29 | return default 30 | else: 31 | raise ValueError(f"Argument {arg} not found in config file") 32 | 33 | project_dir = get_config(config, 'project_dir', optional=False) 34 | static_dir = get_config(config, 'static_dir', optional=False) 35 | experiment = get_config(config, 'experiment_name', optional=False) 36 | RLZ_NUM = get_config(config, 'realizations_num', 1) 37 | ST_NUM = (get_config(config['temp'], 'step_num', 1) + 1) * (get_config(config['precip'], 'step_num', 1) + 1) 38 | run_hist = get_config(config,"run_historical", False) 39 | if run_hist: 40 | ST_START = 0 41 | else: 42 | ST_START = 1 43 | 44 | DATA_SOURCES = get_config(config, "data_sources", optional=False) 45 | 46 | basin_dir = f"{project_dir}/hydrology_model" 47 | exp_dir = f"{project_dir}/climate_{experiment}" 48 | 49 | # Data catalog historical 50 | clim_source = get_config(config, "clim_historical", optional=False) 51 | # Time horizon climate experiment and number of hydrological model run 52 | horizontime_climate = get_config(config, 'horizontime_climate', optional=False) 53 | wflow_run_length = get_config(config, 'run_length', 20) 54 | 55 | # Master rule: end with all model run and analysed with saving a output plot 56 | rule all: 57 | input: 58 | Qstats = f"{exp_dir}/model_results/Qstats.csv", 59 | basin = f"{exp_dir}/model_results/basin.csv", 60 | snake_config = f"{project_dir}/config/snake_config_climate_experiment.yml", 61 | 62 | # Rule to copy config files to the project_dir/config folder 63 | rule copy_config: 64 | input: 65 | config_snake = config_path, 66 | params: 67 | data_catalogs = DATA_SOURCES, 68 | workflow_name = "climate_experiment", 69 | output: 70 | config_snake_out = f"{project_dir}/config/snake_config_climate_experiment.yml", 71 | script: 72 | "src/copy_config_files.py" 73 | 74 | # Rule to extract historic climate data at native resolution for the project area 75 | rule extract_climate_grid: 76 | input: 77 | prj_region = ancient(f"{basin_dir}/staticgeoms/region.geojson"), 78 | params: 79 | data_sources = DATA_SOURCES, 80 | clim_source = clim_source, 81 | output: 82 | climate_nc = f"{project_dir}/climate_historical/raw_data/extract_historical.nc", 83 | script: 84 | "src/extract_historical_climate.py" 85 | 86 | # Prepare stress test experiment 87 | rule climate_stress_parameters: 88 | input: 89 | config = ancient(config_path), 90 | output: 91 | st_csv_fns = [f"{exp_dir}/stress_test/cst_{st_num}.csv" for st_num in np.arange(1, ST_NUM+1)] 92 | script: 93 | "src/prepare_cst_parameters.py" 94 | 95 | # Prepare config files for the weather generator: generate 96 | rule prepare_weagen_config: 97 | output: 98 | weagen_config = f"{exp_dir}/weathergen_config.yml", 99 | params: 100 | cftype = "generate", 101 | snake_config = config_path, 102 | default_config = f"{static_dir}/weathergen_config.yml", 103 | output_path = f"{exp_dir}/", 104 | middle_year = horizontime_climate, 105 | sim_years = wflow_run_length, 106 | nc_file_prefix = "rlz" 107 | script: 108 | "src/prepare_weagen_config.py" 109 | 110 | # Prepare config files for the weather generator: climate change 111 | rule prepare_weagen_config_st: 112 | output: 113 | weagen_config = f"{exp_dir}/realization_"+"{rlz_num}"+"/weathergen_config_rlz_"+"{rlz_num}"+"_cst_"+"{st_num}"+".yml", 114 | params: 115 | cftype = "stress_test", 116 | snake_config = config_path, 117 | output_path = f"{exp_dir}/realization_"+"{rlz_num}"+"/", 118 | nc_file_prefix = "rlz_"+"{rlz_num}"+"_cst", 119 | nc_file_suffix = "{st_num}", 120 | script: 121 | "src/prepare_weagen_config.py" 122 | 123 | # Generate climate realization 124 | rule generate_weather_realization: 125 | input: 126 | climate_nc = ancient(f"{project_dir}/climate_historical/raw_data/extract_historical.nc"), 127 | weagen_config = f"{exp_dir}/weathergen_config.yml", 128 | output: 129 | temp([f"{exp_dir}/realization_{rlz_num}/rlz_{rlz_num}_cst_0.nc" for rlz_num in np.arange(1, RLZ_NUM+1)]) 130 | shell: 131 | """Rscript --vanilla src/weathergen/generate_weather.R {input.climate_nc} {input.weagen_config} """ 132 | 133 | # Generate climate stress tests 134 | rule generate_climate_stress_test: 135 | input: 136 | rlz_nc = f"{exp_dir}/realization_"+"{rlz_num}"+"/rlz_"+"{rlz_num}"+"_cst_0.nc", 137 | st_csv = f"{exp_dir}/stress_test/cst_"+"{st_num}"+".csv", 138 | weagen_config = f"{exp_dir}/realization_"+"{rlz_num}"+"/weathergen_config_rlz_"+"{rlz_num}"+"_cst_"+"{st_num}"+".yml", 139 | output: 140 | rlz_st_nc = temp(f"{exp_dir}/realization_"+"{rlz_num}"+"/rlz_"+"{rlz_num}"+"_cst_"+"{st_num}"+".nc") 141 | shell: 142 | """Rscript --vanilla src/weathergen/impose_climate_change.R {input.rlz_nc} {input.weagen_config} {input.st_csv}""" 143 | 144 | # Prepare data catalog of the climate files 145 | rule climate_data_catalog: 146 | input: 147 | cst_nc = expand((f"{exp_dir}/realization_"+"{rlz_num}"+"/rlz_"+"{rlz_num}"+"_cst_"+"{st_num}"+".nc"), rlz_num = np.arange(1, RLZ_NUM+1), st_num = np.arange(1, ST_NUM+1)), 148 | rlz_nc = [f"{exp_dir}/realization_{rlz_num}/rlz_{rlz_num}_cst_0.nc" for rlz_num in np.arange(1, RLZ_NUM+1)] 149 | output: 150 | clim_data = f"{exp_dir}/data_catalog_climate_experiment.yml" 151 | params: 152 | data_sources = DATA_SOURCES, 153 | clim_source = clim_source, 154 | script: 155 | "src/prepare_climate_data_catalog.py" 156 | 157 | # Downscale climate forcing for use with wflow 158 | rule downscale_climate_realization: 159 | input: 160 | nc = f"{exp_dir}/realization_"+"{rlz_num}"+"/rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".nc", 161 | data_sources = [f"{exp_dir}/data_catalog_climate_experiment.yml", DATA_SOURCES] 162 | output: 163 | nc = temp(f"{exp_dir}/realization_"+"{rlz_num}"+"/inmaps_rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".nc"), 164 | toml = f"{basin_dir}/run_climate_{experiment}/wflow_sbm_rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".toml" 165 | params: 166 | model_dir = basin_dir, 167 | clim_source = clim_source, 168 | horizontime_climate = horizontime_climate, 169 | run_length = wflow_run_length, 170 | script: 171 | "src/downscale_climate_forcing.py" 172 | 173 | # Run Wflow for all climate forcing 174 | rule run_wflow: 175 | input: 176 | forcing_fid = f"{exp_dir}/realization_"+"{rlz_num}"+"/inmaps_rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".nc", 177 | toml_fid = f"{basin_dir}/run_climate_{experiment}/wflow_sbm_rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".toml" 178 | output: 179 | csv_file = f"{basin_dir}/run_climate_{experiment}/output_rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".csv", 180 | state_file = temp(f"{basin_dir}/run_climate_{experiment}/outstates_rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".nc") 181 | shell: 182 | """ julia --threads 4 -e "using Wflow; Wflow.run()" "{input.toml_fid}" """ 183 | 184 | # Export wflow results 185 | rule export_wflow_results: 186 | input: 187 | rlz_csv_fns = expand((f"{basin_dir}/run_climate_{experiment}/output_rlz_"+"{rlz_num}"+"_cst_"+"{st_num2}"+".csv"), rlz_num=np.arange(1, RLZ_NUM+1), st_num2=np.arange(ST_START, ST_NUM+1)), 188 | output: 189 | Qstats = f"{exp_dir}/model_results/Qstats.csv", 190 | basin = f"{exp_dir}/model_results/basin.csv", 191 | params: 192 | exp_dir = exp_dir, 193 | aggr_rlz = get_config(config, 'aggregate_rlz', True), 194 | st_num = ST_NUM, 195 | Tlow = get_config(config,"Tlow", 2), 196 | Tpeak = get_config(config,"Tpeak", 10), 197 | script: 198 | "src/export_wflow_results.py" 199 | -------------------------------------------------------------------------------- /config/deltares_data_climate_projections.yml: -------------------------------------------------------------------------------- 1 | root: p:/i1000365-007-blue-earth/ClimateChange 2 | isimip3_{model}_{scenario}_{member}: 3 | crs: 4326 4 | data_type: RasterDataset 5 | driver: netcdf 6 | kwargs: 7 | chunks: 8 | lat: 50 9 | lon: 50 10 | time: -1 11 | decode_times: true 12 | parallel: true 13 | meta: 14 | category: climate 15 | paper_doi: https://doi.org/10.5194/gmd-12-3055-2019 16 | paper_ref: Lange, 2019 17 | source_license: CC BY 4.0 18 | source_url: https://www.isimip.org/outputdata/ 19 | source_version: phase3 20 | placeholders: 21 | model: [gfdl, ipsl, mpi, mri, ukesm] 22 | scenario: [ssp126, ssp370, ssp585, historical] 23 | member: [r1i1p1f1] 24 | path: ISIMIP3/{model}/{scenario}/{variable}/{model}*{member}*{scenario}*{variable}_global_daily*.nc 25 | rename: 26 | pr: precip 27 | tas: temp 28 | unit_add: 29 | temp: -273.15 30 | unit_mult: 31 | precip: 86400 32 | 33 | cmip5_{model}_historical_{member}: 34 | crs: 4326 35 | data_type: RasterDataset 36 | driver: netcdf 37 | kwargs: 38 | chunks: 39 | lat: 125 40 | lon: 120 41 | time: 100 42 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 43 | decode_times: true 44 | parallel: true 45 | combine: by_coords 46 | preprocess: to_datetimeindex 47 | compat: override 48 | coords: minimal 49 | meta: 50 | category: climate 51 | paper_doi: 10.1175/BAMS-D-11-00094.1 52 | paper_ref: Taylor et al. 2012 53 | source_license: todo 54 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 55 | source_version: 1.3.1 56 | placeholders: 57 | model: ['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 58 | 'CESM1-CAM5', 'CESM1-FASTCHEM', 'CESM1-WACCM', 'CMCC-CESM', 59 | 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CNRM-CM5-2', 'CSIRO-Mk3-6-0', 60 | 'CanCM4', 'CanESM2', 'EC-EARTH', 'FGOALS-g2', 'FIO-ESM', 61 | 'GFDL-CM2p1', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 62 | 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 63 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 64 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 65 | 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MPI-ESM-P', 66 | 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 67 | 'bcc-csm1-1-m', 'inmcm4'] 68 | member: [r1i1p1] 69 | path: p:/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/historical/{model}/{variable}/{variable}_*{model}_*historical_{member}*.nc 70 | rename: 71 | pr: precip 72 | tas: temp 73 | tasmin: temp_min 74 | tasmax: temp_max 75 | unit_add: 76 | temp: -273.15 77 | temp_min: -273.15 78 | temp_max: -273.15 79 | unit_mult: 80 | precip: 86400 81 | 82 | cmip5_{model}_rcp26_{member}: 83 | crs: 4326 84 | data_type: RasterDataset 85 | driver: netcdf 86 | kwargs: 87 | chunks: 88 | lat: 125 89 | lon: 120 90 | time: 50 91 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 92 | decode_times: true 93 | parallel: true 94 | combine: by_coords 95 | preprocess: to_datetimeindex 96 | compat: override 97 | coords: minimal 98 | meta: 99 | category: climate 100 | paper_doi: 10.1175/BAMS-D-11-00094.1 101 | paper_ref: Taylor et al. 2012 102 | source_license: todo 103 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 104 | source_version: 1.3.1 105 | placeholders: 106 | model: ['BNU-ESM', 'CCSM4', 'CESM1-CAM5', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 107 | 'CanESM2', 'FGOALS-g2', 'FIO-ESM', 'GFDL-CM3', 'GFDL-ESM2G', 108 | 'GISS-E2-H', 'GISS-E2-R', 'HadGEM2-AO', 'HadGEM2-ES', 'IPSL-CM5A-LR', 109 | 'IPSL-CM5A-MR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 110 | 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 111 | 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m'] 112 | member: [r1i1p1] 113 | path: p:/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp26/{model}/{variable}/{variable}_*{model}_*rcp26_{member}*.nc 114 | rename: 115 | pr: precip 116 | tas: temp 117 | tasmin: temp_min 118 | tasmax: temp_max 119 | unit_add: 120 | temp: -273.15 121 | temp_min: -273.15 122 | temp_max: -273.15 123 | unit_mult: 124 | precip: 86400 125 | 126 | cmip5_{model}_rcp45_{member}: 127 | crs: 4326 128 | data_type: RasterDataset 129 | driver: netcdf 130 | kwargs: 131 | chunks: 132 | lat: 125 133 | lon: 120 134 | time: 50 135 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 136 | decode_times: true 137 | parallel: true 138 | combine: by_coords 139 | preprocess: to_datetimeindex 140 | compat: override 141 | coords: minimal 142 | meta: 143 | category: climate 144 | paper_doi: 10.1175/BAMS-D-11-00094.1 145 | paper_ref: Taylor et al. 2012 146 | source_license: todo 147 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 148 | source_version: 1.3.1 149 | placeholders: 150 | model: ['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 151 | 'CESM1-CAM5', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 152 | 'CanCM4', 'CanESM2', 'FGOALS-g2', 'FIO-ESM', 'GFDL-CM2p1', 153 | 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 154 | 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 155 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 156 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 157 | 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 158 | 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4'] 159 | member: [r1i1p1] 160 | path: p:/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp45/{model}/{variable}/{variable}_*{model}_*rcp45_{member}*.nc 161 | rename: 162 | pr: precip 163 | tas: temp 164 | tasmin: temp_min 165 | tasmax: temp_max 166 | unit_add: 167 | temp: -273.15 168 | temp_min: -273.15 169 | temp_max: -273.15 170 | unit_mult: 171 | precip: 86400 172 | 173 | cmip5_{model}_rcp60_{member}: 174 | crs: 4326 175 | data_type: RasterDataset 176 | driver: netcdf 177 | kwargs: 178 | chunks: 179 | lat: 125 180 | lon: 120 181 | time: 50 182 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 183 | decode_times: true 184 | parallel: true 185 | combine: by_coords 186 | preprocess: to_datetimeindex 187 | compat: override 188 | coords: minimal 189 | meta: 190 | category: climate 191 | paper_doi: 10.1175/BAMS-D-11-00094.1 192 | paper_ref: Taylor et al. 2012 193 | source_license: todo 194 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 195 | source_version: 1.3.1 196 | placeholders: 197 | model: ['CCSM4', 'CESM1-CAM5', 'CSIRO-Mk3-6-0', 'FIO-ESM', 'GFDL-CM3', 198 | 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-R', 'HadGEM2-AO', 199 | 'HadGEM2-ES', 'IPSL-CM5A-MR', 'IPSL-CM5A-LR', 'MIROC-ESM', 200 | 'MIROC-ESM-CHEM', 'MIROC5', 'MRI-CGCM3', 'NorESM1-M', 'NorESM1-ME', 201 | 'bcc-csm1-1', 'bcc-csm1-1-m'] 202 | member: [r1i1p1] 203 | path: p:/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp60/{model}/{variable}/{variable}_*{model}_*rcp60_{member}*.nc 204 | rename: 205 | pr: precip 206 | tas: temp 207 | tasmin: temp_min 208 | tasmax: temp_max 209 | unit_add: 210 | temp: -273.15 211 | temp_min: -273.15 212 | temp_max: -273.15 213 | unit_mult: 214 | precip: 86400 215 | 216 | cmip5_{model}_rcp85_{member}: 217 | crs: 4326 218 | data_type: RasterDataset 219 | driver: netcdf 220 | kwargs: 221 | chunks: 222 | lat: 125 223 | lon: 120 224 | time: 50 225 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 226 | decode_times: true 227 | parallel: true 228 | combine: by_coords 229 | preprocess: to_datetimeindex 230 | compat: override 231 | coords: minimal 232 | meta: 233 | category: climate 234 | paper_doi: 10.1175/BAMS-D-11-00094.1 235 | paper_ref: Taylor et al. 2012 236 | source_license: todo 237 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 238 | source_version: 1.3.1 239 | placeholders: 240 | model: ['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 241 | 'CESM1-CAM5', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 242 | 'CSIRO-Mk3-6-0', 'CanESM2', 'EC-EARTH', 'FGOALS-g2', 'FIO-ESM', 243 | 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-R', 244 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 245 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 246 | 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 247 | 'NorESM1-ME', 'SP-CCSM4', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4'] 248 | member: [r1i1p1] 249 | path: p:/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp85/{model}/{variable}/{variable}_*{model}_*rcp85_{member}*.nc 250 | rename: 251 | pr: precip 252 | tas: temp 253 | tasmin: temp_min 254 | tasmax: temp_max 255 | unit_add: 256 | temp: -273.15 257 | temp_min: -273.15 258 | temp_max: -273.15 259 | unit_mult: 260 | precip: 86400 -------------------------------------------------------------------------------- /config/deltares_data_climate_projections_linux.yml: -------------------------------------------------------------------------------- 1 | root: /p/i1000365-007-blue-earth/ClimateChange 2 | isimip3_{model}_{scenario}_{member}: 3 | crs: 4326 4 | data_type: RasterDataset 5 | driver: netcdf 6 | kwargs: 7 | chunks: 8 | lat: 50 9 | lon: 50 10 | time: -1 11 | decode_times: true 12 | parallel: true 13 | meta: 14 | category: climate 15 | paper_doi: https://doi.org/10.5194/gmd-12-3055-2019 16 | paper_ref: Lange, 2019 17 | source_license: CC BY 4.0 18 | source_url: https://www.isimip.org/outputdata/ 19 | source_version: phase3 20 | placeholders: 21 | model: [gfdl, ipsl, mpi, mri, ukesm] 22 | scenario: [ssp126, ssp370, ssp585, historical] 23 | member: [r1i1p1f1] 24 | path: ISIMIP3/{model}/{scenario}/{variable}/{model}*{member}*{scenario}*{variable}_global_daily*.nc 25 | rename: 26 | pr: precip 27 | tas: temp 28 | unit_add: 29 | temp: -273.15 30 | unit_mult: 31 | precip: 86400 32 | 33 | cmip5_{model}_historical_{member}: 34 | crs: 4326 35 | data_type: RasterDataset 36 | driver: netcdf 37 | kwargs: 38 | chunks: 39 | lat: 125 40 | lon: 120 41 | time: 100 42 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 43 | decode_times: true 44 | parallel: true 45 | combine: by_coords 46 | preprocess: to_datetimeindex 47 | compat: override 48 | coords: minimal 49 | meta: 50 | category: climate 51 | paper_doi: 10.1175/BAMS-D-11-00094.1 52 | paper_ref: Taylor et al. 2012 53 | source_license: todo 54 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 55 | source_version: 1.3.1 56 | placeholders: 57 | model: ['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 58 | 'CESM1-CAM5', 'CESM1-FASTCHEM', 'CESM1-WACCM', 'CMCC-CESM', 59 | 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CNRM-CM5-2', 'CSIRO-Mk3-6-0', 60 | 'CanCM4', 'CanESM2', 'EC-EARTH', 'FGOALS-g2', 'FIO-ESM', 61 | 'GFDL-CM2p1', 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 62 | 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 63 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 64 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 65 | 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MPI-ESM-P', 66 | 'MRI-CGCM3', 'MRI-ESM1', 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 67 | 'bcc-csm1-1-m', 'inmcm4'] 68 | member: [r1i1p1] 69 | path: /p/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/historical/{model}/{variable}/{variable}_*{model}_*historical_{member}*.nc 70 | rename: 71 | pr: precip 72 | tas: temp 73 | tasmin: temp_min 74 | tasmax: temp_max 75 | unit_add: 76 | temp: -273.15 77 | temp_min: -273.15 78 | temp_max: -273.15 79 | unit_mult: 80 | precip: 86400 81 | 82 | cmip5_{model}_rcp26_{member}: 83 | crs: 4326 84 | data_type: RasterDataset 85 | driver: netcdf 86 | kwargs: 87 | chunks: 88 | lat: 125 89 | lon: 120 90 | time: 50 91 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 92 | decode_times: true 93 | parallel: true 94 | combine: by_coords 95 | preprocess: to_datetimeindex 96 | compat: override 97 | coords: minimal 98 | meta: 99 | category: climate 100 | paper_doi: 10.1175/BAMS-D-11-00094.1 101 | paper_ref: Taylor et al. 2012 102 | source_license: todo 103 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 104 | source_version: 1.3.1 105 | placeholders: 106 | model: ['BNU-ESM', 'CCSM4', 'CESM1-CAM5', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 107 | 'CanESM2', 'FGOALS-g2', 'FIO-ESM', 'GFDL-CM3', 'GFDL-ESM2G', 108 | 'GISS-E2-H', 'GISS-E2-R', 'HadGEM2-AO', 'HadGEM2-ES', 'IPSL-CM5A-LR', 109 | 'IPSL-CM5A-MR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 110 | 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 111 | 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m'] 112 | member: [r1i1p1] 113 | path: /p/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp26/{model}/{variable}/{variable}_*{model}_*rcp26_{member}*.nc 114 | rename: 115 | pr: precip 116 | tas: temp 117 | tasmin: temp_min 118 | tasmax: temp_max 119 | unit_add: 120 | temp: -273.15 121 | temp_min: -273.15 122 | temp_max: -273.15 123 | unit_mult: 124 | precip: 86400 125 | 126 | cmip5_{model}_rcp45_{member}: 127 | crs: 4326 128 | data_type: RasterDataset 129 | driver: netcdf 130 | kwargs: 131 | chunks: 132 | lat: 125 133 | lon: 120 134 | time: 50 135 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 136 | decode_times: true 137 | parallel: true 138 | combine: by_coords 139 | preprocess: to_datetimeindex 140 | compat: override 141 | coords: minimal 142 | meta: 143 | category: climate 144 | paper_doi: 10.1175/BAMS-D-11-00094.1 145 | paper_ref: Taylor et al. 2012 146 | source_license: todo 147 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 148 | source_version: 1.3.1 149 | placeholders: 150 | model: ['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 151 | 'CESM1-CAM5', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 'CSIRO-Mk3-6-0', 152 | 'CanCM4', 'CanESM2', 'FGOALS-g2', 'FIO-ESM', 'GFDL-CM2p1', 153 | 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 154 | 'GISS-E2-H-CC', 'GISS-E2-R', 'GISS-E2-R-CC', 'HadCM3', 155 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 156 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 157 | 'MIROC4h', 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 158 | 'NorESM1-M', 'NorESM1-ME', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4'] 159 | member: [r1i1p1] 160 | path: /p/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp45/{model}/{variable}/{variable}_*{model}_*rcp45_{member}*.nc 161 | rename: 162 | pr: precip 163 | tas: temp 164 | tasmin: temp_min 165 | tasmax: temp_max 166 | unit_add: 167 | temp: -273.15 168 | temp_min: -273.15 169 | temp_max: -273.15 170 | unit_mult: 171 | precip: 86400 172 | 173 | cmip5_{model}_rcp60_{member}: 174 | crs: 4326 175 | data_type: RasterDataset 176 | driver: netcdf 177 | kwargs: 178 | chunks: 179 | lat: 125 180 | lon: 120 181 | time: 50 182 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 183 | decode_times: true 184 | parallel: true 185 | combine: by_coords 186 | preprocess: to_datetimeindex 187 | compat: override 188 | coords: minimal 189 | meta: 190 | category: climate 191 | paper_doi: 10.1175/BAMS-D-11-00094.1 192 | paper_ref: Taylor et al. 2012 193 | source_license: todo 194 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 195 | source_version: 1.3.1 196 | placeholders: 197 | model: ['CCSM4', 'CESM1-CAM5', 'CSIRO-Mk3-6-0', 'FIO-ESM', 'GFDL-CM3', 198 | 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-R', 'HadGEM2-AO', 199 | 'HadGEM2-ES', 'IPSL-CM5A-MR', 'IPSL-CM5A-LR', 'MIROC-ESM', 200 | 'MIROC-ESM-CHEM', 'MIROC5', 'MRI-CGCM3', 'NorESM1-M', 'NorESM1-ME', 201 | 'bcc-csm1-1', 'bcc-csm1-1-m'] 202 | member: [r1i1p1] 203 | path: /p/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp60/{model}/{variable}/{variable}_*{model}_*rcp60_{member}*.nc 204 | rename: 205 | pr: precip 206 | tas: temp 207 | tasmin: temp_min 208 | tasmax: temp_max 209 | unit_add: 210 | temp: -273.15 211 | temp_min: -273.15 212 | temp_max: -273.15 213 | unit_mult: 214 | precip: 86400 215 | 216 | cmip5_{model}_rcp85_{member}: 217 | crs: 4326 218 | data_type: RasterDataset 219 | driver: netcdf 220 | kwargs: 221 | chunks: 222 | lat: 125 223 | lon: 120 224 | time: 50 225 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'height', 'average_DT'] 226 | decode_times: true 227 | parallel: true 228 | combine: by_coords 229 | preprocess: to_datetimeindex 230 | compat: override 231 | coords: minimal 232 | meta: 233 | category: climate 234 | paper_doi: 10.1175/BAMS-D-11-00094.1 235 | paper_ref: Taylor et al. 2012 236 | source_license: todo 237 | source_url: https://esgf-node.llnl.gov/projects/esgf-llnl/ 238 | source_version: 1.3.1 239 | placeholders: 240 | model: ['ACCESS1-0', 'ACCESS1-3', 'BNU-ESM', 'CCSM4', 'CESM1-BGC', 241 | 'CESM1-CAM5', 'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS', 'CNRM-CM5', 242 | 'CSIRO-Mk3-6-0', 'CanESM2', 'EC-EARTH', 'FGOALS-g2', 'FIO-ESM', 243 | 'GFDL-CM3', 'GFDL-ESM2G', 'GFDL-ESM2M', 'GISS-E2-H', 'GISS-E2-R', 244 | 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES', 'IPSL-CM5A-LR', 245 | 'IPSL-CM5A-MR', 'IPSL-CM5B-LR', 'MIROC-ESM', 'MIROC-ESM-CHEM', 246 | 'MIROC5', 'MPI-ESM-LR', 'MPI-ESM-MR', 'MRI-CGCM3', 'NorESM1-M', 247 | 'NorESM1-ME', 'SP-CCSM4', 'bcc-csm1-1', 'bcc-csm1-1-m', 'inmcm4'] 248 | member: [r1i1p1] 249 | path: /p/i1000365-007-blue-earth/ClimateChange/hydromt/CMIP5/month/rcp85/{model}/{variable}/{variable}_*{model}_*rcp85_{member}*.nc 250 | rename: 251 | pr: precip 252 | tas: temp 253 | tasmin: temp_min 254 | tasmax: temp_max 255 | unit_add: 256 | temp: -273.15 257 | temp_min: -273.15 258 | temp_max: -273.15 259 | unit_mult: 260 | precip: 86400 -------------------------------------------------------------------------------- /src/get_stats_climate_proj.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Tue Feb 1 14:34:58 2022 4 | 5 | @author: bouaziz 6 | """ 7 | 8 | import hydromt 9 | import os 10 | import glob 11 | import matplotlib.pyplot as plt 12 | import pandas as pd 13 | import geopandas as gpd 14 | import xarray as xr 15 | 16 | from dask.diagnostics import ProgressBar 17 | 18 | # %% 19 | 20 | # Snakemake options 21 | project_dir = snakemake.params.project_dir 22 | region_fn = snakemake.input.region_fid 23 | path_yml = snakemake.params.yml_fid 24 | name_scenario = snakemake.params.name_scenario 25 | name_members = snakemake.params.name_members 26 | name_model = snakemake.params.name_model 27 | name_clim_project = snakemake.params.name_clim_project 28 | variables = snakemake.params.variables 29 | save_grids = snakemake.params.save_grids 30 | 31 | # Time tuple for timeseries 32 | if name_clim_project == "cmip6": 33 | if name_scenario == "historical": 34 | # cmip6 historical 1850-2014 35 | time_tuple_all = ("1950-01-01", "2014-12-31") 36 | else: 37 | # cmip6 future 2015-2100+ depending on models 38 | time_tuple_all = ("2015-01-01", "2100-12-31") 39 | elif name_clim_project == "cmip5": 40 | if name_scenario == "historical": 41 | # cmip5 historical 1850-2005 42 | time_tuple_all = ("1950-01-01", "2005-12-31") 43 | else: 44 | # cmip5 future 2006-2100 45 | time_tuple_all = ("2006-01-01", "2100-12-31") 46 | else: # isimip3 47 | if name_scenario == "historical": 48 | # isimip3 historical 1850-2014 49 | time_tuple_all = ("1991-01-01", "2014-12-31") 50 | else: 51 | # isimip3 future 2015-2100 / p drive has gaps in between 2014-2021 52 | time_tuple_all = ("2021-01-01", "2100-12-31") 53 | 54 | # additional folder structure info 55 | folder_model = os.path.join(project_dir, "hydrology_model") 56 | folder_out = os.path.join(project_dir, "climate_projections", name_clim_project) 57 | 58 | if not os.path.exists(folder_out): 59 | os.mkdir(folder_out) 60 | 61 | # initialize model and region properties 62 | geom = gpd.read_file(region_fn) 63 | bbox = list(geom.geometry.bounds.values[0]) 64 | buffer = 1 65 | 66 | # initialize data_catalog from yml file 67 | data_catalog = hydromt.DataCatalog(data_libs=path_yml) 68 | 69 | 70 | def get_stats_clim_projections( 71 | data, 72 | name_clim_project, 73 | name_model, 74 | name_scenario, 75 | name_member, 76 | save_grids=False, 77 | ): 78 | """ 79 | Parameters 80 | ---------- 81 | data: dataset 82 | dataset for all available variables after opening data catalog 83 | name_clim_project : str 84 | name of the climate project (e.g. cmip5, cmip6, isimip3). 85 | should link to the name in the yml catalog. 86 | name_model : str 87 | model name of the climate model (e.g. ipsl, gfdl). 88 | name_scenario : str 89 | scenario name of the climate model (e.g. rcp4.5, rcp8.5). 90 | name_member : str 91 | member name of the climate model (e.g. r1i1p1f1). 92 | time_tuple : tuple 93 | time period over which to calculate statistics. 94 | save_grids : bool 95 | save gridded stats as well as scalar stats. False by default. 96 | 97 | Returns 98 | ------- 99 | Writes a netcdf file with mean monthly precipitation and temperature regime (12 maps) over the geom. 100 | todo: Writes a csv file with mean monthly timeseries of precip and temp statistics (mean) over the geom 101 | 102 | """ 103 | 104 | # get lat lon name of data 105 | XDIMS = ("x", "longitude", "lon", "long") 106 | YDIMS = ("y", "latitude", "lat") 107 | for dim in XDIMS: 108 | if dim in data.coords: 109 | x_dim = dim 110 | for dim in YDIMS: 111 | if dim in data.coords: 112 | y_dim = dim 113 | 114 | ds = [] 115 | ds_scalar = [] 116 | # filter variables for precip and temp 117 | # data_vars = list(data.data_vars) 118 | # var_list = [str for str in data_vars if any(sub in str for sub in variables)] 119 | for var in data.data_vars: # var_list: 120 | if var == "precip": 121 | var_m = data[var].resample(time="MS").sum("time") 122 | else: # for temp 123 | # elif "temp" in var: #for temp 124 | var_m = data[var].resample(time="MS").mean("time") 125 | 126 | # get scalar average over grid for each month 127 | var_m_scalar = var_m.mean([x_dim, y_dim]).round(decimals=2) 128 | ds_scalar.append(var_m_scalar.to_dataset()) 129 | 130 | # get grid average over time for each month 131 | if save_grids: 132 | # slice over time_tuple to save minimal required info for the grid 133 | # var_m = var_m.sel(time=slice(*time_tuple)) 134 | var_mm = var_m.groupby("time.month").mean("time").round(decimals=2) 135 | ds.append(var_mm.to_dataset()) 136 | 137 | # mean stats over grid and time 138 | mean_stats_time = xr.merge(ds_scalar) 139 | # add coordinate on project, model, scenario, realization to later merge all files 140 | mean_stats_time = mean_stats_time.assign_coords( 141 | { 142 | "clim_project": f"{name_clim_project}", 143 | "model": f"{name_model}", 144 | "scenario": f"{name_scenario}", 145 | "member": f"{name_member}", 146 | } 147 | ).expand_dims(["clim_project", "model", "scenario", "member"]) 148 | 149 | if save_grids: 150 | mean_stats = xr.merge(ds) 151 | # add coordinate on project, model, scenario, realization to later merge all files 152 | mean_stats = mean_stats.assign_coords( 153 | { 154 | "clim_project": f"{name_clim_project}", 155 | "model": f"{name_model}", 156 | "scenario": f"{name_scenario}", 157 | "member": f"{name_member}", 158 | } 159 | ).expand_dims(["clim_project", "model", "scenario", "member"]) 160 | 161 | else: 162 | mean_stats = xr.Dataset() 163 | 164 | return mean_stats, mean_stats_time 165 | 166 | 167 | # check if model really exists from data catalog entry - else skip and provide empty ds?? 168 | 169 | ds_members_mean_stats = [] 170 | ds_members_mean_stats_time = [] 171 | 172 | for name_member in name_members: 173 | print(name_member) 174 | entry = f"{name_clim_project}_{name_model}_{name_scenario}_{name_member}" 175 | if entry in data_catalog: 176 | try: # todo can this be replaced by if statement? 177 | data = data_catalog.get_rasterdataset( 178 | entry, 179 | bbox=bbox, 180 | buffer=buffer, 181 | time_tuple=time_tuple_all, 182 | variables=variables, 183 | ) 184 | # needed for cmip5/cmip6 cftime.Datetime360Day which is not picked up before. 185 | data = data.sel(time=slice(*time_tuple_all)) 186 | except: 187 | # if it is not possible to open all variables at once, loop over each one, remove duplicates and then merge: 188 | ds_list = [] 189 | for var in variables: 190 | try: 191 | data_ = data_catalog.get_rasterdataset( 192 | entry, 193 | bbox=bbox, 194 | buffer=buffer, 195 | time_tuple=time_tuple_all, 196 | variables=[var], 197 | ) 198 | # drop duplicates if any 199 | data_ = data_.drop_duplicates(dim="time", keep="first") 200 | ds_list.append(data_) 201 | except: 202 | print(f"{name_scenario}", f"{name_model}", f"{var} not found") 203 | # merge all variables back to data 204 | data = xr.merge(ds_list) 205 | 206 | # calculate statistics 207 | mean_stats, mean_stats_time = get_stats_clim_projections( 208 | data, 209 | name_clim_project, 210 | name_model, 211 | name_scenario, 212 | name_member, 213 | save_grids=save_grids, 214 | ) 215 | 216 | else: 217 | mean_stats = xr.Dataset() 218 | mean_stats_time = xr.Dataset() 219 | 220 | # merge members results 221 | ds_members_mean_stats.append(mean_stats) 222 | ds_members_mean_stats_time.append(mean_stats_time) 223 | 224 | if save_grids: 225 | nc_mean_stats = xr.merge(ds_members_mean_stats) 226 | else: 227 | nc_mean_stats = xr.Dataset() 228 | nc_mean_stats_time = xr.merge(ds_members_mean_stats_time) 229 | 230 | # write netcdf: 231 | 232 | # use hydromt function instead to write to netcdf? 233 | dvars = nc_mean_stats_time.raster.vars 234 | 235 | if name_scenario == "historical": 236 | name_nc_out = f"historical_stats_{name_model}.nc" 237 | name_nc_out_time = f"historical_stats_time_{name_model}.nc" 238 | else: 239 | name_nc_out = f"stats-{name_model}_{name_scenario}.nc" 240 | name_nc_out_time = f"stats_time-{name_model}_{name_scenario}.nc" 241 | 242 | print("writing stats over time to nc") 243 | delayed_obj = nc_mean_stats_time.to_netcdf( 244 | os.path.join(folder_out, name_nc_out_time), 245 | encoding={k: {"zlib": True} for k in dvars}, 246 | compute=False, 247 | ) 248 | with ProgressBar(): 249 | delayed_obj.compute() 250 | 251 | if save_grids: 252 | print("writing stats over grid to nc") 253 | delayed_obj = nc_mean_stats.to_netcdf( 254 | os.path.join(folder_out, name_nc_out), 255 | encoding={k: {"zlib": True} for k in dvars}, 256 | compute=False, 257 | ) 258 | with ProgressBar(): 259 | delayed_obj.compute() 260 | -------------------------------------------------------------------------------- /docs/notebooks/Model building.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d1026dcd", 6 | "metadata": {}, 7 | "source": [ 8 | "# Creating your project and building the hydrological model" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "af4e35b8", 14 | "metadata": {}, 15 | "source": [ 16 | "## Introduction" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "id": "91c05fc2", 22 | "metadata": {}, 23 | "source": [ 24 | "Nokebook for the workflow **Snakefile_model_creation**: creates a Wflow model based on global data for the selected region and run and anlayse the model results for a historical period." 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "8b887ec1", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# Import some python libraries we will need to run this notebook\n", 35 | "import os\n", 36 | "from IPython import display" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "id": "5297f677-6afc-4efe-a143-bfe84ce88772", 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "# A helper function\n", 47 | "from subprocess import Popen, PIPE, STDOUT\n", 48 | "\n", 49 | "from IPython.core.magic import register_line_magic\n", 50 | "\n", 51 | "\n", 52 | "@register_line_magic\n", 53 | "def runrealcmd(command):\n", 54 | " with Popen(\n", 55 | " command, stdout=PIPE, shell=True, stderr=STDOUT, bufsize=1, close_fds=True\n", 56 | " ) as process:\n", 57 | " for line in iter(process.stdout.readline, b\"\"):\n", 58 | " print(line.rstrip().decode(\"utf-8\"))" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "id": "f7acfc4f", 64 | "metadata": {}, 65 | "source": [ 66 | "## Defining your project region and properties" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "id": "11fb0c8c", 72 | "metadata": {}, 73 | "source": [ 74 | "First thing is to prepare all the options for your project, this is done using the configuration or settings file below" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "id": "60b1b63d", 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "# Add here the path where you have installed the blueearth-cst tool\n", 85 | "cst_path = r'D:\\blueearth_cst'\n", 86 | "os.chdir(cst_path)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "id": "4251a891", 92 | "metadata": {}, 93 | "source": [ 94 | "To chose where the river or basin you want to work with, you will need to supply the location of your river.\n", 95 | "\n", 96 | "The location can either be:\n", 97 | "- for a basin to the sea, any point in the basin [x, y]\n", 98 | "- for a subbasin (tributary or part of a basin) the location of the subbasin outlet [xout, yout]\n", 99 | "- a bounding box [xmin, ymin, xmax, ymax]\n", 100 | "- a geometry file of your region of interest (eg, myregion.shp)\n", 101 | "\n", 102 | "The coordinates should be in EPSG 4326.\n", 103 | "For points, you can use Google Maps to locate your coordinates.\n", 104 | "For bounding box or point cooridnates, you can also copy-paste locations from this website: http://bboxfinder.com/" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "id": "b67b0319", 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "%%writefile ./config/my-project-settings.yml\n", 115 | "\n", 116 | "###########################################################################################\n", 117 | "#################################### General setting ######################################\n", 118 | "\n", 119 | "# Path to where the wflow model will be stored (include the project name)\n", 120 | "project_dir: examples/myModel\n", 121 | "\n", 122 | "# Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other)\n", 123 | "data_sources: config/deltares_data.yml\n", 124 | "\n", 125 | "# Time horizon of the historical period\n", 126 | "starttime: \"2000-01-01T00:00:00\"\n", 127 | "endtime: \"2020-12-31T00:00:00\"\n", 128 | "# Historical climate data source (name as available in the data_sources catalog file). Either [era5, chirps_global, chirps, eobs].\n", 129 | "clim_historical: era5\n", 130 | "\n", 131 | "# List of wflow output variables to save\n", 132 | "wflow_outvars: ['river discharge', 'overland flow', 'actual evapotranspiration', 'groundwater recharge', 'snow']\n", 133 | "\n", 134 | "###########################################################################################\n", 135 | "#################################### Model building #######################################\n", 136 | "\n", 137 | "# hydromt \"region\" for building the model, coordinates are in EPSG 4326\n", 138 | "# for more info on how to fill this, you can see the hydromt documentation: \n", 139 | "# https://deltares.github.io/hydromt/latest/user_guide/cli.html#region-options\n", 140 | "# For wflow please use either 'basin' or 'subbasin' keywords\n", 141 | "#model_region: \"{'subbasin': [108.13800, 15.5414], 'uparea':100}\"\n", 142 | "model_region: \"{'subbasin': [108.152711, 15.571764], 'uparea': 100}\"\n", 143 | "# Grid resolution of the model in degrees\n", 144 | "model_resolution: 0.0083333\n", 145 | "\n", 146 | "# If available, please here put the path to the available observation stations locations files\n", 147 | "# csv file containing three columns: station_ID, x, y\n", 148 | "output_locations: None # If set to None, will default to the wflow outlets\n", 149 | "# Path to the file with observed discharge timeseries per station \n", 150 | "observations_timeseries: None" 151 | ] 152 | }, 153 | { 154 | "cell_type": "markdown", 155 | "id": "2a0246c8", 156 | "metadata": {}, 157 | "source": [ 158 | "## Running the model building workflow" 159 | ] 160 | }, 161 | { 162 | "cell_type": "markdown", 163 | "id": "16380b9b", 164 | "metadata": {}, 165 | "source": [ 166 | "The first line is to create an image of our workflow with snakemake to verify that all our settings are well taken into account." 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "id": "eb461836", 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [ 176 | "! snakemake -s Snakefile_model_creation --configfile config/my-project-settings.yml --dag | dot -Tpng > dag_model_building.png" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "id": "64c2a3b2", 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "display.Image(\"./dag_model_building.png\")" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "id": "39a82d53", 192 | "metadata": {}, 193 | "source": [ 194 | "Now we will unlock your directory in order to save the future results of the workflow. This step is most of the time not necessary." 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "id": "7850745f", 201 | "metadata": {}, 202 | "outputs": [], 203 | "source": [ 204 | "! snakemake --unlock -s Snakefile_model_creation --configfile config/my-project-settings.yml" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "id": "29f64153", 210 | "metadata": {}, 211 | "source": [ 212 | "And finally let's run our worflow:" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "id": "d23d7aea", 219 | "metadata": {}, 220 | "outputs": [], 221 | "source": [ 222 | "%runrealcmd snakemake all -c 1 -s Snakefile_model_creation --configfile config/my-project-settings.yml" 223 | ] 224 | }, 225 | { 226 | "cell_type": "markdown", 227 | "id": "d1f8ddcf", 228 | "metadata": {}, 229 | "source": [ 230 | "## Results" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "id": "114d1489", 236 | "metadata": {}, 237 | "source": [ 238 | "First check that snake did run everything." 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": null, 244 | "id": "81b9ec49", 245 | "metadata": {}, 246 | "outputs": [], 247 | "source": [ 248 | "! snakemake -s Snakefile_model_creation --configfile config/my-project-settings.yml --dag | dot -Tpng > dag_model_building.png" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "id": "88dca307", 255 | "metadata": {}, 256 | "outputs": [], 257 | "source": [ 258 | "display.Image(\"./dag_model_building.png\")" 259 | ] 260 | }, 261 | { 262 | "cell_type": "markdown", 263 | "id": "c82febeb", 264 | "metadata": {}, 265 | "source": [ 266 | "Let's see which files where created in our output folder." 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": null, 272 | "id": "bbc92bbe", 273 | "metadata": {}, 274 | "outputs": [], 275 | "source": [ 276 | "root = \"examples/myModel\"\n", 277 | "for path, _, files in os.walk(root):\n", 278 | " print(path)\n", 279 | " for name in files:\n", 280 | " if name.endswith(\".xml\"):\n", 281 | " continue\n", 282 | " print(f\" - {name}\")" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": null, 288 | "id": "b6aac318", 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [ 292 | "display.Image(\"./examples/myModel/plots/wflow_model_performance/basin_area.png\")" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": null, 298 | "id": "98f597e6", 299 | "metadata": {}, 300 | "outputs": [], 301 | "source": [ 302 | "display.Image(\"./examples/myModel/plots/wflow_model_performance/hydro_wflow_1.png\")" 303 | ] 304 | } 305 | ], 306 | "metadata": { 307 | "kernelspec": { 308 | "display_name": "Python 3 (ipykernel)", 309 | "language": "python", 310 | "name": "python3" 311 | }, 312 | "language_info": { 313 | "codemirror_mode": { 314 | "name": "ipython", 315 | "version": 3 316 | }, 317 | "file_extension": ".py", 318 | "mimetype": "text/x-python", 319 | "name": "python", 320 | "nbconvert_exporter": "python", 321 | "pygments_lexer": "ipython3", 322 | "version": "3.10.2" 323 | } 324 | }, 325 | "nbformat": 4, 326 | "nbformat_minor": 5 327 | } 328 | -------------------------------------------------------------------------------- /src/plot_results.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Plot wflow results and compare to observations if any 4 | """ 5 | 6 | import xarray as xr 7 | import numpy as np 8 | import os 9 | from os.path import join 10 | from pathlib import Path 11 | import matplotlib.pyplot as plt 12 | import pandas as pd 13 | import hydromt 14 | from hydromt_wflow import WflowModel 15 | 16 | from typing import Union 17 | 18 | from func_plot_signature import ( 19 | plot_signatures, 20 | plot_hydro, 21 | compute_metrics, 22 | plot_clim, 23 | plot_basavg, 24 | ) 25 | 26 | 27 | def analyse_wflow_historical( 28 | project_dir: Path, 29 | observations_fn: Union[Path, str] = None, 30 | gauges_locs: Union[Path, str] = None, 31 | ): 32 | """ 33 | Analyse and plot wflow model performance for historical run. 34 | 35 | To be read, model should be stored in ``project_dir``/hydrology_model. 36 | Model results should include the following keys: Q_gauges, 37 | Q_gauges_{basename(gauges_locs)}, P_subcatchment, T_subcatchment, EP_subcatchment. 38 | 39 | 40 | Outputs: 41 | 42 | - plot of hydrographs at the outlet(s) and gauges_locs if provided. If wflow run is 43 | three years or less, only the daily hydrograph will be plotted. If wflow run is 44 | longer than three years, plots will also include the yearly hydrograph, the 45 | monthly average and hyddrographs for the wettest and driest years. If observations 46 | are available, they are added as well. 47 | - plot of signature plots if wflow run is longer than a year and if observations 48 | are available. 49 | - plot of climate data per year and per month at the subcatchment level if wflow run 50 | is longer than a year. 51 | - plot of basin average outputs (e.g. soil moisture, snow, etc.). The variables to 52 | include should have the postfix _basavg in the wflow output file. 53 | - compute performance metrics (daily and monthly KGE, NSE, NSElog, RMSE, MSE, Pbias, 54 | VE) if observations are available and if wflow run is longer than a year. Metrics 55 | are saved to a csv file. 56 | 57 | Parameters 58 | ---------- 59 | project_dir : Path 60 | path to CST project directory 61 | observations_fn : Union[Path, str], optional 62 | Path to observations timeseries file, by default None 63 | Required columns: time, wflow_id IDs of the locations as in ``gauges_locs``. 64 | Separator is , and decimal is . 65 | gauges_locs : Union[Path, str], optional 66 | Path to gauges/observations locations file, by default None 67 | Required columns: wflow_id, station_name, x, y. 68 | Values in wflow_id column should match column names in ``observations_fn``. 69 | Separator is , and decimal is . 70 | """ 71 | ### 1. Prepare output and plotting options ### 72 | 73 | # Create output folders 74 | Folder_plots = f"{project_dir}/plots/wflow_model_performance" 75 | 76 | if not os.path.isdir(Folder_plots): 77 | os.mkdir(Folder_plots) 78 | 79 | # Plotting options 80 | fs = 7 81 | lw = 0.8 82 | 83 | # Other plot options 84 | label = "simulated" # "observed" 85 | color = "steelblue" # "red" 86 | linestyle = "-" 87 | marker = "o" 88 | 89 | ### 2. Read the observations ### 90 | # check if user provided observations 91 | has_observations = False 92 | 93 | if observations_fn is not None and os.path.exists(observations_fn): 94 | has_observations = True 95 | 96 | # Read 97 | gdf_obs = hydromt.io.open_vector(gauges_locs, crs=4326, sep=",") 98 | da_ts_obs = hydromt.io.open_timeseries_from_table( 99 | observations_fn, name="Q", index_dim="wflow_id", sep=";" 100 | ) 101 | ds_obs = hydromt.vector.GeoDataset.from_gdf( 102 | gdf_obs, da_ts_obs, merge_index = "inner" 103 | ) 104 | # Rename wflow_id to index 105 | ds_obs = ds_obs.rename({"wflow_id": "index"}) 106 | qobs = ds_obs["Q"].load() 107 | 108 | ### 3. Read the wflow model and results ### 109 | # Instantiate wflow model 110 | Folder_run = f"{project_dir}/hydrology_model" 111 | mod = WflowModel(root=Folder_run, mode="r") 112 | 113 | # Read the results 114 | # Discharge at the outlet(s) 115 | qsim = mod.results["Q_gauges"].rename("Q") 116 | # add station_name 117 | qsim = qsim.assign_coords( 118 | station_name=( 119 | "index", 120 | ["wflow_" + x for x in list(qsim["index"].values.astype(str))], 121 | ) 122 | ) 123 | # Discharge at the gauges_locs if present 124 | if gauges_locs is not None and os.path.exists(gauges_locs): 125 | # Get name of gauges dataset from the gauges locations file 126 | gauges_output_name = os.path.basename(gauges_locs).split(".")[0] 127 | if f"Q_gauges_{gauges_output_name}" in mod.results: 128 | qsim_gauges = mod.results[f"Q_gauges_{gauges_output_name}"].rename("Q") 129 | # Add station_name 130 | gdf_gauges = ( 131 | mod.geoms[f"gauges_{gauges_output_name}"] 132 | .rename(columns={"wflow_id": "index"}) 133 | .set_index("index") 134 | ) 135 | qsim_gauges = qsim_gauges.assign_coords( 136 | station_name=( 137 | "index", 138 | list(gdf_gauges["station_name"][qsim_gauges.index.values].values), 139 | ) 140 | ) 141 | # Merge with qsim 142 | qsim = xr.merge([qsim, qsim_gauges])["Q"] 143 | 144 | # Climate data P, EP, T for wflow_subcatch 145 | ds_clim = xr.merge( 146 | [ 147 | mod.results["P_subcatchment"], 148 | mod.results["T_subcatchment"], 149 | mod.results["EP_subcatchment"], 150 | ] 151 | ) 152 | 153 | # Other catchment average outputs 154 | ds_basin = xr.merge( 155 | [mod.results[dvar] for dvar in mod.results if "_basavg" in dvar] 156 | ) 157 | ds_basin = ds_basin.squeeze(drop=True) 158 | # If precipitation, skip as this will be plotted with the other climate data 159 | if "precipitation_basavg" in ds_basin: 160 | ds_basin = ds_basin.drop_vars("precipitation_basavg") 161 | 162 | ### 4. Plot climate data ### 163 | # No plots of climate data if wflow run is less than a year 164 | if len(ds_clim.time) < 365: 165 | print("less than 1 year of data is available " "no yearly clim plots are made.") 166 | else: 167 | for index in ds_clim.index.values: 168 | print(f"Plot climatic data at wflow basin {index}") 169 | ds_clim_i = ds_clim.sel(index=index) 170 | # Plot per year 171 | plot_clim(ds_clim_i, Folder_plots, f"wflow_{index}", "year") 172 | plt.close() 173 | # Plot per month 174 | plot_clim(ds_clim_i, Folder_plots, f"wflow_{index}", "month") 175 | plt.close() 176 | 177 | ### 5. Plot other basin average outputs ### 178 | print("Plot basin average wflow outputs") 179 | plot_basavg(ds_basin, Folder_plots) 180 | plt.close() 181 | 182 | ### 6. Plot hydrographs and compute performance metrics ### 183 | # Initialise the output performance table 184 | df_perf_all = pd.DataFrame() 185 | # Flag for plot signatures 186 | # (True if wflow run is longer than a year and observations are available) 187 | do_signatures = False 188 | 189 | # If possible, skip the first year of the wflow run (warm-up period) 190 | if len(qsim.time) > 365: 191 | print("Skipping the first year of the wflow run (warm-up period)") 192 | qsim = qsim.sel( 193 | time=slice( 194 | f"{qsim['time.year'][0].values+1}-{qsim['time.month'][0].values}-{qsim['time.day'][0].values}", 195 | None, 196 | ) 197 | ) 198 | if has_observations: 199 | do_signatures = True 200 | else: 201 | print("Simulation is less than a year so model warm-up period will be plotted.") 202 | # Sel qsim and qobs so that they have the same time period 203 | if has_observations: 204 | start = max(qsim.time.values[0], qobs.time.values[0]) 205 | end = min(qsim.time.values[-1], qobs.time.values[-1]) 206 | qsim = qsim.sel(time=slice(start, end)) 207 | qobs = qobs.sel(time=slice(start, end)) 208 | 209 | # Loop over the stations 210 | for station_id, station_name in zip(qsim.index.values, qsim.station_name.values): 211 | # Select the station 212 | qsim_i = qsim.sel(index=station_id) 213 | qobs_i = None 214 | if has_observations: 215 | if station_id in qobs.index.values: 216 | qobs_i = qobs.sel(index=station_id) 217 | 218 | # a) Plot hydrographs 219 | print(f"Plot hydrographs at wflow station {station_name}") 220 | plot_hydro( 221 | qsim=qsim_i, 222 | qobs=qobs_i, 223 | Folder_out=Folder_plots, 224 | station_name=station_name, 225 | label=label, 226 | color=color, 227 | lw=lw, 228 | fs=fs, 229 | ) 230 | plt.close() 231 | # b) Signature plot and performance metrics 232 | if do_signatures and qobs_i is not None: 233 | print("observed timeseries are available - making signature plots.") 234 | # Plot signatures 235 | plot_signatures( 236 | qsim=qsim_i, 237 | qobs=qobs_i, 238 | Folder_out=Folder_plots, 239 | station_name=station_name, 240 | label=label, 241 | color=color, 242 | linestyle=linestyle, 243 | marker=marker, 244 | fs=fs, 245 | lw=lw, 246 | ) 247 | plt.close() 248 | # Compute performance metrics 249 | df_perf = compute_metrics( 250 | qsim=qsim_i, 251 | qobs=qobs_i, 252 | station_name=station_name, 253 | ) 254 | # Join with other stations 255 | if df_perf_all.empty: 256 | df_perf_all = df_perf 257 | else: 258 | df_perf_all = df_perf_all.join(df_perf) 259 | else: 260 | print( 261 | "observed timeseries are not available " "no signature plots are made." 262 | ) 263 | 264 | # Save performance metrics to csv 265 | df_perf_all.to_csv(os.path.join(Folder_plots, "performance_metrics.csv")) 266 | 267 | ### End of the function ### 268 | 269 | 270 | if __name__ == "__main__": 271 | if "snakemake" in globals(): 272 | sm = globals()["snakemake"] 273 | analyse_wflow_historical( 274 | project_dir=sm.params.project_dir, 275 | observations_fn=sm.params.observations_file, 276 | gauges_locs=sm.params.gauges_output_fid, 277 | ) 278 | else: 279 | analyse_wflow_historical( 280 | project_dir=join(os.getcwd(), "examples", "my_project"), 281 | observations_fn=None, 282 | gauges_locs=None, 283 | ) 284 | -------------------------------------------------------------------------------- /docs/notebooks/Climate projections.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d52f4320", 6 | "metadata": {}, 7 | "source": [ 8 | "# Compute climate projections from different GCM models and RCP" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "2c3a674b", 14 | "metadata": {}, 15 | "source": [ 16 | "## Introduction" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "id": "36d890eb", 22 | "metadata": {}, 23 | "source": [ 24 | "Nokebook for the workflow **Snakefile_climate_projections**: derives future climate statistics (expected temperature and precipitation change) for different RCPs and GCMs (from CMIP dataset)." 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "a5407bcf", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# Import some python libraries we will need to run this notebook\n", 35 | "import os\n", 36 | "from IPython import display" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "id": "775790fa", 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "# A helper function\n", 47 | "from subprocess import Popen, PIPE, STDOUT\n", 48 | "\n", 49 | "from IPython.core.magic import register_line_magic\n", 50 | "\n", 51 | "\n", 52 | "@register_line_magic\n", 53 | "def runrealcmd(command):\n", 54 | " with Popen(\n", 55 | " command, stdout=PIPE, shell=True, stderr=STDOUT, bufsize=1, close_fds=True\n", 56 | " ) as process:\n", 57 | " for line in iter(process.stdout.readline, b\"\"):\n", 58 | " print(line.rstrip().decode(\"utf-8\"))" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "id": "9b194805", 64 | "metadata": {}, 65 | "source": [ 66 | "## Choosing your climate datasets and scenarios" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "id": "2b3c35f2", 72 | "metadata": {}, 73 | "source": [ 74 | "First thing is to prepare all the options for your project, this is done using the configuration or settings file below" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "id": "fb140db3", 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "# Add here the path where you have installed the blueearth-cst tool\n", 85 | "cst_path = r'D:\\blueearth_cst'\n", 86 | "os.chdir(cst_path)" 87 | ] 88 | }, 89 | { 90 | "cell_type": "markdown", 91 | "id": "4e8e697e", 92 | "metadata": {}, 93 | "source": [ 94 | "To chose where the river or basin you want to work with, you will need to supply the location of your river.\n", 95 | "\n", 96 | "The location can either be:\n", 97 | "- for a basin to the sea, any point in the basin [x, y]\n", 98 | "- for a subbasin (tributary or part of a basin) the location of the subbasin outlet [xout, yout]\n", 99 | "- a bounding box [xmin, ymin, xmax, ymax]\n", 100 | "- a geometry file of your region of interest (eg, myregion.shp)\n", 101 | "\n", 102 | "The coordinates should be in EPSG 4326.\n", 103 | "For points, you can use Google Maps to locate your coordinates.\n", 104 | "For bounding box or point cooridnates, you can also copy-paste locations from this website: http://bboxfinder.com/" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "id": "161624ae", 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "%%writefile ./config/my-project-settings.yml\n", 115 | "\n", 116 | "###########################################################################################\n", 117 | "#################################### General setting ######################################\n", 118 | "\n", 119 | "# Path to where the wflow model will be stored (include the project name)\n", 120 | "project_dir: examples/myModel\n", 121 | "\n", 122 | "# Path to the data catalog to use (can be different for Linux/Windows/data stored on P drive or other)\n", 123 | "data_sources: config/deltares_data.yml\n", 124 | "data_sources_climate: config/cmip6_data.yml\n", 125 | "\n", 126 | "# Time horizon of the historical period\n", 127 | "starttime: \"2000-01-01T00:00:00\"\n", 128 | "endtime: \"2020-12-31T00:00:00\"\n", 129 | "# Historical climate data source (name as available in the data_sources catalog file). Either [era5, chirps_global, chirps, eobs].\n", 130 | "clim_historical: era5\n", 131 | "\n", 132 | "# List of wflow output variables to save\n", 133 | "wflow_outvars: ['river discharge', 'overland flow', 'actual evapotranspiration', 'groundwater recharge', 'snow']\n", 134 | "\n", 135 | "###########################################################################################\n", 136 | "#################################### Model building #######################################\n", 137 | "\n", 138 | "# hydromt \"region\" for building the model, coordinates are in EPSG 4326\n", 139 | "# for more info on how to fill this, you can see the hydromt documentation: \n", 140 | "# https://deltares.github.io/hydromt/latest/user_guide/cli.html#region-options\n", 141 | "# For wflow please use either 'basin' or 'subbasin' keywords\n", 142 | "#model_region: \"{'subbasin': [108.13800, 15.5414], 'uparea':100}\"\n", 143 | "model_region: \"{'subbasin': [108.152711, 15.571764], 'uparea': 100}\"\n", 144 | "# Grid resolution of the model in degrees\n", 145 | "model_resolution: 0.0083333\n", 146 | "\n", 147 | "# If available, please here put the path to the available observation stations locations files\n", 148 | "# csv file containing three columns: station_ID, x, y\n", 149 | "output_locations: None # If set to None, will default to the wflow outlets\n", 150 | "# Path to the file with observed discharge timeseries per station \n", 151 | "observations_timeseries: None\n", 152 | " \n", 153 | "###########################################################################################\n", 154 | "################################## Climate projections ####################################\n", 155 | "# climate models and scenarios\n", 156 | "clim_project: cmip6\n", 157 | "models: ['NOAA-GFDL/GFDL-ESM4']\n", 158 | "scenarios: [ssp245, ssp585]\n", 159 | "members: [r1i1p1f1]\n", 160 | "\n", 161 | "variables: [precip, temp]\n", 162 | "\n", 163 | "# time horizons of the climate projections\n", 164 | "start_month_hyd_year: Jan\n", 165 | "historical: 1980, 2010\n", 166 | "future_horizons: \n", 167 | " near: 2030, 2060\n", 168 | "# far: 2070, 2100\n", 169 | "\n", 170 | "# Additional gridded outputs (else only basin average)\n", 171 | "save_grids: FALSE" 172 | ] 173 | }, 174 | { 175 | "cell_type": "markdown", 176 | "id": "a16b0694", 177 | "metadata": {}, 178 | "source": [ 179 | "## Running the model building workflow" 180 | ] 181 | }, 182 | { 183 | "cell_type": "markdown", 184 | "id": "7ce52e95", 185 | "metadata": {}, 186 | "source": [ 187 | "The first line is to create an image of our workflow with snakemake to verify that all our settings are well taken into account." 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "id": "1c064489", 194 | "metadata": {}, 195 | "outputs": [], 196 | "source": [ 197 | "! snakemake -s Snakefile_climate_projections --configfile config/my-project-settings.yml --dag | dot -Tpng > dag_projections.png" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": null, 203 | "id": "8c757eb0", 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [ 207 | "display.Image(\"./dag_projections.png\")" 208 | ] 209 | }, 210 | { 211 | "cell_type": "markdown", 212 | "id": "608768f2", 213 | "metadata": {}, 214 | "source": [ 215 | "Now we will unlock your directory in order to save the future results of the workflow. This step is most of the time not necessary." 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": null, 221 | "id": "d7cd5896", 222 | "metadata": {}, 223 | "outputs": [], 224 | "source": [ 225 | "! snakemake --unlock -s Snakefile_climate_projections --configfile config/my-project-settings.yml" 226 | ] 227 | }, 228 | { 229 | "cell_type": "markdown", 230 | "id": "f9324e09", 231 | "metadata": {}, 232 | "source": [ 233 | "And finally let's run our worflow:" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": null, 239 | "id": "15cbafcf", 240 | "metadata": {}, 241 | "outputs": [], 242 | "source": [ 243 | "%runrealcmd snakemake all -c 1 -s Snakefile_climate_projections --configfile config/my-project-settings.yml" 244 | ] 245 | }, 246 | { 247 | "cell_type": "markdown", 248 | "id": "3ef0c8e1", 249 | "metadata": {}, 250 | "source": [ 251 | "## Results" 252 | ] 253 | }, 254 | { 255 | "cell_type": "markdown", 256 | "id": "6d8926f2", 257 | "metadata": {}, 258 | "source": [ 259 | "First check that snake did run everything." 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": null, 265 | "id": "b9deda71", 266 | "metadata": {}, 267 | "outputs": [], 268 | "source": [ 269 | "! snakemake -s Snakefile_climate_projections --configfile config/my-project-settings.yml --dag | dot -Tpng > dag_projections.png" 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": null, 275 | "id": "492642df", 276 | "metadata": {}, 277 | "outputs": [], 278 | "source": [ 279 | "display.Image(\"./dag_projections.png\")" 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "id": "a37015f7", 285 | "metadata": {}, 286 | "source": [ 287 | "Let's see which files where created in our output folder." 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count": null, 293 | "id": "b88459bd", 294 | "metadata": {}, 295 | "outputs": [], 296 | "source": [ 297 | "root = \"examples/myModel\"\n", 298 | "for path, _, files in os.walk(root):\n", 299 | " print(path)\n", 300 | " for name in files:\n", 301 | " if name.endswith(\".xml\"):\n", 302 | " continue\n", 303 | " print(f\" - {name}\")" 304 | ] 305 | }, 306 | { 307 | "cell_type": "code", 308 | "execution_count": null, 309 | "id": "aef149b2", 310 | "metadata": {}, 311 | "outputs": [], 312 | "source": [ 313 | "import pandas as pd\n", 314 | "df = pd.read_csv('examples/myModel/climate_projections/cmip6/annual_change_scalar_stats_summary_mean.csv')\n", 315 | "df" 316 | ] 317 | } 318 | ], 319 | "metadata": { 320 | "kernelspec": { 321 | "display_name": "Python 3 (ipykernel)", 322 | "language": "python", 323 | "name": "python3" 324 | }, 325 | "language_info": { 326 | "codemirror_mode": { 327 | "name": "ipython", 328 | "version": 3 329 | }, 330 | "file_extension": ".py", 331 | "mimetype": "text/x-python", 332 | "name": "python", 333 | "nbconvert_exporter": "python", 334 | "pygments_lexer": "ipython3", 335 | "version": "3.10.2" 336 | } 337 | }, 338 | "nbformat": 4, 339 | "nbformat_minor": 5 340 | } 341 | -------------------------------------------------------------------------------- /config/cmip6_data.yml: -------------------------------------------------------------------------------- 1 | meta: 2 | version: 2022.12 3 | cmip6_{model}_historical_{member}: 4 | crs: 4326 5 | data_type: RasterDataset 6 | driver: zarr 7 | filesystem: gcs 8 | kwargs: 9 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 10 | decode_times: true 11 | preprocess: harmonise_dims 12 | consolidated: true 13 | meta: 14 | category: climate 15 | paper_doi: 10.1175/BAMS-D-11-00094.1 16 | paper_ref: Taylor et al. 2012 17 | source_license: CC BY 4.0 18 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 19 | source_version: 1.3.1 20 | placeholders: 21 | model: ['IPSL/IPSL-CM6A-LR','SNU/SAM0-UNICON','NCAR/CESM2','NCAR/CESM2-WACCM','INM/INM-CM4-8','INM/INM-CM5-0', 22 | 'NOAA-GFDL/GFDL-ESM4','NCC/NorESM2-LM','NIMS-KMA/KACE-1-0-G','CAS/FGOALS-f3-L','CSIRO-ARCCSS/ACCESS-CM2', 23 | 'NCC/NorESM2-MM','CSIRO/ACCESS-ESM1-5','NCAR/CESM2-WACCM-FV2','NCAR/CESM2-FV2','CMCC/CMCC-CM2-SR5', 24 | 'AS-RCEC/TaiESM1','NCC/NorCPM1','IPSL/IPSL-CM5A2-INCA','CMCC/CMCC-CM2-HR4','CMCC/CMCC-ESM2','IPSL/IPSL-CM6A-LR-INCA', 25 | 'E3SM-Project/E3SM-1-0'] 26 | # model: ['NOAA-GFDL/GFDL-CM4','BCC/BCC-CSM2-MR', 'BCC/BCC-ESM1', 'CCCma/CanESM5', 'MRI/MRI-ESM2-0', 27 | # 'HAMMOZ-Consortium/MPI-ESM-1-2-HAM', 'MPI-M/MPI-ESM1-2-LR', 'MPI-M/MPI-ESM1-2-HR', 'NUIST/NESM3', 28 | # 'CAS/FGOALS-g3', 'MIROC/MIROC6', 'KIOST/KIOST-ESM', 'CCCR-IITM/IITM-ESM', 'AWI/AWI-ESM-1-1-LR', 29 | # 'EC-Earth-Consortium/EC-Earth3-Veg-LR', 'EC-Earth-Consortium/EC-Earth3-Veg', 30 | # 'EC-Earth-Consortium/EC-Earth3', 'EC-Earth-Consortium/EC-Earth3-AerChem', 31 | # 'EC-Earth-Consortium/EC-Earth3-CC', 'MPI-M/ICON-ESM-LR'] # other models but were regridded to irregular grid for lat 32 | member: ['r1i1p1f1'] 33 | path: gs://cmip6/CMIP6/CMIP/{model}/historical/{member}/Amon/{variable}/*/* 34 | rename: 35 | pr: precip 36 | tas: temp 37 | rsds: kin 38 | psl: press_msl 39 | unit_add: 40 | temp: -273.15 41 | unit_mult: 42 | precip: 86400 43 | press_msl: 0.01 44 | 45 | cmip6_{model}_ssp119_{member}: 46 | crs: 4326 47 | data_type: RasterDataset 48 | driver: zarr 49 | filesystem: gcs 50 | kwargs: 51 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 52 | decode_times: true 53 | preprocess: harmonise_dims 54 | consolidated: true 55 | meta: 56 | category: climate 57 | paper_doi: 10.1175/BAMS-D-11-00094.1 58 | paper_ref: Taylor et al. 2012 59 | source_license: CC BY 4.0 60 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 61 | source_version: 1.3.1 62 | placeholders: 63 | model: ['NOAA-GFDL/GFDL-ESM4', 'IPSL/IPSL-CM6A-LR'] 64 | # model: ['CCCma/CanESM5', 'MRI/MRI-ESM2-0','EC-Earth-Consortium/EC-Earth3-Veg', 65 | # 'EC-Earth-Consortium/EC-Earth3-Veg-LR'] # other models but were regridded to irregular grid for lat 66 | member: ['r1i1p1f1'] 67 | path: gs://cmip6/CMIP6/ScenarioMIP/{model}/ssp119/{member}/Amon/{variable}/*/* 68 | rename: 69 | pr: precip 70 | tas: temp 71 | rsds: kin 72 | psl: press_msl 73 | unit_add: 74 | temp: -273.15 75 | unit_mult: 76 | precip: 86400 77 | press_msl: 0.01 78 | 79 | cmip6_{model}_ssp126_{member}: 80 | crs: 4326 81 | data_type: RasterDataset 82 | driver: zarr 83 | filesystem: gcs 84 | kwargs: 85 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 86 | decode_times: true 87 | preprocess: harmonise_dims 88 | consolidated: true 89 | meta: 90 | category: climate 91 | paper_doi: 10.1175/BAMS-D-11-00094.1 92 | paper_ref: Taylor et al. 2012 93 | source_license: CC BY 4.0 94 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 95 | source_version: 1.3.1 96 | placeholders: 97 | model: ['NOAA-GFDL/GFDL-ESM4','INM/INM-CM4-8','INM/INM-CM5-0','IPSL/IPSL-CM6A-LR','NIMS-KMA/KACE-1-0-G', 98 | 'NCC/NorESM2-MM','CMCC/CMCC-CM2-SR5','IPSL/IPSL-CM5A2-INCA','CMCC/CMCC-ESM2'] 99 | # model: ['BCC/BCC-CSM2-MR','CCCma/CanESM5','AWI/AWI-CM-1-1-MR','DKRZ/MPI-ESM1-2-HR','MPI-M/MPI-ESM1-2-LR', 100 | # 'NUIST/NESM3', 'MIROC/MIROC6','MRI/MRI-ESM2-0','KIOST/KIOST-ESM','EC-Earth-Consortium/EC-Earth3-Veg', 101 | # 'EC-Earth-Consortium/EC-Earth3','CCCR-IITM/IITM-ESM','EC-Earth-Consortium/EC-Earth3-Veg-LR', 102 | # 'NCAR/CESM2-WACCM','CSIRO-ARCCSS/ACCESS-CM2'] # other models but were regridded to irregular grid for lat or time outofbounds 103 | member: ['r1i1p1f1'] 104 | path: gs://cmip6/CMIP6/ScenarioMIP/{model}/ssp126/{member}/Amon/{variable}/*/* 105 | rename: 106 | pr: precip 107 | tas: temp 108 | rsds: kin 109 | psl: press_msl 110 | unit_add: 111 | temp: -273.15 112 | unit_mult: 113 | precip: 86400 114 | press_msl: 0.01 115 | 116 | cmip6_{model}_ssp245_{member}: 117 | crs: 4326 118 | data_type: RasterDataset 119 | driver: zarr 120 | filesystem: gcs 121 | kwargs: 122 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 123 | decode_times: true 124 | preprocess: harmonise_dims 125 | consolidated: true 126 | meta: 127 | category: climate 128 | paper_doi: 10.1175/BAMS-D-11-00094.1 129 | paper_ref: Taylor et al. 2012 130 | source_license: CC BY 4.0 131 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 132 | source_version: 1.3.1 133 | placeholders: 134 | model: ['NOAA-GFDL/GFDL-ESM4','IPSL/IPSL-CM6A-LR','INM/INM-CM4-8','INM/INM-CM5-0','NCAR/CESM2-WACCM', 135 | 'NCC/NorESM2-LM','CSIRO-ARCCSS/ACCESS-CM2','NCC/NorESM2-MM','NIMS-KMA/KACE-1-0-G','CMCC/CMCC-CM2-SR5', 136 | 'CMCC/CMCC-ESM2','AS-RCEC/TaiESM1'] 137 | # model: ['NOAA-GFDL/GFDL-CM4','BCC/BCC-CSM2-MR','CCCma/CanESM5','EC-Earth-Consortium/EC-Earth3-Veg', 138 | # 'EC-Earth-Consortium/EC-Earth3','EC-Earth-Consortium/EC-Earth3-Veg-LR','EC-Earth-Consortium/EC-Earth3-CC', 139 | # 'AWI/AWI-CM-1-1-MR','MRI/MRI-ESM2-0','MPI-M/MPI-ESM1-2-LR','DKRZ/MPI-ESM1-2-HR','NUIST/NESM3', 140 | # 'CAS/FGOALS-g3','MIROC/MIROC6','KIOST/KIOST-ESM','CCCR-IITM/IITM-ESM'] # other models but were regridded to irregular grid for lat 141 | member: ['r1i1p1f1'] 142 | path: gs://cmip6/CMIP6/ScenarioMIP/{model}/ssp245/{member}/Amon/{variable}/*/* 143 | rename: 144 | pr: precip 145 | tas: temp 146 | rsds: kin 147 | psl: press_msl 148 | unit_add: 149 | temp: -273.15 150 | unit_mult: 151 | precip: 86400 152 | press_msl: 0.01 153 | 154 | cmip6_{model}_ssp370_{member}: 155 | crs: 4326 156 | data_type: RasterDataset 157 | driver: zarr 158 | filesystem: gcs 159 | kwargs: 160 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 161 | decode_times: true 162 | preprocess: harmonise_dims 163 | consolidated: true 164 | meta: 165 | category: climate 166 | paper_doi: 10.1175/BAMS-D-11-00094.1 167 | paper_ref: Taylor et al. 2012 168 | source_license: CC BY 4.0 169 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 170 | source_version: 1.3.1 171 | placeholders: 172 | model: ['NOAA-GFDL/GFDL-ESM4','IPSL/IPSL-CM6A-LR','INM/INM-CM4-8','INM/INM-CM5-0','NCAR/CESM2-WACCM', 173 | 'CSIRO-ARCCSS/ACCESS-CM2','NCC/NorESM2-MM','NCC/NorESM2-LM','NIMS-KMA/KACE-1-0-G','CMCC/CMCC-CM2-SR5', 174 | 'IPSL/IPSL-CM5A2-INCA','CMCC/CMCC-ESM2'] 175 | # model: ['BCC/BCC-CSM2-MR','CCCma/CanESM5','AWI/AWI-CM-1-1-MR','BCC/BCC-ESM1','DKRZ/MPI-ESM1-2-HR', 176 | # EC-Earth-Consortium/EC-Earth3-Veg','EC-Earth-Consortium/EC-Earth3','EC-Earth-Consortium/EC-Earth3-AerChem', 177 | # 'EC-Earth-Consortium/EC-Earth3-Veg-LR','MRI/MRI-ESM2-0','MPI-M/MPI-ESM1-2-LR','HAMMOZ-Consortium/MPI-ESM-1-2-HAM', 178 | # 'CAS/FGOALS-g3','MIROC/MIROC6','CCCR-IITM/IITM-ESM'] # other models but were regridded to irregular grid for lat 179 | member: ['r1i1p1f1'] 180 | path: gs://cmip6/CMIP6/ScenarioMIP/{model}/ssp370/{member}/Amon/{variable}/*/* 181 | rename: 182 | pr: precip 183 | tas: temp 184 | rsds: kin 185 | psl: press_msl 186 | unit_add: 187 | temp: -273.15 188 | unit_mult: 189 | precip: 86400 190 | press_msl: 0.01 191 | 192 | cmip6_{model}_ssp434_{member}: 193 | crs: 4326 194 | data_type: RasterDataset 195 | driver: zarr 196 | filesystem: gcs 197 | kwargs: 198 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 199 | decode_times: true 200 | preprocess: harmonise_dims 201 | consolidated: true 202 | meta: 203 | category: climate 204 | paper_doi: 10.1175/BAMS-D-11-00094.1 205 | paper_ref: Taylor et al. 2012 206 | source_license: CC BY 4.0 207 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 208 | source_version: 1.3.1 209 | placeholders: 210 | model: ['IPSL/IPSL-CM6A-LR'] 211 | # model: ['CCCma/CanESM5'] # other models but were regridded to irregular grid for lat 212 | member: ['r1i1p1f1'] 213 | path: gs://cmip6/CMIP6/ScenarioMIP/{model}/ssp434/{member}/Amon/{variable}/*/* 214 | rename: 215 | pr: precip 216 | tas: temp 217 | rsds: kin 218 | psl: press_msl 219 | unit_add: 220 | temp: -273.15 221 | unit_mult: 222 | precip: 86400 223 | press_msl: 0.01 224 | 225 | cmip6_{model}_ssp460_{member}: 226 | crs: 4326 227 | data_type: RasterDataset 228 | driver: zarr 229 | filesystem: gcs 230 | kwargs: 231 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 232 | decode_times: true 233 | preprocess: harmonise_dims 234 | consolidated: true 235 | meta: 236 | category: climate 237 | paper_doi: 10.1175/BAMS-D-11-00094.1 238 | paper_ref: Taylor et al. 2012 239 | source_license: CC BY 4.0 240 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 241 | source_version: 1.3.1 242 | placeholders: 243 | model: ['IPSL/IPSL-CM6A-LR'] 244 | # model: ['CCCma/CanESM5'] # other models but were regridded to irregular grid for lat 245 | member: ['r1i1p1f1'] 246 | path: gs://cmip6/CMIP6/ScenarioMIP/{model}/ssp460/{member}/Amon/{variable}/*/* 247 | rename: 248 | pr: precip 249 | tas: temp 250 | rsds: kin 251 | psl: press_msl 252 | unit_add: 253 | temp: -273.15 254 | unit_mult: 255 | precip: 86400 256 | press_msl: 0.01 257 | 258 | cmip6_{model}_ssp585_{member}: 259 | crs: 4326 260 | data_type: RasterDataset 261 | driver: zarr 262 | filesystem: gcs 263 | kwargs: 264 | drop_variables: ['time_bnds', 'lat_bnds', 'lon_bnds', 'bnds'] 265 | decode_times: true 266 | preprocess: harmonise_dims 267 | consolidated: true 268 | meta: 269 | category: climate 270 | paper_doi: 10.1175/BAMS-D-11-00094.1 271 | paper_ref: Taylor et al. 2012 272 | source_license: CC BY 4.0 273 | source_url: https://console.cloud.google.com/marketplace/details/noaa-public/cmip6?_ga=2.136097265.-1784288694.1541379221&pli=1 274 | source_version: 1.3.1 275 | placeholders: 276 | model: ['NOAA-GFDL/GFDL-ESM4','INM/INM-CM4-8','INM/INM-CM5-0','NIMS-KMA/KACE-1-0-G','NCC/NorESM2-MM','NCC/NorESM2-LM', 277 | 'CMCC/CMCC-CM2-SR5','AS-RCEC/TaiESM1','CMCC/CMCC-ESM2'] 278 | # model: ['NOAA-GFDL/GFDL-CM4','BCC/BCC-CSM2-MR','CCCma/CanESM5','AWI/AWI-CM-1-1-MR','MPI-M/MPI-ESM1-2-LR', 279 | # 'DKRZ/MPI-ESM1-2-HR','MIROC/MIROC6','MRI/MRI-ESM2-0','EC-Earth-Consortium/EC-Earth3-Veg', 280 | # 'EC-Earth-Consortium/EC-Earth3','EC-Earth-Consortium/EC-Earth3-Veg-LR','EC-Earth-Consortium/EC-Earth3-CC', 281 | # 'NUIST/NESM3','CAS/FGOALS-g3','IPSL/IPSL-CM6A-LR','KIOST/KIOST-ESM', 'NCAR/CESM2-WACCM','CCCR-IITM/IITM-ESM', 282 | # 'CSIRO-ARCCSS/ACCESS-CM2','CSIRO/ACCESS-ESM1-5'] # other models but were regridded to irregular grid for lat or time outofbounds 283 | member: ['r1i1p1f1'] 284 | path: gs://cmip6/CMIP6/ScenarioMIP/{model}/ssp585/{member}/Amon/{variable}/*/* 285 | rename: 286 | pr: precip 287 | tas: temp 288 | rsds: kin 289 | psl: press_msl 290 | unit_add: 291 | temp: -273.15 292 | unit_mult: 293 | precip: 86400 294 | press_msl: 0.01 --------------------------------------------------------------------------------