├── .github └── workflows │ └── python-publish.yml ├── .gitignore ├── HISTORY.rst ├── LICENSE ├── README.md ├── environment └── environment.yml ├── notebooks ├── README.md ├── data-acquisition.ipynb ├── data-processing │ ├── collapse-sliiders-to-seg.ipynb │ ├── create-diaz-pyCIAM-inputs.ipynb │ └── slr │ │ ├── AR5 │ │ ├── 1-convert-mat-version.ipynb │ │ ├── 2-generate-projected-lsl.ipynb │ │ ├── 3-retrieve-num-gcms.ipynb │ │ ├── 4-process-localizesl-output.ipynb │ │ ├── 5-create-slr-quantile.ipynb │ │ └── README.md │ │ ├── AR6.ipynb │ │ └── sweet.ipynb ├── models │ ├── create-surge-lookup-tables.ipynb │ ├── fit-movefactor.ipynb │ ├── run-pyCIAM-diaz2016.ipynb │ └── run-pyCIAM-slrquantiles.ipynb ├── nb_logs │ ├── data-acquisition.ipynb │ ├── data-processing │ │ └── slr │ │ │ └── AR6.ipynb │ └── models │ │ └── run-pyCIAM-slrquantiles.ipynb ├── post-processing │ ├── pyCIAM-results-figures.ipynb │ └── zenodo-upload.ipynb ├── run_example.sh ├── run_full_replication.sh └── shared.py ├── params.json ├── params_diaz.json ├── pyCIAM ├── __init__.py ├── constants.py ├── io.py ├── run.py ├── surge │ ├── __init__.py │ ├── _calc.py │ ├── damage_funcs.py │ └── lookup.py └── utils.py └── pyproject.toml /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package 10 | 11 | on: [push] 12 | 13 | jobs: 14 | deploy: 15 | 16 | runs-on: ubuntu-latest 17 | permissions: 18 | id-token: write 19 | steps: 20 | - uses: actions/checkout@v4 21 | - name: Set up Python 22 | uses: actions/setup-python@v5 23 | with: 24 | python-version: '3.x' 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install build setuptools_scm 29 | - name: Build package 30 | run: python -m build 31 | - name: Publish distribution 📦 to PyPI 32 | if: startsWith(github.ref, 'refs/tags') 33 | uses: pypa/gh-action-pypi-publish@release/v1 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *.cover 45 | 46 | # Translations 47 | *.mo 48 | *.pot 49 | 50 | # Django stuff: 51 | *.log 52 | 53 | # Sphinx documentation 54 | docs/_build/ 55 | 56 | # PyBuilder 57 | target/ 58 | 59 | # DotEnv configuration 60 | .env 61 | 62 | # Database 63 | *.db 64 | *.rdb 65 | 66 | # Pycharm 67 | .idea 68 | 69 | # VS Code 70 | .vscode/ 71 | 72 | # Spyder 73 | .spyproject/ 74 | 75 | # Jupyter NB Checkpoints 76 | .ipynb_checkpoints/ 77 | 78 | # exclude data from source control by default 79 | /data/ 80 | 81 | # Mac OS-specific storage files 82 | .DS_Store 83 | 84 | # vim 85 | *.swp 86 | *.swo 87 | 88 | # Mypy cache 89 | .mypy_cache/ 90 | 91 | # other 92 | dask-worker-space 93 | nb_logs 94 | .virtual_documents 95 | shared_gcs.py 96 | -------------------------------------------------------------------------------- /HISTORY.rst: -------------------------------------------------------------------------------- 1 | History 2 | ======= 3 | 4 | v1.2.1 5 | ------ 6 | * Update GitHub actions versions 7 | * Update data acquisition notebook, using helper functions to download input data from 8 | zenodo 9 | 10 | v1.2.0 11 | ------ 12 | * Point `data-acquisition.ipynb` to updated Zenodo deposit that fixes the dtype of `subsets` variable in `diaz2016_inputs_raw.zarr.zip` to be bool rather than int8 13 | * Variable name bugfix in `data-acquisition.ipynb` 14 | * Add netcdf versions of SLIIDERS and the pyCIAM results to `upload-zenodo.ipynb` 15 | * Update results in Zenodo record to use SLIIDERS v1.2 16 | 17 | v1.1.2 18 | ------ 19 | * Update zenodo-upload.ipynb to include packages 20 | * Update readme to emphasize environment.yml 21 | 22 | v1.1.1 23 | ------ 24 | * Update package dependencies 25 | 26 | v1.1.0 27 | ------ 28 | * Use general Zenodo DOI numbers referencing latest version of each deposit 29 | * Addition of AR6 and Sweet scenarios 30 | * Addition of `execute_pyciam` wrapper function 31 | * Updates to SLIIDERS inputs based on reviewer comments 32 | * General repo hygiene 33 | * Additional/updated figures/tables/results in `post-processing/pyCIAM-results-figures.ipynb` 34 | 35 | v1.0.2 36 | ------ 37 | * Add HISTORY.rst 38 | * Bump patch number to align with Zenodo deposit update 39 | 40 | v1.0.1 41 | ------ 42 | * Add docstrings for all public functions 43 | * Update readme with additional step-by-step instructions to serve as user manual 44 | * Add `optimize_case` function 45 | * Refactor various functions to facilitate abstraction 46 | 47 | v1.0.0 48 | ------ 49 | * Initial commit 50 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Climate Impact Lab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 
all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pyCIAM 2 | 3 | pyCIAM is a Python port, including numerous model updates, of the [Coastal Impacts and Adaptation Model](https://github.com/delavane/CIAM), as described in [Diaz 2016](https://link.springer.com/article/10.1007/s10584-016-1675-4#Sec13). This code accompanies Depsky et al. 2023. See the manuscript for further details on model functionality and structure. 4 | 5 | `pyCIAM`, like `CIAM`, is a tool to estimate global economic impacts of sea level rise at fine resolution, accounting for adaptation as well as spatial variability and uncertainty in sea level rise. This model requires a number of socioeconomic and sea level rise inputs, organized by coastal "segment" and elevation. In Depsky et al. 2023, we develop the [SLIIDERS](https://doi.org/10.5281/zenodo.6449230) dataset to serve this purpose; however, users may wish to alter and/or replace this dataset for their own purposes, especially if newer input data (used to generate `SLIIDERS`) becomes available. 6 | 7 | Model outputs consistent with Depsky et al. 2023 are available on Zenodo, with DOI [10.5281/zenodo.6014085](https://doi.org/10.5281/zenodo.6014085). 8 | 9 | At present, this repository contains both the `pyCIAM` package and the Jupyter Notebook-based workflow used to create the outputs of Depsky et al. 2023. In the future, we will migrate the notebook workflow into its own repository to isolate the more broadly useful package within this one. 10 | 11 | ## Status 12 | 13 | This code was developed for Depsky et al. 2023, but our intention is for broader use. If and when that broader usage develops, the package will continue to become more formalized and well-tested. In the meantime, please pardon the dust as we continue to finalize the package and develop a comprehensive testing suite. Don't hesitate to reach out by filing an issue if you have any questions about usage. 14 | 15 | ## Installation 16 | 17 | `pyCIAM` is available on PyPI and installable via `pip`: 18 | 19 | ```bash 20 | pip install python-CIAM 21 | ``` 22 | 23 | Note that package dependencies are currently not thoroughly specified and must be installed separately by the user. We are working to declare dependencies in the package so that all of them are installed by the `pip install` command. 24 | 25 | To run the model, you will need to define parameters, which are located in [params.json](./params.json). This file must either be user-generated from scratch or obtained from this repository by cloning or downloading that individual file. 26 | 27 | If you wish to use the notebook-based workflow from Depsky et al. 2023, either to replicate those results or as a template for separate analyses, you should clone this repository. We are working on developing interactive documentation that will also contain these notebooks.
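For example, to obtain both the parameter file and the full notebook workflow in one step, you can clone the repository (the URL below assumes the upstream Climate Impact Lab repository; substitute your own fork if appropriate):

```bash
git clone https://github.com/ClimateImpactLab/pyCIAM.git
cd pyCIAM
```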
28 | 29 | The requisite packages for executing the full Depsky et al. 2023 workflow are identified in [environment.yml](environment/environment.yml), which can be used to build a conda/mamba environment via 30 | 31 | ```bash 32 | mamba env create -f /path/to/environment.yml 33 | mamba activate pyciam 34 | ``` 35 | 36 | ## Quickstart 37 | 38 | To run an example set of segments through pyCIAM, execute the following steps: 39 | 40 | 1. Define a correct set of filepaths in [shared.py](./notebooks/shared.py). All filepaths should be defined as :py:class:`pathlib.Path`-like objects. Because many files are written to or read from in replicating the full Depsky et al. 2023 analysis, the filepaths that are explicitly needed to run the [example script](./notebooks/run_example.sh) are indicated with a `# NEEDED FOR EXAMPLE` comment. 41 | 42 | 2. Define a `start_dask_cluster` function in [shared.py](./notebooks/shared.py). All notebooks in this repo use this function to instantiate a Dask cluster for executing the model. The type of cluster needed for your use case will depend on your computing environment. The default function simply instantiates a default :py:class:`distributed.LocalCluster` object. 43 | 44 | 3. Execute [run_example.sh](./notebooks/run_example.sh). 45 | 46 | ## Usage 47 | 48 | To recreate the analysis of Depsky et al. 2023, you will need additional inputs and will need to run the model on a wider range of scenarios and locations. Ensure that all of the paths in [shared.py](./notebooks/shared.py) are valid and then execute [run_full_replication.sh](./notebooks/run_full_replication.sh). Note that this may require some tuning of parallelization parameters (such as chunk size) within individual notebooks depending on the size of your resources. For reference, in development, these workflows were executed on a Dask cluster with ~6.5 GB of memory per worker. 49 | 50 | For users wishing to use `pyCIAM` in other contexts, we recommend starting with [the example shell script](./notebooks/run_example.sh) as a template. A full description of the model is available in Depsky et al. 2023, and a description of the workflow contained in each notebook is provided in [notebooks/README.md](./notebooks/README.md). 51 | 52 | ### API 53 | 54 | pyCIAM contains only a handful of public functions that a user will want to employ when executing the model. All are available as top-level imports from pyCIAM. 55 | 56 | * `execute_pyciam`: The end-to-end function that represents the most likely entrypoint for users. All other public functions are called by this one. 57 | * `create_surge_lookup`: Creates a lookup table that can be leveraged to build a 2D linear spline function for calculating damages from extreme sea levels. This can drastically reduce the computational expense required for simulation on large ensembles of sea level rise trajectories. 58 | * `load_ciam_inputs`: An I/O function that loads SLIIDERS-like input data, a storm surge damage lookup table (if specified), and model parameters, and processes/formats these data for use in pyCIAM. 59 | * `load_diaz_inputs`: A similar function that loads a preprocessed SLIIDERS-like input dataset generated from the same input data used in Diaz 2016. This is used to generate comparisons to the Diaz 2016 results within Depsky et al. 2023. 60 | * `calc_costs`: The main computation engine in pyCIAM. It computes costs for all cost types, regions, years, socioeconomic and SLR trajectories, and adaptation cases.
It does *not* compute the optimal adaptation case, which must be computed afterward, for reasons described below. 61 | * `select_optimal_case`: Calculates the optimal adaptation choice for a given region and returns the associated costs and NPV. 62 | 63 | ### Step-by-step Instructions 64 | 65 | The sections below describe the high-level stages of the pyCIAM model. With the exception of the first two (obtaining model inputs and specifying parameters), the `execute_pyciam` wrapper covers all of the remaining steps. 66 | 67 | #### Obtaining model inputs 68 | 69 | pyCIAM depends on inputs describing a variety of socioeconomic and sea level variables across user-defined regions. The [SLIIDERS](https://doi.org/10.5281/zenodo.6449230) dataset has been developed to contain the socioeconomic variables; however, any similarly formatted input dataset will work if users wish to substitute, for example, alternative projections of economic growth besides the Shared Socioeconomic Pathways. To begin, we recommend that users obtain the SLIIDERS dataset, which can be found at the linked DOI. 70 | 71 | All inputs necessary to reproduce the results in Depsky et al. 2023 can be downloaded via [data-acquisition.ipynb](./notebooks/data-acquisition.ipynb). 72 | 73 | Additional processing of some of these inputs is needed. See the notebooks in [notebooks/data-processing](./notebooks/data-processing) and their execution in [run_example.sh](./notebooks/run_example.sh) and [run_full_replication.sh](./notebooks/run_full_replication.sh). 74 | 75 | #### Parameter specification 76 | 77 | Parameters for the model run are defined in [params.json](./params.json). These can be modified to reflect alternative assumptions and/or model duration and timesteps. A description of each parameter can be found in the JSON file. 78 | 79 | #### Surge lookup table creation 80 | 81 | This step is not strictly necessary but provides dramatic performance increases. Rather than calculating the damages from extreme sea levels for all adaptation cases, segments, elevation slices, years, and scenarios, you may develop a lookup table that is used to build a 2D spline function to interpolate surge costs for any given combination of sea level height and the difference between storm surge height and retreat or protection height. `create_surge_lookup()` is used for this. See [create-surge-lookup-tables.ipynb](./notebooks/models/create-surge-lookup-tables.ipynb) for an example. 82 | 83 | #### Calculation of costs for each adaptation case 84 | 85 | In this step, costs for all adaptation options for all segments are calculated. Depending on the number of SLR trajectories modeled, segments may be run in smaller or larger batches to keep the memory footprint to a reasonable size. This parallelization is executed via [Dask](https://www.dask.org). `calc_costs()` is the function that executes these calculations. An example of its usage can be seen in [fit-movefactor.ipynb](./notebooks/models/fit-movefactor.ipynb). 86 | 87 | #### Calculation of the optimal adaptation choice 88 | 89 | In this step, we find the optimal adaptation choice for each segment. `select_optimal_case()` is the function that executes this calculation. It is not called directly in any of the workflows in this repository but is called within `execute_pyciam`. 90 | 91 | ## Reliance on Dask 92 | 93 | **IMPORTANT**: The notebook-based workflow provided in this repository serves as a set of examples in addition to replicating the analysis in Depsky et al. 2023.
It assumes that parallel task execution occurs via [Dask](https://dask.org). During development, a [dask-gateway](https://gateway.dask.org/) server was used on top of a Kubernetes cluster. Users wishing to run these notebooks must specify the type of Dask cluster they wish to use by replacing the default `start_dask_cluster()` function in [shared.py](./notebooks/shared.py). By default, a :py:class:`distributed.LocalCluster` instance will be created. 94 | 95 | ## Support 96 | 97 | Please file an issue for any problems you encounter. 98 | 99 | ## Contributing 100 | 101 | We encourage community contributions and hope that the functionality of `pyCIAM` will grow as a result. At the moment, we have no contribution template. Please fork the project and file a pull request to propose your addition. Clearly define the contribution that the pull request is making and, when any issues have been resolved, we will merge the new code. 102 | 103 | ## Authors 104 | 105 | The original authors of this code include: 106 | 107 | * Daniel Allen 108 | * Ian Bolliger 109 | * Junho Choi 110 | * Nicholas Depsky 111 | 112 | ## License 113 | 114 | This code is licensed under the [MIT License](./LICENSE). 115 | -------------------------------------------------------------------------------- /environment/environment.yml: -------------------------------------------------------------------------------- 1 | name: pyciam 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - bottleneck=1.3 6 | - bokeh=2.4.3 # for use of dask dashboard 7 | - cartopy=0.21.1 8 | - cloudpathlib=0.13 9 | - distributed=2023.3.1 10 | - flox=0.6.8 11 | - fsspec=2023.3.0 12 | - geopandas=0.12.2 13 | - gitpython=3.1.31 # unmarked dependency of rhg_compute_tools 14 | - jupyterlab=3.6.1 15 | - matplotlib=3.7.1 16 | - numpy=1.24.2 17 | - numpy_groupies=0.9.20 18 | - oct2py=5.6.0 19 | - octave=7.3.0 20 | - openpyxl=3.1.1 21 | - pandas=1.5.3 22 | - papermill=2.3.4 23 | - pint-xarray=0.3 24 | - pip=23.0.1 25 | - python=3.11.0 26 | - requests=2.28.2 27 | - scikit-learn=1.2.2 28 | - scipy=1.10.1 29 | - tqdm=4.65.0 30 | - xarray=2023.2.0 31 | - zarr=2.14.2 32 | - pip: 33 | - python-ciam==1.1 34 | - rhg_compute_tools==1.3 35 | - parameterize_jobs==0.1 -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # Executing pyCIAM 2 | 3 | This README describes the workflow used to produce the results contained in Depsky et al. 2023. The lists of notebooks necessary to run an example pyCIAM workflow or to recreate the full set of Depsky et al. 2023 results are contained in [run_example.sh](run_example.sh) and [run_full_replication.sh](run_full_replication.sh), respectively. 4 | 5 | The aggregated coastal input dataset required for pyCIAM is [SLIIDERS](https://doi.org/10.5281/zenodo.6449230). Alternatively, users may construct their own inputs, for example to integrate alternative underlying data layers. In this case, the inputs must still conform to the format of the SLIIDERS dataset. We would recommend starting from the SLIIDERS construction code found in the [SLIIDERS repository](https://github.com/ClimateImpactLab/sliiders). 6 | 7 | A set of common filepaths, settings, and helper functions used for this workflow are contained in [shared.py](./shared.py). These should be adjusted as needed. In particular, you will need to adjust the filepaths to suit your data storage structure.
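As a rough illustration, the edits to `shared.py` tend to look like the following sketch. The directory layout and file names shown here are placeholder assumptions (only `PATH_SLIIDERS` and `start_dask_cluster` are actual names used by the workflow); adapt them to your own storage and computing environment:

```python
# Hypothetical excerpt of the kinds of edits made in shared.py.
# DIR_DATA and the file names below are illustrative placeholders.
from pathlib import Path

from distributed import LocalCluster

# Point data paths at your own storage layout
DIR_DATA = Path("/path/to/ciam-data")
PATH_SLIIDERS = DIR_DATA / "raw" / "sliiders.zarr"  # placeholder filename


def start_dask_cluster(**kwargs):
    """Return a Dask cluster appropriate for your computing environment.

    The default simply instantiates a local cluster; replace this with e.g. a
    dask-gateway-backed cluster if you run on distributed infrastructure.
    """
    return LocalCluster(**kwargs)
```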
8 | 9 | The following notebooks should be run in the described order to replicate the manuscript results. 10 | 11 | 1. [data-acquisition.ipynb](data-acquisition.ipynb): This notebook downloads all input data necessary to replicate the results of Depsky et al. 2023, with options to download only the subset necessary to run an example pyCIAM model. 12 | 2. [data-processing/collapse-sliiders-to-seg.ipynb](data-processing/collapse-sliiders-to-seg.ipynb): SLIIDERS is provided where each analysis unit corresponds to a unique combination of admin1 region and coastal segment. This is helpful for aggregating results to admin1-level outputs, since the decision-making agent in the model operates at the segment level. For certain use cases, e.g. creating the surge lookup table, the additional admin1 dimension is unnecessary and leads to excess computational demands. Thus, we collapse the dataset to the segment level. This notebook would not be necessary if, for example, a user created a SLIIDERS alternative that was only indexed by segment. 13 | 3. [data-processing/create-diaz-pyCIAM-inputs.ipynb](data-processing/create-diaz-pyCIAM-inputs.ipynb): This notebook generates a SLIIDERS-like input dataset that reflects the inputs used in [Diaz 2016](https://link.springer.com/article/10.1007/s10584-016-1675-4#Sec13). This is necessary for comparisons of results from the original CIAM paper to the updated version. These comparisons are performed and reported on in Depsky et al. 2023. 14 | 4. [data-processing/slr/AR6.ipynb](data-processing/slr/AR6.ipynb): This notebook processes SLR projections based on AR6 emissions scenarios from the FACTS SLR framework. 15 | 5. [data-processing/slr/sweet.ipynb](data-processing/slr/sweet.ipynb): This notebook processes FACTS-generated projections grouped by end-of-century GMSL level as in Sweet et al. 2022. 16 | 6. [data-processing/slr/AR5](data-processing/slr/AR5): These notebooks run LocalizeSL (the predecessor to FACTS) on a variety of SLR scenarios based largely on the IPCC AR5 emissions scenarios. See the [README inside this folder](data-processing/slr/AR5/README.md) for more details. 17 | 7. [models/create-surge-lookup-tables.ipynb](models/create-surge-lookup-tables.ipynb): This notebook creates segment-adm1-specific lookup tables that estimate the fraction of total capital stock lost and the fraction of total population killed as a function of extreme sea level height. Computing these on the fly for a large number of SLR simulations is computationally intractable given the numerical integration needed, so lookup tables are used to enable these calculations. 18 | 8. [models/fit-movefactor.ipynb](models/fit-movefactor.ipynb): This notebook performs the empirical estimation of the relocation cost parameter `movefactor`, as detailed in Depsky et al. 2023. It is purely for analysis and does not create any output datasets necessary for other notebooks. 19 | 9. [models/run-pyCIAM-slrquantiles.ipynb](models/run-pyCIAM-slrquantiles.ipynb): This notebook is a thin wrapper that calls `execute_pyciam()` using the appropriate inputs. 20 | 10. [models/run-pyCIAM-diaz2016.ipynb](models/run-pyCIAM-diaz2016.ipynb): This notebook is a thin wrapper that calls `execute_pyciam()` using inputs and configuration consistent with Diaz 2016. These outputs are used for validation and comparison within Depsky et al. 2023. 21 | 11. [post-processing/pyCIAM-results-figures.ipynb](post-processing/pyCIAM-results-figures.ipynb): This notebook generates the numbers and figures used in Depsky et al. 2023. 22 | 12.
[post-processing/zenodo-upload.ipynb](post-processing/zenodo-upload.ipynb): This notebook can be used by core model developers to upload new versions of SLIIDERS and/or model outputs to Zenodo. It will not need to be used by typical users. 23 | -------------------------------------------------------------------------------- /notebooks/data-acquisition.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "318ec323-147b-42a9-874b-6835b06813bc", 6 | "metadata": {}, 7 | "source": [ 8 | "# Download necessary files" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "9ad66ba5-1ea0-49d5-a860-bbb4963946ed", 14 | "metadata": {}, 15 | "source": [ 16 | "This notebook downloads files necessary to replicate the analysis in Depsky et al. 2023." 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 1, 22 | "id": "65deea57-083b-4641-8b86-d03d8dcf57de", 23 | "metadata": { 24 | "tags": [] 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "import sys\n", 29 | "\n", 30 | "sys.path.append(\"..\")" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 2, 36 | "id": "a9431a2c-9f54-493b-9cdb-1ea9421f1433", 37 | "metadata": { 38 | "tags": [] 39 | }, 40 | "outputs": [], 41 | "source": [ 42 | "from pathlib import Path\n", 43 | "\n", 44 | "import pandas as pd\n", 45 | "import requests\n", 46 | "from cartopy.io import shapereader\n", 47 | "from pyCIAM.io import (\n", 48 | " download_and_extract_from_zenodo,\n", 49 | " download_and_extract_partial_zip,\n", 50 | " get_zenodo_file_list,\n", 51 | ")\n", 52 | "from pyCIAM.utils import copy\n", 53 | "from shared import (\n", 54 | " DIR_SHP,\n", 55 | " DIR_SLR_AR5_IFILES_RAW,\n", 56 | " DIR_SLR_AR6_RAW,\n", 57 | " DIR_SLR_SWEET_RAW,\n", 58 | " LOCALIZESL_COREFILES,\n", 59 | " LOCALIZESL_REV,\n", 60 | " PATH_BORDERS,\n", 61 | " PATH_COASTLINES,\n", 62 | " PATH_DIAZ_INPUTS_RAW,\n", 63 | " PATH_GADM,\n", 64 | " PATH_MOVEFACTOR_DATA,\n", 65 | " PATH_PWT,\n", 66 | " PATH_SLIIDERS,\n", 67 | " PATH_SLIIDERS_INCOME_INTERMEDIATE_FILE,\n", 68 | " PATH_SLR_AR5_QUANTILES,\n", 69 | " PATH_SLR_GMSL_HIST_TIMESERIES,\n", 70 | " PATH_SLR_HIST_TREND_MAP,\n", 71 | " PATHS_SURGE_LOOKUP,\n", 72 | " save,\n", 73 | ")" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 3, 79 | "id": "7e3d5ad1-a9e5-4b03-9dca-57b5b2098e4c", 80 | "metadata": { 81 | "tags": [ 82 | "parameters" 83 | ] 84 | }, 85 | "outputs": [], 86 | "source": [ 87 | "Z_URL_BASE = \"https://zenodo.org/api/\"\n", 88 | "Z_URL_RECORDS = Z_URL_BASE + \"records/{doi}\"\n", 89 | "Z_URL_DEPOSITS = Z_URL_BASE + \"deposit/depositions/{doi}\"\n", 90 | "\n", 91 | "# This will need to point to the correct version of the SLIIDERS zenodo store (see\n", 92 | "# Depsky et al. 2023 for the version associated with that manuscript)\n", 93 | "Z_SLIIDERS_DOI = \"7693868\"\n", 94 | "Z_PYCIAM_DOI = \"8229860\"\n", 95 | "Z_AR6_DOI = \"6382554\"\n", 96 | "Z_SWEET_DOI = \"6067895\"\n", 97 | "\n", 98 | "\n", 99 | "DOWNLOAD_DIAZ_INPUTS = True\n", 100 | "DOWNLOAD_SLIIDERS = True\n", 101 | "DOWNLOAD_SURGE_LOOKUPS = True\n", 102 | "\n", 103 | "DOWNLOAD_PLOTTING_DATA = True\n", 104 | "\n", 105 | "DOWNLOAD_SLR_AR5 = True\n", 106 | "DOWNLOAD_SLR_AR6 = True\n", 107 | "DOWNLOAD_SLR_SWEET = True\n", 108 | "\n", 109 | "# Only needed if you would like to re-run LocalizeSL to re-generate AR5 SLR inputs\n", 110 | "# (requires matlab or octave). 
The output of this workflow, quantiled to the quantiles\n", 111 | "# we are working with in Depsky et al. 2023, is obtained with DOWNLOAD_SLR_AR5=True\n", 112 | "DOWNLOAD_LOCALIZESL_INPUTS = False\n", 113 | "\n", 114 | "# pre-release\n", 115 | "# PARAMS = {\"access_token\": environ[\"ACCESS_TOKEN\"]}\n", 116 | "# Z_URL_SLIIDERS_PC = Z_URL_DEPOSITS\n", 117 | "\n", 118 | "# post-release\n", 119 | "PARAMS = {}\n", 120 | "Z_URL_SLIIDERS_PC = Z_URL_RECORDS" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 5, 126 | "id": "f8316f7d-38c6-4e9c-ac5a-78fa05418e61", 127 | "metadata": { 128 | "tags": [] 129 | }, 130 | "outputs": [], 131 | "source": [ 132 | "pyciam_files = get_zenodo_file_list(Z_PYCIAM_DOI)" 133 | ] 134 | }, 135 | { 136 | "cell_type": "markdown", 137 | "id": "a17d55fb-d63b-4476-a209-391909e913e3", 138 | "metadata": { 139 | "tags": [] 140 | }, 141 | "source": [ 142 | "## Data for plotting and results calculation" 143 | ] 144 | }, 145 | { 146 | "cell_type": "markdown", 147 | "id": "d284b787-cd6c-4193-a402-5614027d1506", 148 | "metadata": {}, 149 | "source": [ 150 | "These are necessary to run the [pyCIAM-results-figures.ipynb](./pyCIAM-results-figures.ipynb) notebook, but are not directly used in the model execution." 151 | ] 152 | }, 153 | { 154 | "cell_type": "markdown", 155 | "id": "eca8f9f2-63f0-4c3c-b49e-8bc7cd50d0df", 156 | "metadata": {}, 157 | "source": [ 158 | "### Natural Earth Country Boundaries" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": 6, 164 | "id": "b182039a-7173-4469-b239-4c761a36b35f", 165 | "metadata": { 166 | "tags": [] 167 | }, 168 | "outputs": [ 169 | { 170 | "name": "stdout", 171 | "output_type": "stream", 172 | "text": [ 173 | "Downloading GADM data...\n", 174 | "Downloading PWT data...\n", 175 | "Downloading SLIIDERS intermediate file...\n", 176 | "Downloading movefactor analysis outputs...\n" 177 | ] 178 | } 179 | ], 180 | "source": [ 181 | "if DOWNLOAD_PLOTTING_DATA:\n", 182 | " DIR_SHP.mkdir(exist_ok=True, parents=True)\n", 183 | " # Natural Earth coastline data\n", 184 | " if not PATH_BORDERS.is_file():\n", 185 | " print(\"Downloading Natural Earth borders data...\")\n", 186 | " fname_boundary = Path(\n", 187 | " shapereader.natural_earth(\n", 188 | " resolution=\"10m\",\n", 189 | " category=\"cultural\",\n", 190 | " name=\"admin_0_boundary_lines_land\",\n", 191 | " )\n", 192 | " )\n", 193 | " copy(fname_boundary.parent, PATH_BORDERS.parent)\n", 194 | "\n", 195 | " if not PATH_COASTLINES.is_file():\n", 196 | " print(\"Downloading Natural Earth coastlines data...\")\n", 197 | " fname_coastlines = Path(\n", 198 | " shapereader.natural_earth(\n", 199 | " resolution=\"10m\", category=\"physical\", name=\"coastline\"\n", 200 | " )\n", 201 | " )\n", 202 | " copy(fname_coastlines.parent, PATH_COASTLINES.parent)\n", 203 | "\n", 204 | " # GADM\n", 205 | " print(\"Downloading GADM data...\")\n", 206 | " if not PATH_GADM.is_file():\n", 207 | " GADM_NAME = PATH_GADM.stem\n", 208 | " download_and_extract_partial_zip(\n", 209 | " PATH_GADM.parent,\n", 210 | " f\"https://geodata.ucdavis.edu/gadm/gadm4.1/{GADM_NAME}.zip\",\n", 211 | " f\"{GADM_NAME}.gpkg\",\n", 212 | " )\n", 213 | "\n", 214 | " # Penn World Table\n", 215 | " print(\"Downloading PWT data...\")\n", 216 | " if not PATH_PWT.is_file():\n", 217 | " save(\n", 218 | " pd.read_excel(\"https://www.rug.nl/ggdc/docs/pwt100.xlsx\", sheet_name=2),\n", 219 | " PATH_PWT,\n", 220 | " )\n", 221 | "\n", 222 | " # SLIIDERS intermediate output used for normalizing 
costs by GDP for presentation in\n", 223 | " # Depsky et al. 2023\n", 224 | " print(\"Downloading SLIIDERS intermediate file...\")\n", 225 | " download_and_extract_from_zenodo(\n", 226 | " PATH_SLIIDERS_INCOME_INTERMEDIATE_FILE, pyciam_files, \"inputs/ypk_\"\n", 227 | " )\n", 228 | "\n", 229 | " # Output of the non-market relocation cost analysis in Depsky et al. 2023\n", 230 | " print(\"Downloading movefactor analysis outputs...\")\n", 231 | " download_and_extract_from_zenodo(\n", 232 | " PATH_MOVEFACTOR_DATA, pyciam_files, \"products/suboptimal\"\n", 233 | " )" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "id": "02944b01-d2b4-4ae5-ad02-8f1f6e786a30", 239 | "metadata": { 240 | "tags": [] 241 | }, 242 | "source": [ 243 | "## SLIIDERS" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": 7, 249 | "id": "cd36a5b5-147e-4185-bfab-ead1ab385a17", 250 | "metadata": {}, 251 | "outputs": [ 252 | { 253 | "name": "stdout", 254 | "output_type": "stream", 255 | "text": [ 256 | "Downloading SLIIDERS...\n" 257 | ] 258 | } 259 | ], 260 | "source": [ 261 | "if DOWNLOAD_SLIIDERS:\n", 262 | " print(\"Downloading SLIIDERS...\")\n", 263 | " sliiders_files = requests.get(\n", 264 | " Z_URL_SLIIDERS_PC.format(doi=Z_SLIIDERS_DOI), params=PARAMS\n", 265 | " ).json()[\"files\"]\n", 266 | " download_and_extract_from_zenodo(\n", 267 | " PATH_SLIIDERS, sliiders_files, \"products/sliiders-v\"\n", 268 | " )" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "id": "801f1d20-1cc5-40a9-83e4-c292ac9cba9b", 274 | "metadata": {}, 275 | "source": [ 276 | "## Diaz 2016 inputs" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": 8, 282 | "id": "ac24be38-f3a5-4f79-9090-9fbe89c90ff8", 283 | "metadata": { 284 | "tags": [] 285 | }, 286 | "outputs": [ 287 | { 288 | "name": "stdout", 289 | "output_type": "stream", 290 | "text": [ 291 | "Downloading Diaz 2016 inputs...\n" 292 | ] 293 | } 294 | ], 295 | "source": [ 296 | "if DOWNLOAD_DIAZ_INPUTS:\n", 297 | " print(\"Downloading Diaz 2016 inputs...\")\n", 298 | " download_and_extract_from_zenodo(\n", 299 | " PATH_DIAZ_INPUTS_RAW, pyciam_files, \"inputs/diaz2016_inputs\"\n", 300 | " )" 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "id": "36028871-9be6-4f68-9fe8-b99da2c09c21", 306 | "metadata": {}, 307 | "source": [ 308 | "## SLR Inputs" 309 | ] 310 | }, 311 | { 312 | "cell_type": "code", 313 | "execution_count": 48, 314 | "id": "003f01c4-0851-4c2c-8359-fc1f2d715527", 315 | "metadata": { 316 | "tags": [] 317 | }, 318 | "outputs": [ 319 | { 320 | "name": "stdout", 321 | "output_type": "stream", 322 | "text": [ 323 | "Downloading LocalizeSL pre-computed quantiles...\n", 324 | "Downloading SLR projections from Sweet 2022...\n", 325 | "https://zenodo.org/api/files/d1eda86e-cb3d-4cda-9e12-32f9f35b9197/Interagency_Report.zip\n", 326 | "/tmp/ciam/data/raw/slr/sweet2022/TR_global_projections.nc\n", 327 | "Results/TR_global_projections.nc\n", 328 | "/tmp/ciam/data/raw/slr/sweet2022/TR_gridded_projections.nc\n", 329 | "Results/TR_gridded_projections.nc\n", 330 | "/tmp/ciam/data/raw/slr/sweet2022/TR_local_projections.nc\n", 331 | "Results/TR_local_projections.nc\n", 332 | "Downloading AR6 SLR projections (global)...\n", 333 | "https://zenodo.org/api/files/6cbb54eb-b2aa-483e-8dd0-179103857395/ar6.zip\n", 334 | "/tmp/ciam/data/raw/slr/ar6/global/total_ssp126_low_confidence_values.nc\n", 335 | "ar6/global/confidence_output_files/low_confidence/ssp126/total_ssp126_low_confidence_values.nc\n", 336 | 
"/tmp/ciam/data/raw/slr/ar6/global/total_ssp245_low_confidence_values.nc\n", 337 | "ar6/global/confidence_output_files/low_confidence/ssp245/total_ssp245_low_confidence_values.nc\n", 338 | "/tmp/ciam/data/raw/slr/ar6/global/total_ssp585_low_confidence_values.nc\n", 339 | "ar6/global/confidence_output_files/low_confidence/ssp585/total_ssp585_low_confidence_values.nc\n", 340 | "/tmp/ciam/data/raw/slr/ar6/global/total_ssp119_medium_confidence_values.nc\n", 341 | "ar6/global/confidence_output_files/medium_confidence/ssp119/total_ssp119_medium_confidence_values.nc\n", 342 | "/tmp/ciam/data/raw/slr/ar6/global/total_ssp126_medium_confidence_values.nc\n", 343 | "ar6/global/confidence_output_files/medium_confidence/ssp126/total_ssp126_medium_confidence_values.nc\n", 344 | "/tmp/ciam/data/raw/slr/ar6/global/total_ssp245_medium_confidence_values.nc\n", 345 | "ar6/global/confidence_output_files/medium_confidence/ssp245/total_ssp245_medium_confidence_values.nc\n", 346 | "/tmp/ciam/data/raw/slr/ar6/global/total_ssp370_medium_confidence_values.nc\n", 347 | "ar6/global/confidence_output_files/medium_confidence/ssp370/total_ssp370_medium_confidence_values.nc\n", 348 | "/tmp/ciam/data/raw/slr/ar6/global/total_ssp585_medium_confidence_values.nc\n", 349 | "ar6/global/confidence_output_files/medium_confidence/ssp585/total_ssp585_medium_confidence_values.nc\n", 350 | "Downloading AR6 SLR projections (regional)...\n", 351 | "https://zenodo.org/api/files/6cbb54eb-b2aa-483e-8dd0-179103857395/ar6-regional-confidence.zip\n", 352 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp126_low_confidence_values.nc\n", 353 | "ar6-regional-confidence/regional/confidence_output_files/low_confidence/ssp126/total_ssp126_low_confidence_values.nc\n", 354 | "...Downloading total_ssp126_low_confidence_values.nc\n", 355 | "...Writing total_ssp126_low_confidence_values.nc\n", 356 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp245_low_confidence_values.nc\n", 357 | "ar6-regional-confidence/regional/confidence_output_files/low_confidence/ssp245/total_ssp245_low_confidence_values.nc\n", 358 | "...Downloading total_ssp245_low_confidence_values.nc\n", 359 | "...Writing total_ssp245_low_confidence_values.nc\n", 360 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp585_low_confidence_values.nc\n", 361 | "ar6-regional-confidence/regional/confidence_output_files/low_confidence/ssp585/total_ssp585_low_confidence_values.nc\n", 362 | "...Downloading total_ssp585_low_confidence_values.nc\n", 363 | "...Writing total_ssp585_low_confidence_values.nc\n", 364 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp119_medium_confidence_values.nc\n", 365 | "ar6-regional-confidence/regional/confidence_output_files/medium_confidence/ssp119/total_ssp119_medium_confidence_values.nc\n", 366 | "...Downloading total_ssp119_medium_confidence_values.nc\n", 367 | "...Writing total_ssp119_medium_confidence_values.nc\n", 368 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp126_medium_confidence_values.nc\n", 369 | "ar6-regional-confidence/regional/confidence_output_files/medium_confidence/ssp126/total_ssp126_medium_confidence_values.nc\n", 370 | "...Downloading total_ssp126_medium_confidence_values.nc\n", 371 | "...Writing total_ssp126_medium_confidence_values.nc\n", 372 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp245_medium_confidence_values.nc\n", 373 | "ar6-regional-confidence/regional/confidence_output_files/medium_confidence/ssp245/total_ssp245_medium_confidence_values.nc\n", 374 | "...Downloading total_ssp245_medium_confidence_values.nc\n", 375 | "...Writing 
total_ssp245_medium_confidence_values.nc\n", 376 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp370_medium_confidence_values.nc\n", 377 | "ar6-regional-confidence/regional/confidence_output_files/medium_confidence/ssp370/total_ssp370_medium_confidence_values.nc\n", 378 | "...Downloading total_ssp370_medium_confidence_values.nc\n", 379 | "...Writing total_ssp370_medium_confidence_values.nc\n", 380 | "/tmp/ciam/data/raw/slr/ar6/regional/total_ssp585_medium_confidence_values.nc\n", 381 | "ar6-regional-confidence/regional/confidence_output_files/medium_confidence/ssp585/total_ssp585_medium_confidence_values.nc\n", 382 | "...Downloading total_ssp585_medium_confidence_values.nc\n", 383 | "...Writing total_ssp585_medium_confidence_values.nc\n", 384 | "Downloading map of historical SLR...\n", 385 | "Downloading historical GMSL timeseries...\n" 386 | ] 387 | } 388 | ], 389 | "source": [ 390 | "if DOWNLOAD_SLR_AR5:\n", 391 | " print(\"Downloading LocalizeSL pre-computed quantiles...\")\n", 392 | " download_and_extract_from_zenodo(\n", 393 | " PATH_SLR_AR5_QUANTILES, pyciam_files, \"inputs/ar5-msl\"\n", 394 | " )\n", 395 | "\n", 396 | "if DOWNLOAD_SLR_SWEET:\n", 397 | " print(\"Downloading SLR projections from Sweet 2022...\")\n", 398 | " sweet_files = requests.get(\n", 399 | " Z_URL_RECORDS.format(doi=Z_SWEET_DOI), params=PARAMS\n", 400 | " ).json()[\"files\"]\n", 401 | " download_and_extract_from_zenodo(\n", 402 | " DIR_SLR_SWEET_RAW,\n", 403 | " sweet_files,\n", 404 | " \"Interagency_Report.zip\",\n", 405 | " zip_glob=[\n", 406 | " \"Results/TR_global_projections.nc\",\n", 407 | " \"Results/TR_gridded_projections.nc\",\n", 408 | " \"Results/TR_local_projections.nc\",\n", 409 | " ],\n", 410 | " )\n", 411 | "\n", 412 | "if DOWNLOAD_SLR_AR6:\n", 413 | " ar6_files = requests.get(Z_URL_RECORDS.format(doi=Z_AR6_DOI), params=PARAMS).json()[\n", 414 | " \"files\"\n", 415 | " ]\n", 416 | "\n", 417 | " # get total SLR\n", 418 | " for scope, name in [(\"global\", \"ar6\"), (\"regional\", \"ar6-regional-confidence\")]:\n", 419 | " print(f\"Downloading AR6 SLR projections: total, {scope}...\")\n", 420 | " download_and_extract_from_zenodo(\n", 421 | " DIR_SLR_AR6_RAW / scope,\n", 422 | " ar6_files,\n", 423 | " f\"{name}.zip\",\n", 424 | " zip_glob=(\n", 425 | " f\"{name}/{scope}/confidence_output_files/**/ssp*/total_*values.nc\"\n", 426 | " ),\n", 427 | " )\n", 428 | "\n", 429 | " # get only the contribution of vertical land motion\n", 430 | " print(\"Downloading AR6 SLR projections: verticallandmotion, regional...\")\n", 431 | " download_and_extract_from_zenodo(\n", 432 | " DIR_SLR_AR6_RAW / \"regional\",\n", 433 | " ar6_files,\n", 434 | " \"ar6-regional-confidence.zip\",\n", 435 | " zip_glob=(\n", 436 | " \"ar6-regional-confidence/regional/confidence_output_files/**/ssp*/\"\n", 437 | " \"verticallandmotion_*values.nc\"\n", 438 | " ),\n", 439 | " )\n", 440 | "\n", 441 | "if DOWNLOAD_SLR_AR5 or DOWNLOAD_SLR_SWEET:\n", 442 | " # must also add some historical trends to convert from 2000 to 2005 MSL datum\n", 443 | " BASE_URL = \"https://data.aviso.altimetry.fr/aviso-gateway/data/indicators/msl/\"\n", 444 | " print(\"Downloading map of historical SLR...\")\n", 445 | " with PATH_SLR_HIST_TREND_MAP.open(\"wb\") as f:\n", 446 | " f.write(\n", 447 | " requests.get(\n", 448 | " BASE_URL + \"MSL_Map_MERGED_Global_AVISO_NoGIA_Adjust.nc\"\n", 449 | " ).content\n", 450 | " )\n", 451 | " print(\"Downloading historical GMSL timeseries...\")\n", 452 | " with PATH_SLR_GMSL_HIST_TIMESERIES.open(\"wb\") as f:\n", 453 | " 
f.write(\n", 454 | " requests.get(\n", 455 | " BASE_URL + \"MSL_Serie_MERGED_Global_AVISO_GIA_Adjust_Filter2m.nc\"\n", 456 | " ).content\n", 457 | " )" 458 | ] 459 | }, 460 | { 461 | "cell_type": "markdown", 462 | "id": "81125eca-4add-4d40-a93f-f2b18bac4265", 463 | "metadata": {}, 464 | "source": [ 465 | "## Storm Surge Lookup Tables" 466 | ] 467 | }, 468 | { 469 | "cell_type": "code", 470 | "execution_count": 50, 471 | "id": "cb8f902f-6fb8-4ed0-95b3-fa5c6899212a", 472 | "metadata": { 473 | "tags": [] 474 | }, 475 | "outputs": [ 476 | { 477 | "name": "stdout", 478 | "output_type": "stream", 479 | "text": [ 480 | "Downloading segment-level storm surge lookup...\n", 481 | "Downloading segment/admin unit-level storm surge lookup...\n" 482 | ] 483 | } 484 | ], 485 | "source": [ 486 | "if DOWNLOAD_SURGE_LOOKUPS:\n", 487 | " print(\"Downloading segment-level storm surge lookup...\")\n", 488 | " download_and_extract_from_zenodo(\n", 489 | " PATHS_SURGE_LOOKUP[\"seg\"], pyciam_files, \"inputs/surge-lookup-v1.1-seg.\"\n", 490 | " )\n", 491 | "\n", 492 | " print(\"Downloading segment/admin unit-level storm surge lookup...\")\n", 493 | " download_and_extract_from_zenodo(\n", 494 | " PATHS_SURGE_LOOKUP[\"seg_adm\"], pyciam_files, \"inputs/surge-lookup-v1.1-seg_adm\"\n", 495 | " )" 496 | ] 497 | }, 498 | { 499 | "cell_type": "markdown", 500 | "id": "8213ae2d-ebcd-414d-a1cc-8c8f828898a2", 501 | "metadata": {}, 502 | "source": [ 503 | "## LocalizeSL Inputs" 504 | ] 505 | }, 506 | { 507 | "cell_type": "code", 508 | "execution_count": 54, 509 | "id": "1aa31c42-72c5-406c-acaa-51a9e4fb8330", 510 | "metadata": {}, 511 | "outputs": [ 512 | { 513 | "name": "stdout", 514 | "output_type": "stream", 515 | "text": [ 516 | "Downloading LocalizeSL corefile: SLRProjections190726core_SEJ_full...\n", 517 | "Downloading LocalizeSL corefile: SLRProjections170113GRIDDEDcore...\n", 518 | "Downloading LocalizeSL corefile: SLRProjections200204GRIDDEDcore_D20...\n", 519 | "Downloading LocalizeSL corefile: SLRProjections210628GRIDDEDcore_SROCC...\n" 520 | ] 521 | } 522 | ], 523 | "source": [ 524 | "if DOWNLOAD_LOCALIZESL_INPUTS:\n", 525 | " DIR_SLR_AR5_IFILES_RAW.mkdir(exist_ok=True, parents=True)\n", 526 | "\n", 527 | " for corefile_name in LOCALIZESL_COREFILES:\n", 528 | " print(f\"Downloading LocalizeSL corefile: {corefile_name}...\")\n", 529 | " url = (\n", 530 | " f\"https://github.com/bobkopp/LocalizeSL/raw/{LOCALIZESL_REV}/IFILES/\"\n", 531 | " f\"{corefile_name}.mat\"\n", 532 | " )\n", 533 | "\n", 534 | " filename = Path(url).name\n", 535 | " with (DIR_SLR_AR5_IFILES_RAW / filename).open(\"wb\") as f:\n", 536 | " f.write(requests.get(url).content)" 537 | ] 538 | } 539 | ], 540 | "metadata": { 541 | "kernelspec": { 542 | "display_name": "Python 3 (ipykernel)", 543 | "language": "python", 544 | "name": "python3" 545 | }, 546 | "language_info": { 547 | "codemirror_mode": { 548 | "name": "ipython", 549 | "version": 3 550 | }, 551 | "file_extension": ".py", 552 | "mimetype": "text/x-python", 553 | "name": "python", 554 | "nbconvert_exporter": "python", 555 | "pygments_lexer": "ipython3", 556 | "version": "3.12.2" 557 | }, 558 | "widgets": { 559 | "application/vnd.jupyter.widget-state+json": { 560 | "state": {}, 561 | "version_major": 2, 562 | "version_minor": 0 563 | } 564 | } 565 | }, 566 | "nbformat": 4, 567 | "nbformat_minor": 5 568 | } 569 | -------------------------------------------------------------------------------- /notebooks/data-processing/collapse-sliiders-to-seg.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "id": "b1358d97-11d8-4718-9df3-8de968736412", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import sys\n", 11 | "\n", 12 | "sys.path.append(\"../\")" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 2, 18 | "id": "1530a104-fd32-4d20-869e-a6064075063d", 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "from shared import PATH_SLIIDERS, PATH_SLIIDERS_SEG, STORAGE_OPTIONS\n", 23 | "\n", 24 | "from pyCIAM.utils import collapse_econ_inputs_to_seg" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 3, 30 | "id": "11240e7b-6f7c-4e71-b029-b5c13f7f55b2", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "collapse_econ_inputs_to_seg(\n", 35 | " PATH_SLIIDERS, PATH_SLIIDERS_SEG, storage_options=STORAGE_OPTIONS\n", 36 | ")" 37 | ] 38 | } 39 | ], 40 | "metadata": { 41 | "kernelspec": { 42 | "display_name": "Python 3 (ipykernel)", 43 | "language": "python", 44 | "name": "python3" 45 | }, 46 | "language_info": { 47 | "codemirror_mode": { 48 | "name": "ipython", 49 | "version": 3 50 | }, 51 | "file_extension": ".py", 52 | "mimetype": "text/x-python", 53 | "name": "python", 54 | "nbconvert_exporter": "python", 55 | "pygments_lexer": "ipython3", 56 | "version": "3.11.0" 57 | }, 58 | "widgets": { 59 | "application/vnd.jupyter.widget-state+json": { 60 | "state": {}, 61 | "version_major": 2, 62 | "version_minor": 0 63 | } 64 | } 65 | }, 66 | "nbformat": 4, 67 | "nbformat_minor": 5 68 | } 69 | -------------------------------------------------------------------------------- /notebooks/data-processing/create-diaz-pyCIAM-inputs.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Preprocess pyCIAM Inputs for Diaz 2016" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "This notebook creates all of the inputs needed for `pyCIAM`, using the data provided by the [github repo](https://github.com/delavane/CIAM) for [Diaz 2016](https://link.springer.com/article/10.1007/s10584-016-1675-4). The raw GAMS data files (.gdx) were previously converted to a zarr store for easy loading in pyCIAM. In this notebook, we reformat this zarr store and calculate some intermediate data products that were calculated on the fly in Diaz 2016 but are treated as input values in pyCIAM." 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": { 20 | "tags": [] 21 | }, 22 | "source": [ 23 | "## Setup" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "import sys\n", 33 | "\n", 34 | "sys.path.append(\"../\")" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 2, 40 | "metadata": {}, 41 | "outputs": [ 42 | { 43 | "name": "stderr", 44 | "output_type": "stream", 45 | "text": [ 46 | "/srv/conda/envs/notebook/lib/python3.9/site-packages/dask_gateway/client.py:21: FutureWarning: format_bytes is deprecated and will be removed in a future release. 
Please use dask.utils.format_bytes instead.\n", 47 | " from distributed.utils import LoopRunner, format_bytes\n" 48 | ] 49 | } 50 | ], 51 | "source": [ 52 | "import numpy as np\n", 53 | "import pandas as pd\n", 54 | "import xarray as xr\n", 55 | "from shared import PATH_DIAZ_INPUTS_INT, PATH_DIAZ_INPUTS_RAW, open_zarr, save" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": {}, 61 | "source": [ 62 | "## Load raw data" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 3, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "# load, assuming missings (e.g. in land area) correspond to 0\n", 72 | "ds_in = open_zarr(PATH_DIAZ_INPUTS_RAW).load()\n", 73 | "for v in ds_in.data_vars:\n", 74 | " if ds_in[v].isnull().any():\n", 75 | " ds_in[v] = ds_in[v].fillna(0)\n", 76 | "\n", 77 | "# drop non-segment-level analysis inputs\n", 78 | "ds_in = ds_in.drop_vars(\n", 79 | " [\n", 80 | " d\n", 81 | " for d in ds_in.data_vars\n", 82 | " if d.startswith(\"country_\") or d.startswith(\"fund_\") or d.startswith(\"rep_\")\n", 83 | " ]\n", 84 | ")" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "## Adjust to \"remove kink in land area\"" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 4, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "area0 = ds_in.landarea.sel(elev=1) / 2\n", 101 | "area0[\"elev\"] = 0.5\n", 102 | "\n", 103 | "area2 = ds_in.landarea.sel(elev=2) / 2\n", 104 | "\n", 105 | "area1 = area0 + area2\n", 106 | "area1[\"elev\"] = 1.5\n", 107 | "\n", 108 | "landarea = xr.concat(\n", 109 | " [area0, area1, area2, ds_in.landarea.sel(elev=slice(3, None))], dim=\"elev\"\n", 110 | ")\n", 111 | "ds_in = ds_in.drop_dims(\"elev\")\n", 112 | "ds_in[\"landarea\"] = landarea" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "## Calculate input vals" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": 5, 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "# start with coastline lengths\n", 129 | "out = xr.Dataset({\"length\": ds_in.length})\n", 130 | "\n", 131 | "# constants from original ciam dataset\n", 132 | "out = xr.merge((out, ds_in.mobcapfrac))\n", 133 | "\n", 134 | "# extreme sea levels\n", 135 | "esls = ds_in.surge_height.sel(return_period=[\"s10\", \"s100\", \"s1000\", \"smax\"])\n", 136 | "esls[\"return_period\"] = [10, 100, 1000, 10000]\n", 137 | "out[\"surge_height\"] = esls\n", 138 | "\n", 139 | "# growth values used later\n", 140 | "gr_ypcc = ds_in[\"ypc\"] / (ds_in[\"ypc\"].shift(t=1) + 1e-9) - 1\n", 141 | "gr_pop = ds_in[\"pop\"] / (ds_in[\"pop\"].shift(t=1) + 1e-9) - 1\n", 142 | "gr_val = xr.DataArray(np.arange(len(ds_in.t)), coords={\"t\": ds_in.t.values})\n", 143 | "appr = np.exp((0.565 * gr_ypcc + 0.313 * gr_pop).cumsum(\"t\"))\n", 144 | "\n", 145 | "# population\n", 146 | "popdens = ds_in.popdens * np.exp(np.log(1 + gr_pop).cumsum(\"t\")).sel(country=ds_in.xsc)\n", 147 | "out[\"pop_2000\"] = popdens.isel(t=0, drop=True) * ds_in.landarea.fillna(0)\n", 148 | "out[\"pop_scale\"] = (popdens / popdens.isel(t=0, drop=True)).fillna(0)\n", 149 | "\n", 150 | "# income\n", 151 | "out[\"ypc\"] = ds_in.ypc.sel(country=ds_in.xsc) * np.maximum(\n", 152 | " ds_in.min_ypc_scale,\n", 153 | " (popdens.isel(t=0, drop=True) / ds_in.ypc_scale_denom) ** ds_in.ypc_scale_elast,\n", 154 | ")\n", 155 | "# correct for greenland\n", 156 | "out[\"ypc\"] = out.ypc.where(\n", 157 | " 
~ds_in.subsets.sel(subset=\"greenland\", drop=True), 22642 * 1.01**gr_val\n", 158 | ")\n", 159 | "\n", 160 | "# capital\n", 161 | "out[\"K_2000\"] = ds_in.kgdp * out[\"pop_2000\"] * out.ypc.isel(t=0, drop=True)\n", 162 | "out[\"K_scale\"] = out.pop_scale * out.ypc / out.ypc.isel(t=0, drop=True)\n", 163 | "\n", 164 | "# land value\n", 165 | "fundland = (\n", 166 | " np.minimum(\n", 167 | " ds_in.dvbm,\n", 168 | " np.maximum(\n", 169 | " ds_in.min_fundland,\n", 170 | " ds_in.dvbm\n", 171 | " * ds_in.ypc.isel(t=0, drop=True)\n", 172 | " * ds_in.refpopdens\n", 173 | " / (\n", 174 | " ds_in.ypc.isel(t=0, drop=True).sel(country=\"USA\", drop=True)\n", 175 | " * ds_in.refpopdens.sel(country=\"USA\", drop=True)\n", 176 | " ),\n", 177 | " ),\n", 178 | " )\n", 179 | " * 1e6\n", 180 | ")\n", 181 | "interior = appr * fundland\n", 182 | "out[\"interior\"] = interior.where(\n", 183 | " ~ds_in.subsets.sel(subset=\"greenland\", drop=True),\n", 184 | " interior.sel(country=\"CAN\", drop=True),\n", 185 | ").sel(country=ds_in.xsc)\n", 186 | "\n", 187 | "# wetland - distribute this over total land area starting from elev=0 to emulate how it\n", 188 | "# is implicitly treated in calculating wetland costs in Diaz 2016\n", 189 | "cum_area = ds_in.landarea.cumsum(\"elev\")\n", 190 | "wetland_area = ds_in.landarea.where(\n", 191 | " ds_in.wetland.fillna(0) >= cum_area,\n", 192 | " np.maximum(ds_in.wetland.fillna(0) - cum_area.shift(elev=1, fill_value=0), 0),\n", 193 | ")\n", 194 | "wetlandservice = (\n", 195 | " 1e6\n", 196 | " * appr\n", 197 | " * ds_in.wvbm\n", 198 | " * (\n", 199 | " ds_in.ypc.isel(t=0, drop=True)\n", 200 | " / ds_in.ypc.isel(t=0, drop=True).sel(country=\"USA\", drop=True)\n", 201 | " )\n", 202 | " ** 1.16\n", 203 | " * (ds_in.refpopdens / 27.59) ** 0.47\n", 204 | ")\n", 205 | "out[\"wetland\"] = wetland_area\n", 206 | "out[\"wetlandservice\"] = wetlandservice.sel(country=ds_in.xsc)\n", 207 | "# handle segs where they have more wetland area than land area\n", 208 | "out[\"total_wetland_val\"] = ds_in.wetland.fillna(0) * wetlandservice.sel(\n", 209 | " country=ds_in.xsc\n", 210 | ")\n", 211 | "\n", 212 | "# vsl\n", 213 | "out[\"vsl\"] = (\n", 214 | " ds_in.vsl_ypc_mult\n", 215 | " * ds_in.ypc.sel(country=\"USA\", drop=True)\n", 216 | " * (ds_in.ypc / ds_in.ypc.sel(country=\"USA\", drop=True)) ** ds_in.vsl_inc_elast\n", 217 | ").sel(country=ds_in.xsc)\n", 218 | "# correct greenland\n", 219 | "out[\"vsl\"] = out.vsl.where(\n", 220 | " ~ds_in.subsets.sel(subset=\"greenland\", drop=True),\n", 221 | " (\n", 222 | " ds_in.vsl_ypc_mult\n", 223 | " * ds_in.ypc.sel(country=\"USA\", drop=True)\n", 224 | " * (\n", 225 | " out.ypc.isel(seg=ds_in.subsets.sel(subset=\"greenland\", drop=True))\n", 226 | " / ds_in.ypc.sel(country=\"USA\", drop=True)\n", 227 | " )\n", 228 | " ** ds_in.vsl_inc_elast\n", 229 | " ).reindex(seg=out.seg),\n", 230 | ")\n", 231 | "\n", 232 | "# rho\n", 233 | "out[\"rho\"] = (\n", 234 | " ds_in.ypc\n", 235 | " / (ds_in.ypc + ds_in.ypc.isel(t=0, drop=True).sel(country=\"USA\", drop=True))\n", 236 | ").sel(country=ds_in.xsc)\n", 237 | "\n", 238 | "# protection construction cost\n", 239 | "cci = ds_in.cci.sel(country=ds_in.xsc)\n", 240 | "cci = cci.where(~ds_in.subsets.sel(subset=\"island\", drop=True), cci * 2)\n", 241 | "out[\"pc\"] = ds_in.pc0 * 1e6 * cci\n", 242 | "\n", 243 | "# discount factor. 
emulating diaz, we don't discount 2010 relative to 2000 and start\n", 244 | "# discounting in 2010\n", 245 | "out[\"dr\"] = ds_in.dr\n", 246 | "out[\"dfact\"] = 1 / (1 + ds_in.dr) ** (out.t - out.t.isel(t=0, drop=True))\n", 247 | "\n", 248 | "# add in year 2000 values, equal to 2010\n", 249 | "out = out.reindex(t=np.concatenate(([2000], out.t.values))).bfill(\"t\")\n", 250 | "\n", 251 | "# add in surge coefficients to allow for Diaz-like surge calculation\n", 252 | "out[\"surge_coefs\"] = ds_in.coefs\n", 253 | "\n", 254 | "# add in LSL\n", 255 | "out[\"lsl\"] = ds_in.lsl.reindex(t=out.t.values, fill_value=0)\n", 256 | "\n", 257 | "# land area\n", 258 | "out[\"landarea\"] = ds_in.landarea.fillna(0)\n", 259 | "\n", 260 | "# add in elevation bounds\n", 261 | "elev_lb = xr.DataArray(\n", 262 | " np.concatenate(([0, 0.5, 1.5], out.elev.isel(elev=slice(3, None)).values - 1)),\n", 263 | " coords={\"elev\": out.elev.values},\n", 264 | ")\n", 265 | "out[\"elev_bounds\"] = xr.concat(\n", 266 | " (elev_lb, out.elev), dim=pd.Index([\"lower\", \"upper\"], name=\"bound\")\n", 267 | ")\n", 268 | "out[\"elev\"] = out.elev_bounds.mean(\"bound\")\n", 269 | "\n", 270 | "# rename t\n", 271 | "out = out.rename(t=\"year\").drop_vars(\"country\")" 272 | ] 273 | }, 274 | { 275 | "cell_type": "markdown", 276 | "metadata": {}, 277 | "source": [ 278 | "## Save" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": 6, 284 | "metadata": {}, 285 | "outputs": [], 286 | "source": [ 287 | "dollar_units = \"2010 USD\"\n", 288 | "\n", 289 | "# coords\n", 290 | "out.seg.attrs.update({\"long_name\": \"DIVA Segment\"})\n", 291 | "out.return_period.attrs.update({\"long_name\": \"Return periods\", \"units\": \"y\"})\n", 292 | "out.elev.attrs.update(\n", 293 | " {\n", 294 | " \"long_name\": \"Elevation\",\n", 295 | " \"description\": (\n", 296 | " \"Midpoint elevation for each coastal elevation bin employed in pyCIAM\"\n", 297 | " ),\n", 298 | " \"units\": \"m\",\n", 299 | " }\n", 300 | ")\n", 301 | "\n", 302 | "# data_vars\n", 303 | "out.surge_coefs.attrs.update(\n", 304 | " {\n", 305 | " \"long_name\": \"Surge Damage Coefficients\",\n", 306 | " \"description\": (\n", 307 | " \"Coefficients used in the original Diaz 2016 paper to estimate surge \"\n", 308 | " \"damage\"\n", 309 | " ),\n", 310 | " }\n", 311 | ")\n", 312 | "out.elev_bounds.attrs.update(\n", 313 | " {\n", 314 | " \"long_name\": \"Elevation bounds\",\n", 315 | " \"description\": (\n", 316 | " \"Lower and upper bounds for each coastal elevation bin employed in pyCIAM\"\n", 317 | " ),\n", 318 | " \"units\": \"m\",\n", 319 | " }\n", 320 | ")\n", 321 | "out.length.attrs.update({\"description\": \"Length of coastline\", \"units\": \"km\"})\n", 322 | "out.surge_height.attrs.update(\n", 323 | " {\"description\": \"Estimated ESL/storm surge heights\", \"units\": \"m\"}\n", 324 | ")\n", 325 | "out.wetland.attrs.update(\n", 326 | " {\n", 327 | " \"description\": \"Estimated area for all wetland by elevation\",\n", 328 | " \"units\": \"km^2\",\n", 329 | " }\n", 330 | ")\n", 331 | "out.wetlandservice.attrs.update(\n", 332 | " {\n", 333 | " \"description\": \"Value of wetlands\",\n", 334 | " \"units\": f\"{dollar_units} per km^2\",\n", 335 | " }\n", 336 | ")\n", 337 | "out.total_wetland_val.attrs.update(\n", 338 | " {\n", 339 | " \"description\": (\n", 340 | " \"Estimated value for all wetland. 
Includes wetlands for segments that have \"\n", 341 | " \"more wetland area than land area\"\n", 342 | " ),\n", 343 | " \"units\": dollar_units,\n", 344 | " }\n", 345 | ")\n", 346 | "out.vsl.attrs.update(\n", 347 | " {\n", 348 | " \"long_name\": \"Value of a Statistical Life\",\n", 349 | " \"units\": f\"{dollar_units} per person\",\n", 350 | " }\n", 351 | ")\n", 352 | "out.K_2000.attrs.update(\n", 353 | " {\n", 354 | " \"description\": \"Total value of capital in year 2000\",\n", 355 | " \"units\": dollar_units,\n", 356 | " }\n", 357 | ")\n", 358 | "out.K_scale.attrs.update(\n", 359 | " {\n", 360 | " \"description\": \"Ratio of capital stock in present year to K_2000\",\n", 361 | " \"units\": dollar_units,\n", 362 | " }\n", 363 | ")\n", 364 | "out.pop_2000.attrs.update({\"long_name\": \"Population in 2000\", \"units\": \"people\"})\n", 365 | "out.pop_scale.attrs.update(\n", 366 | " {\n", 367 | " \"description\": \"Ratio of population in present year to pop_2000\",\n", 368 | " \"units\": dollar_units,\n", 369 | " }\n", 370 | ")\n", 371 | "out.ypc.attrs.update(\n", 372 | " {\n", 373 | " \"description\": \"Downscaled income per capital\",\n", 374 | " \"units\": f\"{dollar_units} per person\",\n", 375 | " }\n", 376 | ")\n", 377 | "out.landarea.attrs.update(\n", 378 | " {\n", 379 | " \"long_name\": \"Total Land Area\",\n", 380 | " \"units\": \"km^2\",\n", 381 | " }\n", 382 | ")\n", 383 | "out.interior.attrs.update(\n", 384 | " {\n", 385 | " \"long_name\": \"Value of non-coastal land\",\n", 386 | " \"units\": f\"{dollar_units} per km2\",\n", 387 | " }\n", 388 | ")\n", 389 | "out.pc.attrs.update(\n", 390 | " {\n", 391 | " \"description\": \"Protection cost (quadratic with height)\",\n", 392 | " \"units\": f\"{dollar_units} per km per vert m^2\",\n", 393 | " }\n", 394 | ")\n", 395 | "out.mobcapfrac.attrs.update(\n", 396 | " {\n", 397 | " \"description\": \"Fraction of capital that is mobile\",\n", 398 | " }\n", 399 | ")\n", 400 | "out.rho.attrs.update(\n", 401 | " {\n", 402 | " \"description\": (\n", 403 | " \"Resilience factor scaling depth-damage and depth-mortality functions\"\n", 404 | " )\n", 405 | " }\n", 406 | ")\n", 407 | "out.lsl.attrs.update(\n", 408 | " {\n", 409 | " \"long_name\": \"Local Sea Level Rise\",\n", 410 | " \"description\": (\n", 411 | " \"Local Sea Level Rise under emissions scenario relative to 1991-2009 \"\n", 412 | " \"baseline.\"\n", 413 | " ),\n", 414 | " \"units\": \"m\",\n", 415 | " }\n", 416 | ")\n", 417 | "\n", 418 | "# clear previously-saved zarr store encoding\n", 419 | "for d in out.data_vars:\n", 420 | " out[d].encoding.clear()\n", 421 | "\n", 422 | "# prep str coords for zarr\n", 423 | "for k, v in out.coords.items():\n", 424 | " if v.dtype == object:\n", 425 | " out[k] = v.astype(\"unicode\")" 426 | ] 427 | }, 428 | { 429 | "cell_type": "code", 430 | "execution_count": 8, 431 | "metadata": {}, 432 | "outputs": [ 433 | { 434 | "data": { 435 | "text/plain": [ 436 | "" 437 | ] 438 | }, 439 | "execution_count": 8, 440 | "metadata": {}, 441 | "output_type": "execute_result" 442 | } 443 | ], 444 | "source": [ 445 | "save(out, PATH_DIAZ_INPUTS_INT, mode=\"w\")" 446 | ] 447 | } 448 | ], 449 | "metadata": { 450 | "kernelspec": { 451 | "display_name": "Python 3 (ipykernel)", 452 | "language": "python", 453 | "name": "python3" 454 | }, 455 | "language_info": { 456 | "codemirror_mode": { 457 | "name": "ipython", 458 | "version": 3 459 | }, 460 | "file_extension": ".py", 461 | "mimetype": "text/x-python", 462 | "name": "python", 463 | "nbconvert_exporter": "python", 464 | 
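The two loops at the end of the save-prep cell above guard against common zarr pitfalls: encodings inherited from a previously opened store can conflict with the new write, and object-dtype string coordinates cannot be serialized directly. Below is a minimal standalone sketch of that same pattern using a toy dataset standing in for `out`; the variable and store names here are illustrative only, not part of the notebook.

```python
import numpy as np
import xarray as xr

# toy dataset with an object-dtype string coordinate, standing in for `out`
ds = xr.Dataset(
    {"x": (("seg",), np.arange(3.0))},
    coords={"seg": np.array(["seg_a", "seg_b", "seg_c"], dtype=object)},
)
ds["x"].encoding["chunks"] = (1,)  # pretend this was inherited from an earlier store

# drop stale encodings so they don't override chunking/compression of the new store
for d in ds.data_vars:
    ds[d].encoding.clear()

# zarr needs a concrete (fixed-width unicode) string dtype rather than object
for k, v in ds.coords.items():
    if v.dtype == object:
        ds[k] = v.astype("unicode")

ds.to_zarr("toy_prepped.zarr", mode="w")
```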
"pygments_lexer": "ipython3", 465 | "version": "3.10.8" 466 | }, 467 | "widgets": { 468 | "application/vnd.jupyter.widget-state+json": { 469 | "state": {}, 470 | "version_major": 2, 471 | "version_minor": 0 472 | } 473 | } 474 | }, 475 | "nbformat": 4, 476 | "nbformat_minor": 4 477 | } 478 | -------------------------------------------------------------------------------- /notebooks/data-processing/slr/AR5/1-convert-mat-version.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Convert MATLAB v7.3 corefiles to MATLAB v5\n", 8 | "- Octave doesn't support v7.3 as of 2020-05-19\n", 9 | "- We use the Mat I/O C library to convert v7.3 to v5 (Mat I/O doesn't support v7)\n", 10 | "- Note that what is called \"v5\" in Mat I/O is the same as \"v6\" in Octave" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "import sys\n", 20 | "\n", 21 | "sys.path.append(\"../../../\")" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 4, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "import shlex\n", 31 | "from subprocess import run\n", 32 | "\n", 33 | "from shared import DIR_SLR_AR5_IFILES_INT, DIR_SLR_AR5_IFILES_RAW" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 1, 39 | "metadata": {}, 40 | "outputs": [ 41 | { 42 | "name": "stderr", 43 | "output_type": "stream", 44 | "text": [ 45 | "Wrote MAT file: \"/gcs/rhg-data/impactlab-rhg/coastal/sliiders/int/slr/ifiles/SLRProjections170113GRIDDEDcore_v5.mat\"\n" 46 | ] 47 | }, 48 | { 49 | "name": "stdout", 50 | "output_type": "stream", 51 | "text": [ 52 | "Writing MAT variable: OceanDynMean... succeeded.\n", 53 | "Writing MAT variable: OceanDynN... succeeded.\n", 54 | "Writing MAT variable: OceanDynRegions... succeeded.\n", 55 | "Writing MAT variable: OceanDynStd... succeeded.\n", 56 | "Writing MAT variable: OceanDynTECorr... succeeded.\n", 57 | "Writing MAT variable: OceanDynYears... succeeded.\n", 58 | "Writing MAT variable: ThermExpMean... succeeded.\n", 59 | "Writing MAT variable: ThermExpN... succeeded.\n", 60 | "Writing MAT variable: ThermExpStd... succeeded.\n", 61 | "Writing MAT variable: ThermExpYears... succeeded.\n", 62 | "Writing MAT variable: colAIS... succeeded.\n", 63 | "Writing MAT variable: colGIC... succeeded.\n", 64 | "Writing MAT variable: colGIS... succeeded.\n", 65 | "Writing MAT variable: colLS... succeeded.\n", 66 | "Writing MAT variable: colTE... succeeded.\n", 67 | "Writing MAT variable: fpsite... succeeded.\n", 68 | "Writing MAT variable: mergeZOSZOSTOGA... succeeded.\n", 69 | "Writing MAT variable: nearestTG... succeeded.\n", 70 | "Writing MAT variable: quantlevs... succeeded.\n", 71 | "Writing MAT variable: rateprojs... succeeded.\n", 72 | "Writing MAT variable: rateprojssd... succeeded.\n", 73 | "Writing MAT variable: samps... succeeded.\n", 74 | "Writing MAT variable: scens... succeeded.\n", 75 | "Writing MAT variable: seeds... succeeded.\n", 76 | "Writing MAT variable: targregionnames... succeeded.\n", 77 | "Writing MAT variable: targregions... succeeded.\n", 78 | "Writing MAT variable: targsitecoords... succeeded.\n", 79 | "Writing MAT variable: targyears... 
succeeded.\n" 80 | ] 81 | }, 82 | { 83 | "name": "stderr", 84 | "output_type": "stream", 85 | "text": [ 86 | "Wrote MAT file: \"/gcs/rhg-data/impactlab-rhg/coastal/sliiders/int/slr/ifiles/SLRProjections190726core_SEJ_full_v5.mat\"\n" 87 | ] 88 | }, 89 | { 90 | "name": "stdout", 91 | "output_type": "stream", 92 | "text": [ 93 | "Writing MAT variable: corefileH... succeeded.\n", 94 | "Writing MAT variable: corefileL... succeeded.\n" 95 | ] 96 | }, 97 | { 98 | "name": "stderr", 99 | "output_type": "stream", 100 | "text": [ 101 | "Wrote MAT file: \"/gcs/rhg-data/impactlab-rhg/coastal/sliiders/int/slr/ifiles/SLRProjections200204GRIDDEDcore_D20_v5.mat\"\n" 102 | ] 103 | }, 104 | { 105 | "name": "stdout", 106 | "output_type": "stream", 107 | "text": [ 108 | "Writing MAT variable: OceanDynMean... succeeded.\n", 109 | "Writing MAT variable: OceanDynN... succeeded.\n", 110 | "Writing MAT variable: OceanDynRegions... succeeded.\n", 111 | "Writing MAT variable: OceanDynStd... succeeded.\n", 112 | "Writing MAT variable: OceanDynTECorr... succeeded.\n", 113 | "Writing MAT variable: OceanDynYears... succeeded.\n", 114 | "Writing MAT variable: ThermExpMean... succeeded.\n", 115 | "Writing MAT variable: ThermExpN... succeeded.\n", 116 | "Writing MAT variable: ThermExpStd... succeeded.\n", 117 | "Writing MAT variable: ThermExpYears... succeeded.\n", 118 | "Writing MAT variable: colAIS... succeeded.\n", 119 | "Writing MAT variable: colGIC... succeeded.\n", 120 | "Writing MAT variable: colGIS... succeeded.\n", 121 | "Writing MAT variable: colLS... succeeded.\n", 122 | "Writing MAT variable: colTE... succeeded.\n", 123 | "Writing MAT variable: fpsite... succeeded.\n", 124 | "Writing MAT variable: mergeZOSZOSTOGA... succeeded.\n", 125 | "Writing MAT variable: nearestTG... succeeded.\n", 126 | "Writing MAT variable: quantlevs... succeeded.\n", 127 | "Writing MAT variable: rateprojs... succeeded.\n", 128 | "Writing MAT variable: rateprojssd... succeeded.\n", 129 | "Writing MAT variable: samps... succeeded.\n", 130 | "Writing MAT variable: scens... succeeded.\n", 131 | "Writing MAT variable: seeds... succeeded.\n", 132 | "Writing MAT variable: targregionnames... succeeded.\n", 133 | "Writing MAT variable: targregions... succeeded.\n", 134 | "Writing MAT variable: targsitecoords... succeeded.\n", 135 | "Writing MAT variable: targyears... succeeded.\n", 136 | "Writing MAT variable: OceanDynMean... succeeded.\n", 137 | "Writing MAT variable: OceanDynN... succeeded.\n", 138 | "Writing MAT variable: OceanDynRegions... succeeded.\n", 139 | "Writing MAT variable: OceanDynStd... succeeded.\n", 140 | "Writing MAT variable: OceanDynTECorr... succeeded.\n", 141 | "Writing MAT variable: OceanDynYears... succeeded.\n", 142 | "Writing MAT variable: ThermExpMean... succeeded.\n", 143 | "Writing MAT variable: ThermExpN... succeeded.\n", 144 | "Writing MAT variable: ThermExpStd... succeeded.\n", 145 | "Writing MAT variable: ThermExpYears... succeeded.\n", 146 | "Writing MAT variable: colAIS... succeeded.\n", 147 | "Writing MAT variable: colGIC... succeeded.\n", 148 | "Writing MAT variable: colGIS... succeeded.\n", 149 | "Writing MAT variable: colLS... succeeded.\n", 150 | "Writing MAT variable: colTE... succeeded.\n", 151 | "Writing MAT variable: fpsite... succeeded.\n", 152 | "Writing MAT variable: mergeZOSZOSTOGA... succeeded.\n", 153 | "Writing MAT variable: nearestTG... succeeded.\n", 154 | "Writing MAT variable: quantlevs... succeeded.\n", 155 | "Writing MAT variable: rateprojs... 
succeeded.\n", 156 | "Writing MAT variable: rateprojssd... succeeded.\n", 157 | "Writing MAT variable: samps... succeeded.\n", 158 | "Writing MAT variable: scens... succeeded.\n", 159 | "Writing MAT variable: seeds... succeeded.\n", 160 | "Writing MAT variable: targregionnames... succeeded.\n", 161 | "Writing MAT variable: targregions... succeeded.\n", 162 | "Writing MAT variable: targsitecoords... succeeded.\n", 163 | "Writing MAT variable: targyears... succeeded.\n" 164 | ] 165 | }, 166 | { 167 | "name": "stderr", 168 | "output_type": "stream", 169 | "text": [ 170 | "Wrote MAT file: \"/gcs/rhg-data/impactlab-rhg/coastal/sliiders/int/slr/ifiles/SLRProjections210628GRIDDEDcore_SROCC_v5.mat\"\n" 171 | ] 172 | } 173 | ], 174 | "source": [ 175 | "DIR_SLR_AR5_IFILES_INT.mkdir(parents=True, exist_ok=True)\n", 176 | "\n", 177 | "for mfile_in in DIR_SLR_AR5_IFILES_RAW.glob(\"*.mat\"):\n", 178 | " mfile_out = DIR_SLR_AR5_IFILES_INT / (mfile_in.stem + \"_v5\" + mfile_in.suffix)\n", 179 | "\n", 180 | " cmd = f\"matio_73to5 {mfile_in} {mfile_out}\"\n", 181 | " run(shlex.split(cmd))" 182 | ] 183 | } 184 | ], 185 | "metadata": { 186 | "kernelspec": { 187 | "display_name": "Python 3 (ipykernel)", 188 | "language": "python", 189 | "name": "python3" 190 | }, 191 | "language_info": { 192 | "codemirror_mode": { 193 | "name": "ipython", 194 | "version": 3 195 | }, 196 | "file_extension": ".py", 197 | "mimetype": "text/x-python", 198 | "name": "python", 199 | "nbconvert_exporter": "python", 200 | "pygments_lexer": "ipython3", 201 | "version": "3.10.8" 202 | }, 203 | "widgets": { 204 | "application/vnd.jupyter.widget-state+json": { 205 | "state": {}, 206 | "version_major": 2, 207 | "version_minor": 0 208 | } 209 | } 210 | }, 211 | "nbformat": 4, 212 | "nbformat_minor": 4 213 | } 214 | -------------------------------------------------------------------------------- /notebooks/data-processing/slr/AR5/3-retrieve-num-gcms.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "d54c06e4-f962-40a2-9d51-5d53f613fc42", 6 | "metadata": {}, 7 | "source": [ 8 | "# Retrieve `OceanDynN` information from LocalizeSL corefiles\n", 9 | "`OceanDynN` represents the number of GCMs used to calculate LSL projections for each year-RCP-site\n", 10 | "\n", 11 | "This notebook saves this information in a `.zarr` in a format similar to the projections themselves" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "id": "ea31cea7-7800-47be-8f40-6c646ebf25d7", 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "import sys\n", 22 | "\n", 23 | "sys.path.append(\"../../../\")" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 2, 29 | "id": "06b96358-6f6c-4d18-993c-548d013f662f", 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "import pandas as pd\n", 34 | "from oct2py import octave\n", 35 | "from shared import (\n", 36 | " DIR_SCRATCH,\n", 37 | " DIR_SLR_AR5_IFILES_INT,\n", 38 | " FS,\n", 39 | " LOCALIZESL_COREFILES,\n", 40 | " PATH_LOCALIZESL,\n", 41 | " PATH_SLR_AR5_N_GCMS,\n", 42 | " _to_fuse,\n", 43 | " save,\n", 44 | ")" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "id": "b1d27162-8a9b-4752-9d88-050379c29191", 50 | "metadata": {}, 51 | "source": [ 52 | "#### Define paths" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "id": "4fb517fb-cf6c-4bb7-aeb6-575c9fe082be", 59 | "metadata": {}, 60 | "outputs": [], 61 | 
"source": [ 62 | "DIR_OCTAVE_OUTPUTS = DIR_SCRATCH / \"ngcm_localizeSL_outputs\"\n", 63 | "DIR_OCTAVE_OUTPUTS_FUSE = str(_to_fuse(DIR_OCTAVE_OUTPUTS))\n", 64 | "\n", 65 | "DIR_MFILES = PATH_LOCALIZESL / \"MFILES\"" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "id": "c1ccca44-499b-410d-b205-23582d25a2ab", 71 | "metadata": {}, 72 | "source": [ 73 | "### Define Octave function to save information about `OceanDyn` parameters" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "id": "0d9e7d86-c117-4583-bddd-7a855baacdba", 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "load_oceandyn_func = f\"\"\"\n", 84 | "function oceandyn_info = save_oceandyn_info(corefile_name, subcorefile_choice)\n", 85 | " dir_out = '{DIR_OCTAVE_OUTPUTS_FUSE}';\n", 86 | "\n", 87 | " ifilesdir='{str(DIR_SLR_AR5_IFILES_INT)}';\n", 88 | " mfilesdir='{str(DIR_MFILES)}';\n", 89 | "\n", 90 | " addpath(ifilesdir);\n", 91 | " addpath(mfilesdir);\n", 92 | "\n", 93 | " f = [corefile_name '_v5.mat'];\n", 94 | "\n", 95 | " corefilewrapper=load(fullfile(ifilesdir, f));\n", 96 | "\n", 97 | " mkdir(dir_out);\n", 98 | "\n", 99 | " if strcmp(corefile_name, 'SLRProjections190726core_SEJ_full')\n", 100 | " if strcmp(subcorefile_choice, \"H\")\n", 101 | " corefile = corefilewrapper.corefileH;\n", 102 | " else\n", 103 | " corefile = corefilewrapper.corefileL;\n", 104 | " end\n", 105 | " else\n", 106 | " corefile = corefilewrapper;\n", 107 | " end\n", 108 | "\n", 109 | " disp([\"Corefile: \" corefile_name]);\n", 110 | " disp([\"Corefile subgroup: \" subcorefile_choice]);\n", 111 | "\n", 112 | " siteids = int64(corefile.targregions);\n", 113 | "\n", 114 | " for i=1:length(corefile.scens)\n", 115 | " scen = cell2mat(corefile.scens(i));\n", 116 | " csvwrite(strcat(dir_out, '/OceanDynN_', corefile_name, '_', scen, '.csv'), corefile.OceanDynN(:,:,i));\n", 117 | " csvwrite(strcat(dir_out, '/OceanDynYears_', corefile_name, '_', scen, '.csv'), corefile.OceanDynYears);\n", 118 | " dlmwrite(strcat(dir_out, '/siteid_', corefile_name, '_', scen, '.csv'), siteids, 'precision', '%i')\n", 119 | " end\n", 120 | "end\n", 121 | "\"\"\"" 122 | ] 123 | }, 124 | { 125 | "cell_type": "markdown", 126 | "id": "d3b7abb1-a454-4768-b0e1-4633950cb6ea", 127 | "metadata": {}, 128 | "source": [ 129 | "### Save OceanDyn metadata for each corefile" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "id": "e22ac5a3-7b5c-44ad-ac2b-2ef5b7802ad9", 136 | "metadata": {}, 137 | "outputs": [ 138 | { 139 | "name": "stdout", 140 | "output_type": "stream", 141 | "text": [ 142 | "Corefile: SLRProjections190726core_SEJ_full\n", 143 | "Corefile subgroup: L\n", 144 | "Corefile: SLRProjections190726core_SEJ_full\n", 145 | "Corefile subgroup: H\n", 146 | "Corefile: SLRProjections170113GRIDDEDcore\n", 147 | "warning: implicit conversion from numeric to char\n", 148 | "Corefile subgroup: \u0000\n", 149 | "Corefile: SLRProjections200204GRIDDEDcore_D20\n", 150 | "warning: implicit conversion from numeric to char\n", 151 | "Corefile subgroup: \u0000\n", 152 | "Corefile: SLRProjections210628GRIDDEDcore_SROCC\n", 153 | "warning: implicit conversion from numeric to char\n", 154 | "Corefile subgroup: \u0000\n" 155 | ] 156 | } 157 | ], 158 | "source": [ 159 | "octave.eval(load_oceandyn_func)\n", 160 | "\n", 161 | "for corefile_name, subcorefiles in LOCALIZESL_COREFILES.items():\n", 162 | " for subcorefile_choice in subcorefiles:\n", 163 | " octave.save_oceandyn_info(corefile_name, subcorefile_choice)" 164 
| ] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "id": "78ddc154-c4e0-439a-849b-788ec59d15ea", 169 | "metadata": {}, 170 | "source": [ 171 | "### Load CSV outputs into DataFrames" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": null, 177 | "id": "83c33314-69b6-4e50-80fe-bd9462b64aae", 178 | "metadata": {}, 179 | "outputs": [ 180 | { 181 | "name": "stdout", 182 | "output_type": "stream", 183 | "text": [ 184 | "SLRProjections190726core_SEJ_full ['2p0degree+L', 'rcp85+H']\n", 185 | "SLRProjections170113GRIDDEDcore ['rcp26', 'rcp45', 'rcp60', 'rcp85']\n", 186 | "SLRProjections200204GRIDDEDcore_D20 ['rcp26', 'rcp45', 'rcp60', 'rcp85']\n", 187 | "SLRProjections210628GRIDDEDcore_SROCC ['rcp26', 'rcp45', 'rcp60', 'rcp85']\n" 188 | ] 189 | } 190 | ], 191 | "source": [ 192 | "rcps = [\"2p0degree+L\", \"rcp85+H\", \"rcp26\", \"rcp45\", \"rcp60\", \"rcp85\"]\n", 193 | "\n", 194 | "dfs = []\n", 195 | "for corefile in LOCALIZESL_COREFILES.keys():\n", 196 | " rcps = [\"rcp26\", \"rcp45\", \"rcp60\", \"rcp85\"]\n", 197 | " if corefile == \"SLRProjections190726core_SEJ_full\":\n", 198 | " rcps = [\"2p0degree+L\", \"rcp85+H\"]\n", 199 | "\n", 200 | " print(corefile, rcps)\n", 201 | "\n", 202 | " for rcp in rcps:\n", 203 | " scenario = f\"{corefile}_{rcp}\"\n", 204 | " sites = pd.read_csv(DIR_OCTAVE_OUTPUTS / f\"siteid_{scenario}.csv\", header=None)\n", 205 | " sites[\"name\"] = sites[0].astype(int).astype(str)\n", 206 | " sites = sites.drop(columns=[0])\n", 207 | "\n", 208 | " years = pd.read_csv(\n", 209 | " DIR_OCTAVE_OUTPUTS / f\"OceanDynYears_{scenario}.csv\", header=None\n", 210 | " )\n", 211 | "\n", 212 | " years = years.T.rename(columns={0: \"year\"})\n", 213 | "\n", 214 | " df = pd.read_csv(\n", 215 | " DIR_OCTAVE_OUTPUTS / f\"OceanDynN_{scenario}.csv\",\n", 216 | " header=None,\n", 217 | " names=sites[\"name\"].values,\n", 218 | " )\n", 219 | " df = df.join(years).set_index(\"year\", drop=True)\n", 220 | " df = df[\n", 221 | " [c for c in df.columns if int(c) > 100000000]\n", 222 | " ] # These high-valued sites are the gridded ones\n", 223 | " df = df.loc[2000:]\n", 224 | " df[\"scenario\"] = scenario\n", 225 | " df = df.set_index(\"scenario\", append=True)\n", 226 | "\n", 227 | " df = df.stack()\n", 228 | " df.index = df.index.set_names(\"scenario\", level=1)\n", 229 | " df.index = df.index.set_names(\"site_id\", level=2)\n", 230 | "\n", 231 | " dfs.append(df)" 232 | ] 233 | }, 234 | { 235 | "cell_type": "markdown", 236 | "id": "152a6a4b-5343-4c2a-881c-ca22e6716041", 237 | "metadata": {}, 238 | "source": [ 239 | "## Merge DataFrames into xarray and save" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": null, 245 | "id": "65915940-695d-4623-b58d-89862d38a2bb", 246 | "metadata": {}, 247 | "outputs": [], 248 | "source": [ 249 | "save(pd.concat(dfs).to_xarray().to_dataset(name=\"numGCMs\"), PATH_SLR_AR5_N_GCMS)" 250 | ] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "execution_count": null, 255 | "id": "5e2c614b-5a22-4b46-8f5a-59a59f81f5e1", 256 | "metadata": {}, 257 | "outputs": [], 258 | "source": [ 259 | "FS.rm(str(DIR_OCTAVE_OUTPUTS), recursive=True)" 260 | ] 261 | } 262 | ], 263 | "metadata": { 264 | "kernelspec": { 265 | "display_name": "Python 3 (ipykernel)", 266 | "language": "python", 267 | "name": "python3" 268 | }, 269 | "language_info": { 270 | "codemirror_mode": { 271 | "name": "ipython", 272 | "version": 3 273 | }, 274 | "file_extension": ".py", 275 | "mimetype": "text/x-python", 276 | "name": "python", 277 | 
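The CSV-parsing loop above leans on a pandas-to-xarray idiom that is easy to miss: the wide year × site table is stacked into a long Series with a (year, scenario, site_id) MultiIndex, and `to_xarray()` then turns each index level into a dimension. A toy version of that round trip, with made-up site IDs and GCM counts (not real `OceanDynN` values), might look like this:

```python
import pandas as pd

# wide table: one column per (gridded) site, one row per year; values are made up
df = pd.DataFrame(
    {"101000001": [5, 7], "101000002": [6, 8]},
    index=pd.Index([2000, 2010], name="year"),
)
df["scenario"] = "SLRProjections170113GRIDDEDcore_rcp45"
df = df.set_index("scenario", append=True)

# long format: MultiIndex (year, scenario, site_id) -> single column of counts
s = df.stack()
s.index = s.index.set_names("site_id", level=2)

# each MultiIndex level becomes a dimension of the resulting Dataset
ds = s.to_xarray().to_dataset(name="numGCMs")
print(ds["numGCMs"].sel(year=2000, site_id="101000001").item())
```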
"nbconvert_exporter": "python", 278 | "pygments_lexer": "ipython3", 279 | "version": "3.10.8" 280 | }, 281 | "widgets": { 282 | "application/vnd.jupyter.widget-state+json": { 283 | "state": {}, 284 | "version_major": 2, 285 | "version_minor": 0 286 | } 287 | } 288 | }, 289 | "nbformat": 4, 290 | "nbformat_minor": 5 291 | } 292 | -------------------------------------------------------------------------------- /notebooks/data-processing/slr/AR5/5-create-slr-quantile.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "c32fb782-2a80-4ec3-b06f-54945f90ed00", 6 | "metadata": {}, 7 | "source": [ 8 | "# Create Quantile-based SLR dataset" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 7, 14 | "id": "f87eeeca-c542-4cb3-8069-fad1133d55dd", 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import sys\n", 19 | "\n", 20 | "sys.path.append(\"../../../\")" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 8, 26 | "id": "ce02a65c-ceae-475d-820f-40c91ee1b45c", 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "from shared import PATH_SLR_AR5_FULL, PATH_SLR_AR5_QUANTILES, QUANTILES, open_zarr, save" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 12, 36 | "id": "990cd2f2-a1f7-4a23-b1ec-0486668947a0", 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "ds = open_zarr(PATH_SLR_AR5_FULL, chunks={\"mc_sample_id\": -1}).quantile(\n", 41 | " QUANTILES, dim=\"mc_sample_id\", keep_attrs=True\n", 42 | ")\n", 43 | "for v in ds.variables:\n", 44 | " ds[v].encoding = {}\n", 45 | " if ds[v].dtype == object:\n", 46 | " ds[v] = ds[v].astype(\"unicode\")\n", 47 | "save(ds, PATH_SLR_AR5_QUANTILES, mode=\"w\")" 48 | ] 49 | } 50 | ], 51 | "metadata": { 52 | "kernelspec": { 53 | "display_name": "Python 3 (ipykernel)", 54 | "language": "python", 55 | "name": "python3" 56 | }, 57 | "language_info": { 58 | "codemirror_mode": { 59 | "name": "ipython", 60 | "version": 3 61 | }, 62 | "file_extension": ".py", 63 | "mimetype": "text/x-python", 64 | "name": "python", 65 | "nbconvert_exporter": "python", 66 | "pygments_lexer": "ipython3", 67 | "version": "3.10.8" 68 | }, 69 | "widgets": { 70 | "application/vnd.jupyter.widget-state+json": { 71 | "state": {}, 72 | "version_major": 2, 73 | "version_minor": 0 74 | } 75 | } 76 | }, 77 | "nbformat": 4, 78 | "nbformat_minor": 5 79 | } 80 | -------------------------------------------------------------------------------- /notebooks/data-processing/slr/AR5/README.md: -------------------------------------------------------------------------------- 1 | This directory contains notebooks to generate LocalizeSL-based local SLR projections using a variety of temperature scenarios and ice sheet models, largely based around the IPCC AR5. 2 | 3 | The final output of steps 1-4 is a Zarr store containing 10,000 Monte Carlo draws for each of the RCP scenarios and years (decadal), at each site ID (defined by LocalizeSL), for each corefile. Step 5 simply collapses these MC draws into quantiles. 4 | 5 | The steps to produce this output are as follows: 6 | 1. [convert-mat-version](./1-convert-mat-version.ipynb): Necessary to convert the MatLab version to work with Octave 7 | 2. [generate-projected-lsl](./2-generate-projected-lsl.ipynb): Dask workers running Octave. For any corefile, call the LocalizeSL `LocalizeStoredProjections` function, followed by `WriteTableMC`, to get outputs as TSVs. 8 | 3. 
[retrieve-num-gcms](./3-retrieve-num-gcms.ipynb): Calculate number of GCMs for each site-year-scenario, for later use in clipping some sites due to data quality issues. 9 | 4. [process-localizesl-output](./4-process-localizesl-output.ipynb): combine all TSVs into a single Zarr store. Clip some sites based on data quality criteria. 10 | 5. [create-slr-quantile](./5-create-slr-quantile.ipynb): Take quantiles of the outputs of step (4), for use in the runs executed as part of Depsky et al. 2023. -------------------------------------------------------------------------------- /notebooks/data-processing/slr/AR6.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import sys\n", 10 | "\n", 11 | "sys.path.append(\"../../\")" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": { 18 | "tags": [] 19 | }, 20 | "outputs": [ 21 | { 22 | "name": "stderr", 23 | "output_type": "stream", 24 | "text": [ 25 | "/srv/conda/envs/notebook/lib/python3.9/site-packages/dask_gateway/client.py:21: FutureWarning: format_bytes is deprecated and will be removed in a future release. Please use dask.utils.format_bytes instead.\n", 26 | " from distributed.utils import LoopRunner, format_bytes\n" 27 | ] 28 | } 29 | ], 30 | "source": [ 31 | "import pandas as pd\n", 32 | "import shared\n", 33 | "import xarray as xr\n", 34 | "from shared import open_dataset, save" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "## Load and aggregate AR6 FACTS projections" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 3, 47 | "metadata": { 48 | "tags": [] 49 | }, 50 | "outputs": [ 51 | { 52 | "name": "stdout", 53 | "output_type": "stream", 54 | "text": [ 55 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp126_low_confidence_values.nc...\n", 56 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp245_low_confidence_values.nc...\n", 57 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp585_low_confidence_values.nc...\n", 58 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp119_medium_confidence_values.nc...\n", 59 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp126_medium_confidence_values.nc...\n", 60 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp245_medium_confidence_values.nc...\n", 61 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp370_medium_confidence_values.nc...\n", 62 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/total_ssp585_medium_confidence_values.nc...\n", 63 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp126_low_confidence_values.nc...\n", 64 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp245_low_confidence_values.nc...\n", 65 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp585_low_confidence_values.nc...\n", 66 | "Processing 
gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp119_medium_confidence_values.nc...\n", 67 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp126_medium_confidence_values.nc...\n", 68 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp245_medium_confidence_values.nc...\n", 69 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp370_medium_confidence_values.nc...\n", 70 | "Processing gs://rhg-data/impactlab-rhg/coastal/ciam_paper/data/raw/slr/ar6/regional/verticallandmotion_ssp585_medium_confidence_values.nc...\n" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "all_ds = []\n", 76 | "global_ds = []\n", 77 | "global_ssps = []\n", 78 | "\n", 79 | "\n", 80 | "def open_and_convert(ds_path):\n", 81 | " out = open_dataset(ds_path)\n", 82 | " out[\"sea_level_change\"] = (\n", 83 | " out.sea_level_change.pint.quantify().pint.to(\"meters\").pint.dequantify()\n", 84 | " )\n", 85 | " return out\n", 86 | "\n", 87 | "\n", 88 | "for kind in [\"total\", \"verticallandmotion\"]:\n", 89 | " ds = []\n", 90 | " this_ssps = []\n", 91 | " for conf in [\"low\", \"medium\"]:\n", 92 | " for ds_path in (shared.DIR_SLR_AR6_RAW / \"regional\").glob(f\"{kind}_*_{conf}_*\"):\n", 93 | " print(f\"Processing {ds_path}...\")\n", 94 | " this_ssp = ds_path.name.split(\"_\")[1]\n", 95 | " ssp_conf = f\"{this_ssp}_{conf}\"\n", 96 | " ds.append(open_and_convert(ds_path))\n", 97 | " this_ssps.append(ssp_conf)\n", 98 | " if kind == \"total\":\n", 99 | " global_ds.append(\n", 100 | " open_and_convert(shared.DIR_SLR_AR6_RAW / \"global\" / ds_path.name)\n", 101 | " )\n", 102 | " global_ssps.append(ssp_conf)\n", 103 | " all_ds.append(\n", 104 | " xr.concat(ds, pd.Index(this_ssps, name=\"scenario\"), data_vars=\"different\")\n", 105 | " )\n", 106 | "\n", 107 | "# for some reason the VLM dataset has an entry for 2005 that is all 0s, while other\n", 108 | "# datasets just don't have 2005 b/c it is the assumed basline\n", 109 | "assert (all_ds[1].sea_level_change.sel(years=2005) == 0).all()\n", 110 | "all_ds[1] = all_ds[1].sel(years=slice(2006, None))\n", 111 | "\n", 112 | "global_ds = (\n", 113 | " xr.concat(global_ds, pd.Index(global_ssps, name=\"scenario\"), data_vars=\"different\")\n", 114 | " .squeeze(drop=True)\n", 115 | " .drop_vars([\"lon\", \"lat\"])\n", 116 | " .sea_level_change\n", 117 | ")\n", 118 | "\n", 119 | "# handle floating point matching errors on the quantile dimension\n", 120 | "global_ds[\"quantiles\"] = all_ds[0].quantiles\n", 121 | "all_ds[1][\"quantiles\"] = all_ds[0].quantiles\n", 122 | "\n", 123 | "all_ds = xr.Dataset(\n", 124 | " {\n", 125 | " \"lsl_msl05\": all_ds[0].sea_level_change,\n", 126 | " \"lsl_ncc_msl05\": all_ds[1].sea_level_change,\n", 127 | " \"gsl_msl05\": global_ds,\n", 128 | " \"lon\": all_ds[1].lon,\n", 129 | " \"lat\": all_ds[0].lat,\n", 130 | " }\n", 131 | ")\n", 132 | "\n", 133 | "# drop locations with NaN values in the time period we're interested in\n", 134 | "valid = (\n", 135 | " all_ds[[\"lsl_msl05\", \"lsl_ncc_msl05\"]]\n", 136 | " .sel(years=slice(2100))\n", 137 | " .notnull()\n", 138 | " .all([\"scenario\", \"quantiles\", \"years\"])\n", 139 | " .to_array(\"tmp\")\n", 140 | " .all(\"tmp\")\n", 141 | ")\n", 142 | "all_ds = all_ds.sel(locations=valid)\n", 143 | "\n", 144 | "all_ds = all_ds.rename(\n", 145 | " {\"years\": \"year\", \"quantiles\": \"quantile\", \"locations\": 
\"site_id\"}\n", 146 | ")\n", 147 | "\n", 148 | "# we generally allow +180 but not -180\n", 149 | "all_ds[\"lon\"] = all_ds.lon.where(all_ds.lon != -180, 180)\n", 150 | "\n", 151 | "# ensure no locations have missing values\n", 152 | "assert all_ds.sel(year=slice(2100)).notnull().all().to_array().all()" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "metadata": {}, 158 | "source": [ 159 | "## Save" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 5, 165 | "metadata": {}, 166 | "outputs": [], 167 | "source": [ 168 | "for v in all_ds.variables:\n", 169 | " all_ds[v].encoding.clear()\n", 170 | "save(all_ds.chunk({\"site_id\": 100}), shared.PATH_SLR_AR6, mode=\"w\")" 171 | ] 172 | } 173 | ], 174 | "metadata": { 175 | "kernelspec": { 176 | "display_name": "Python 3 (ipykernel)", 177 | "language": "python", 178 | "name": "python3" 179 | }, 180 | "language_info": { 181 | "codemirror_mode": { 182 | "name": "ipython", 183 | "version": 3 184 | }, 185 | "file_extension": ".py", 186 | "mimetype": "text/x-python", 187 | "name": "python", 188 | "nbconvert_exporter": "python", 189 | "pygments_lexer": "ipython3", 190 | "version": "3.9.10" 191 | }, 192 | "widgets": { 193 | "application/vnd.jupyter.widget-state+json": { 194 | "state": {}, 195 | "version_major": 2, 196 | "version_minor": 0 197 | } 198 | } 199 | }, 200 | "nbformat": 4, 201 | "nbformat_minor": 4 202 | } 203 | -------------------------------------------------------------------------------- /notebooks/data-processing/slr/sweet.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import sys\n", 10 | "\n", 11 | "sys.path.append(\"../../\")" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": {}, 18 | "outputs": [ 19 | { 20 | "name": "stderr", 21 | "output_type": "stream", 22 | "text": [ 23 | "/srv/conda/envs/notebook/lib/python3.9/site-packages/dask_gateway/client.py:21: FutureWarning: format_bytes is deprecated and will be removed in a future release. Please use dask.utils.format_bytes instead.\n", 24 | " from distributed.utils import LoopRunner, format_bytes\n" 25 | ] 26 | } 27 | ], 28 | "source": [ 29 | "import numpy as np\n", 30 | "import pandas as pd\n", 31 | "import shared\n", 32 | "import xarray as xr\n", 33 | "from shared import open_dataarray, open_dataset, open_zarr, save\n", 34 | "\n", 35 | "from pyCIAM import spherical_nearest_neighbor as snn" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": {}, 41 | "source": [ 42 | "## Aggregate Sweet et al. 
datasets" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 3, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "# Global data\n", 52 | "glob = (\n", 53 | " open_dataset(shared.DIR_SLR_SWEET_RAW / \"TR_global_projections.nc\")\n", 54 | " .rename(years=\"year\", percentiles=\"quantile\")\n", 55 | " .sel(year=slice(2020, None))[\n", 56 | " [\"GMSL_Low\", \"GMSL_IntLow\", \"GMSL_Int\", \"GMSL_IntHigh\", \"GMSL_High\"]\n", 57 | " ]\n", 58 | " .to_array(dim=\"scenario\")\n", 59 | ")\n", 60 | "glob[\"scenario\"] = glob.scenario.str[5:]\n", 61 | "glob[\"quantile\"] = glob[\"quantile\"] / 100" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 4, 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "# Gauge data\n", 71 | "ds_loc = (\n", 72 | " open_dataset(shared.DIR_SLR_SWEET_RAW / \"TR_local_projections.nc\")\n", 73 | " .set_coords([\"lon\", \"lat\"])\n", 74 | " .rename(percentiles=\"quantile\", years=\"year\", tg=\"site_id\")\n", 75 | " .dropna(how=\"all\", dim=\"year\")\n", 76 | " .sel(year=slice(2020, None))\n", 77 | ")\n", 78 | "\n", 79 | "# drop stations w/ QC issues\n", 80 | "ds_loc = ds_loc.sel(site_id=~ds_loc.QC_flag.astype(bool))\n", 81 | "\n", 82 | "# rearrange\n", 83 | "ds_loc = ds_loc[[i for i in ds_loc.data_vars if i.startswith(\"rsl_total\")]].to_array(\n", 84 | " dim=\"scenario\"\n", 85 | ")\n", 86 | "ds_loc[\"quantile\"] = ds_loc[\"quantile\"] / 100\n", 87 | "ds_loc[\"scenario\"] = ds_loc.scenario.str[10:]\n", 88 | "assert ds_loc.notnull().all()\n", 89 | "\n", 90 | "# rename site ID as numeric\n", 91 | "ds_loc[\"site_id\"] = np.arange(ds_loc.site_id.size)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 5, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "# Gridded data\n", 101 | "grid = (\n", 102 | " open_dataset(shared.DIR_SLR_SWEET_RAW / \"TR_gridded_projections.nc\")\n", 103 | " .rename(percentiles=\"quantile\", years=\"year\")\n", 104 | " .dropna(how=\"all\", dim=\"year\")\n", 105 | " .sel(year=slice(2020, None))\n", 106 | " .stack(site_id=[\"lon\", \"lat\"])\n", 107 | ")\n", 108 | "\n", 109 | "# rearrange\n", 110 | "grid = grid[[i for i in grid.data_vars if i.startswith(\"rsl_total\")]].to_array(\n", 111 | " dim=\"scenario\"\n", 112 | ")\n", 113 | "grid[\"quantile\"] = grid[\"quantile\"] / 100\n", 114 | "grid[\"scenario\"] = grid.scenario.str[10:]\n", 115 | "\n", 116 | "# drop land pixels that are not valid\n", 117 | "grid = grid.sel(site_id=(grid != -31985).all([\"scenario\", \"quantile\", \"year\"]))\n", 118 | "assert grid.notnull().all()\n", 119 | "\n", 120 | "# rename site_id\n", 121 | "grid = grid.reset_index(\"site_id\").assign_coords(\n", 122 | " site_id=np.arange(ds_loc.site_id.size, ds_loc.site_id.size + grid.site_id.size)\n", 123 | ")" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 6, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "# we call these \"05\" even though they are currently relative to '00. 
Will convert next\n", 133 | "full = xr.Dataset(\n", 134 | " {\n", 135 | " \"lsl_msl05\": xr.concat((ds_loc, grid.interp(year=ds_loc.year)), \"site_id\"),\n", 136 | " \"gsl_msl05\": glob,\n", 137 | " }\n", 138 | ")\n", 139 | "assert full.lsl_msl05.notnull().all()" 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": {}, 145 | "source": [ 146 | "### Adjust to 2005 baseline" 147 | ] 148 | }, 149 | { 150 | "cell_type": "markdown", 151 | "metadata": {}, 152 | "source": [ 153 | "First the LSL" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 7, 159 | "metadata": {}, 160 | "outputs": [], 161 | "source": [ 162 | "adj = (\n", 163 | " open_dataarray(shared.PATH_SLR_HIST_TREND_MAP).rename(\n", 164 | " longitude=\"lon\", latitude=\"lat\"\n", 165 | " )\n", 166 | " * 5\n", 167 | ")\n", 168 | "adj[\"lon\"] = adj.lon.where(adj.lon <= 180, adj.lon - 360)\n", 169 | "adj = adj.to_dataframe().dropna().reset_index()\n", 170 | "\n", 171 | "full_sites = full[[\"lon\", \"lat\"]].to_dataframe()\n", 172 | "full_sites[\"lon\"] = full_sites.lon.where(full_sites.lon <= 180, full_sites.lon - 360)\n", 173 | "\n", 174 | "vals = adj.loc[snn(full_sites, adj.reset_index()).values, \"sea_level_trends\"]\n", 175 | "vals.index = pd.Index(full.site_id.values, name=\"site_id\")\n", 176 | "\n", 177 | "full[\"lsl_msl05\"] -= vals.to_xarray()" 178 | ] 179 | }, 180 | { 181 | "cell_type": "markdown", 182 | "metadata": {}, 183 | "source": [ 184 | "Then the GSL" 185 | ] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "execution_count": 8, 190 | "metadata": {}, 191 | "outputs": [], 192 | "source": [ 193 | "adj = open_dataset(shared.PATH_SLR_GMSL_HIST_TIMESERIES).msl.to_series()\n", 194 | "full[\"gsl_msl05\"] -= (\n", 195 | " adj.loc[\"1996\":\"2014\"].mean() - adj.loc[\"1991\":\"2009\"].mean()\n", 196 | ") * 1000" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "metadata": {}, 202 | "source": [ 203 | "### Update to meters" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": 9, 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "full.lon.attrs.clear()\n", 213 | "full.lat.attrs.clear()\n", 214 | "full = (\n", 215 | " full.pint.quantify(lsl_msl05=\"mm\", gsl_msl05=\"mm\")\n", 216 | " .pint.to(lsl_msl05=\"meters\", gsl_msl05=\"meters\")\n", 217 | " .pint.dequantify()\n", 218 | ")" 219 | ] 220 | }, 221 | { 222 | "cell_type": "markdown", 223 | "metadata": {}, 224 | "source": [ 225 | "### Add NCC scenario" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": 11, 231 | "metadata": {}, 232 | "outputs": [], 233 | "source": [ 234 | "# use AR6 VLM scenario for Sweet\n", 235 | "\n", 236 | "ar6 = open_zarr(shared.PATH_SLR_AR6)[[\"lon\", \"lat\", \"lsl_ncc_msl05\"]]\n", 237 | "\n", 238 | "aligned_sites = snn(\n", 239 | " full_sites, ar6[[\"lon\", \"lat\"]].to_dataframe().reset_index(drop=True)\n", 240 | ").values\n", 241 | "full[\"lsl_ncc_msl05\"] = (\n", 242 | " ar6.lsl_ncc_msl05.sel(quantile=full[\"quantile\"])\n", 243 | " .isel(site_id=aligned_sites)\n", 244 | " .interp(year=full.year)\n", 245 | " .assign_coords(site_id=full.site_id)\n", 246 | ")" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "metadata": {}, 252 | "source": [ 253 | "### Add description" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 12, 259 | "metadata": {}, 260 | "outputs": [], 261 | "source": [ 262 | "full.scenario.attrs.update(\n", 263 | " {\n", 264 | " \"description\": (\n", 265 | " \"Scenarios are 
defined by their 2100 GMSL change relative to 2000. \"\n", 266 | " \"Low=0.3m; IntLow=0.5m; Int=1.0m; IntHigh=1.5m; High=2.0m\"\n", 267 | " )\n", 268 | " }\n", 269 | ")" 270 | ] 271 | }, 272 | { 273 | "cell_type": "markdown", 274 | "metadata": {}, 275 | "source": [ 276 | "## Save" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": 13, 282 | "metadata": {}, 283 | "outputs": [], 284 | "source": [ 285 | "for v in full.variables:\n", 286 | " full[v].encoding.clear()" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 14, 292 | "metadata": {}, 293 | "outputs": [], 294 | "source": [ 295 | "save(full.chunk({\"site_id\": 100}), shared.PATH_SLR_SWEET, mode=\"w\")" 296 | ] 297 | } 298 | ], 299 | "metadata": { 300 | "kernelspec": { 301 | "display_name": "Python 3 (ipykernel)", 302 | "language": "python", 303 | "name": "python3" 304 | }, 305 | "language_info": { 306 | "codemirror_mode": { 307 | "name": "ipython", 308 | "version": 3 309 | }, 310 | "file_extension": ".py", 311 | "mimetype": "text/x-python", 312 | "name": "python", 313 | "nbconvert_exporter": "python", 314 | "pygments_lexer": "ipython3", 315 | "version": "3.10.8" 316 | }, 317 | "widgets": { 318 | "application/vnd.jupyter.widget-state+json": { 319 | "state": {}, 320 | "version_major": 2, 321 | "version_minor": 0 322 | } 323 | } 324 | }, 325 | "nbformat": 4, 326 | "nbformat_minor": 4 327 | } 328 | -------------------------------------------------------------------------------- /notebooks/models/create-surge-lookup-tables.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5468498c-bfaa-408b-ae36-841b16a69a3e", 6 | "metadata": {}, 7 | "source": [ 8 | "# Create pyCIAM Storm Costs Lookup Table" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "f6bee218-9782-40bf-8772-d2288235d1b1", 14 | "metadata": {}, 15 | "source": [ 16 | "Calculating the storm costs in a CIAM model involves a numerical integration over both elevation and the quantiles of storm surge at each segment-ADM1 location. This is too computationally intensive to run for all seg-ADMs for each year for all SLR trajectories, especially when using pyCIAM to run a Monte Carlo analysis across tens of thousands of SLR trajectories. Instead, we build a lookup table indexed by seg-ADM, LSLR, adaptation type (retreat vs. protect), cost type (mortality vs. capital loss), and `rhdiff` (the difference between the retreat/protect height and lslr). This is similar to how it is treated in the original CIAM model except that:\n", 17 | "\n", 18 | "1. We use a lookup table rather than a parameterized exponential function of `rhdiff` and `lslr`\n", 19 | "2. We account for elevational heterogeneity in population and capital when evaluating our costs in retreat scenarios. The original CIAM included `lslr` in their exponential function only for the protect adaptation type, while for `noAdaptation` and `retreat`, the function was only of `rhdiff`." 
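As a rough illustration of why tabulating first pays off, the sketch below builds a tiny lookup on an `lslr` × `rhdiff` grid and then evaluates arbitrary query points by interpolation. The grids and the quadratic stand-in surface are invented for the example; the real tables are produced per seg-ADM by `create_surge_lookup` via numerical integration over elevation and the surge height distribution.

```python
import numpy as np
import xarray as xr

# illustrative grids (m); the real resolution/extent is set elsewhere in the workflow
lslr_grid = np.linspace(0.0, 2.0, 21)
rhdiff_grid = np.linspace(0.0, 10.0, 41)

# stand-in damage surface -- in pyCIAM this comes from integrating depth-damage and
# depth-mortality functions over elevation and storm surge quantiles
damage = xr.DataArray(
    np.add.outer(lslr_grid, 0.1 * rhdiff_grid) ** 2,
    coords={"lslr": lslr_grid, "rhdiff": rhdiff_grid},
    dims=["lslr", "rhdiff"],
    name="surge_damage",
)

# once tabulated, per-year/per-scenario costs reduce to cheap bilinear interpolation
queries = xr.Dataset({"lslr": ("case", [0.33, 1.2]), "rhdiff": ("case", [2.5, 0.7])})
costs = damage.interp(lslr=queries.lslr, rhdiff=queries.rhdiff)
print(costs.values)
```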
20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "id": "8590679c-89af-4eb6-a105-752fbb51440a", 25 | "metadata": {}, 26 | "source": [ 27 | "## Setup" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 1, 33 | "id": "6b183aa3-366a-4f5f-bb95-5ef4bf48d438", 34 | "metadata": { 35 | "tags": [] 36 | }, 37 | "outputs": [], 38 | "source": [ 39 | "%load_ext autoreload\n", 40 | "%autoreload 2" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 2, 46 | "id": "cd4b2928-65a0-4733-9c71-fbefa85590be", 47 | "metadata": { 48 | "tags": [] 49 | }, 50 | "outputs": [], 51 | "source": [ 52 | "import sys\n", 53 | "\n", 54 | "sys.path.append(\"../\")" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 3, 60 | "id": "afd03880-8e46-4d81-867d-bfb6505ea788", 61 | "metadata": { 62 | "tags": [] 63 | }, 64 | "outputs": [ 65 | { 66 | "name": "stderr", 67 | "output_type": "stream", 68 | "text": [ 69 | "/srv/conda/envs/notebook/lib/python3.10/site-packages/google/cloud/storage/transfer_manager.py:30: UserWarning: The module `transfer_manager` is a preview feature. Functionality and API may change. This warning will be removed in a future release.\n", 70 | " warnings.warn(\n", 71 | "/home/jovyan/git-repos/pyCIAM-public/notebooks/models/../shared.py:3: UserWarning: Shapely 2.0 is installed, but because PyGEOS is also installed, GeoPandas will still use PyGEOS by default for now. To force to use and test Shapely 2.0, you have to set the environment variable USE_PYGEOS=0. You can do this before starting the Python process, or in your code before importing geopandas:\n", 72 | "\n", 73 | "import os\n", 74 | "os.environ['USE_PYGEOS'] = '0'\n", 75 | "import geopandas\n", 76 | "\n", 77 | "In a future release, GeoPandas will switch to using Shapely by default. 
If you are using PyGEOS directly (calling PyGEOS functions on geometries from GeoPandas), this will then stop working and you are encouraged to migrate from PyGEOS to Shapely 2.0 (https://shapely.readthedocs.io/en/latest/migration_pygeos.html).\n", 78 | " import geopandas as gpd\n" 79 | ] 80 | } 81 | ], 82 | "source": [ 83 | "import distributed as dd\n", 84 | "import pandas as pd\n", 85 | "from pyCIAM.surge import damage_funcs\n", 86 | "from pyCIAM.surge.lookup import create_surge_lookup\n", 87 | "from shared import (\n", 88 | " PATH_PARAMS,\n", 89 | " PATH_SLIIDERS,\n", 90 | " PATH_SLIIDERS_SEG,\n", 91 | " PATH_SLR_AR5_QUANTILES,\n", 92 | " PATH_SLR_AR6,\n", 93 | " PATH_SLR_SWEET,\n", 94 | " PATHS_SURGE_LOOKUP,\n", 95 | " QUANTILES,\n", 96 | " STORAGE_OPTIONS,\n", 97 | " start_dask_cluster,\n", 98 | ")" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 4, 104 | "id": "47979c48-7fff-49b4-b445-ea6bf746fa37", 105 | "metadata": { 106 | "tags": [ 107 | "parameters" 108 | ] 109 | }, 110 | "outputs": [], 111 | "source": [ 112 | "# When running on larger/scalable dask cluster, may wish to specify number of workers\n", 113 | "# Default is LocalCluster which will use the number of CPUs available on local machine\n", 114 | "N_WORKERS_MIN = 7\n", 115 | "N_WORKERS_MAX = 700\n", 116 | "SEG_CHUNKSIZE = 5\n", 117 | "\n", 118 | "PARAMS = pd.read_json(PATH_PARAMS)[\"values\"]" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 5, 124 | "id": "6104bced", 125 | "metadata": { 126 | "tags": [] 127 | }, 128 | "outputs": [], 129 | "source": [ 130 | "DMF_I = getattr(damage_funcs, PARAMS.dmf + \"_i\")\n", 131 | "DDF_I = getattr(damage_funcs, PARAMS.ddf + \"_i\")" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 6, 137 | "id": "dbb04b4a-14b8-4403-ad33-88bfe71bd8fc", 138 | "metadata": { 139 | "tags": [] 140 | }, 141 | "outputs": [ 142 | { 143 | "data": { 144 | "application/vnd.jupyter.widget-view+json": { 145 | "model_id": "fcadb94377b4430d9c2964ac9c808c2a", 146 | "version_major": 2, 147 | "version_minor": 0 148 | }, 149 | "text/plain": [ 150 | "VBox(children=(HTML(value='

GatewayCluster

'), HBox(children=(HTML(value='\\n
\\n