├── scripts ├── facts.runs │ ├── template │ │ ├── location.lst │ │ ├── workflows.yml │ │ └── config.yml │ ├── facts_runs.sh │ ├── format_facts.py │ └── prepare_facts.py ├── directory_setup.py ├── create_config.py └── command_line_scghg.py ├── environment.yml ├── LICENSE ├── .gitignore └── README.md /scripts/facts.runs/template/location.lst: -------------------------------------------------------------------------------- 1 | New_York 12 40.70 -74.01 2 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: dscim-facts-epa 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - google-cloud-storage==2.5.0 7 | - statsmodels==0.13.2 8 | - python==3.9 9 | - pyfiglet==0.8.post1 10 | - pip==22.2.2 11 | - xarray==2022.3.0 12 | - pandas==1.4.3 13 | - numpy==1.23.3 14 | - scipy==1.11.3 15 | - jupyterlab==3.4.7 16 | - ipython==8.5.0 17 | - matplotlib==3.5.3 18 | - dask==2022.9.0 19 | - pytest==7.1.3 20 | - pytest-cov==3.0.0 21 | - papermill==2.3.4 22 | - click==8.1.3 23 | - netcdf4==1.6.0 24 | - h5netcdf==1.0.2 25 | - zarr==2.12.0 26 | - cftime==1.6.1 27 | - bottleneck==1.3.5 28 | - nc-time-axis==1.4.1 29 | - jupyterlab_widgets==3.0.3 30 | - pip: 31 | - dscim==0.5.0 32 | - black==22.1.0 33 | - flake8==5.0.4 34 | - dill==0.3.5.1 35 | - numbagg==0.2.1 36 | - p_tqdm==1.4.0 37 | - impactlab_tools==0.4.0 38 | - inquirer==2.10.0 39 | - pyyaml==6.0.1 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Climate Impact Lab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to 
#!/bin/bash
# Define arrays for pulse years and gases
# Additional pulse years and gases can be appended with spaces in between
# example: pulse_years=(2020 2030 2040)
# example: gases=("CO2_Fossil" "CH4" "N2O")
pulse_years=(2020)
gases=("CO2_Fossil")
facts_dir="/opt/facts"
dscim_facts_epa_dir="/opt/dscim-facts-epa"
# Abort on the first failed command or use of an unset variable so a broken
# FACTS run does not silently produce incomplete outputs downstream
set -euo pipefail
# Create FACTS experiments
python3 prepare_facts.py --dscim_repo "${dscim_facts_epa_dir}" --facts_repo "${facts_dir}" --pulse_years "${pulse_years[@]}" --gases "${gases[@]}"
# Loop through the gases
for gas in "${gases[@]}"; do
    # Loop through the pulse years
    for year in "${pulse_years[@]}"; do
        # FACTS experiment names use '.' where gas names use '_'
        gas_exp="${gas//_/.}"
        echo "Gas: $gas"
        echo "Pulse: $year"
        cd "$facts_dir"
        python3 runFACTS.py "rff.$year.$gas_exp"
    done
done
# Run the no-pulse control experiment; cd explicitly rather than relying on the
# working directory left over from the last loop iteration
cd "$facts_dir"
python3 runFACTS.py rff.control.control
cd "$dscim_facts_epa_dir/scripts/facts.runs"
# Take the outputs of the FACTS experiment and save in the proper format
python3 format_facts.py --facts_repo "${facts_dir}" --pulse_years "${pulse_years[@]}" --gases "${gases[@]}" --gmsl_pulse facts_gmsl_pulse.nc4
cd "$dscim_facts_epa_dir/scripts"
# Create config for dscim run
python3 create_config.py --gmsl_file facts_gmsl_pulse.nc4 --gmst_file gmst_pulse.nc4 --pulse_years "${pulse_years[@]}" --gases "${gases[@]}"
| - local/testrffnostack.larmip.larmip.AIS_localsl.nc 30 | - local/testrffnostack.ar5glaciers.ipccar5.glaciers_localsl.nc 31 | - local/testrffnostack.ocean.tlm.sterodynamics_localsl.nc 32 | - local/testrffnostack.k14vlm.kopp14.verticallandmotion_localsl.nc 33 | - local/testrffnostack.lws.ssp.landwaterstorage_localsl.nc 34 | options: 35 | pyear_end: 2300 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | *.csv 131 | *.nc4 132 | *.nc 133 | *.zarr 134 | *.zip 135 | scripts/input/* 136 | scripts/output/* 137 | attributes*-combined.txt 138 | generated_conf.yml 139 | *.yml 140 | input_creation/coastal_gmsl_inputs/* 141 | -------------------------------------------------------------------------------- /scripts/facts.runs/template/config.yml: -------------------------------------------------------------------------------- 1 | global-options: 2 | nsamps: 10000 3 | scenario: ssp245 4 | pyear_start: 2020 5 | pyear_end: 2300 6 | pyear_step: 1 7 | baseyear: 2005 8 | 9 | 10 | climate_step: 11 | dummy: 12 | module_set: "facts" 13 | module: "dummy" 14 | input_data_file: 15 | - "ohc.nc4" 16 | - "gsat.nc4" 17 | - "climate.nc4" 18 | climate_output_data: 19 | - "ohc.nc4" 20 | - "gsat.nc4" 21 | - "climate.nc4" 22 | generates_climate_output: true 23 | 24 | sealevel_step: 25 | GrIS1f: 26 | module_set: "FittedISMIP" 27 | module: "GrIS" 28 | options: 29 | crateyear_start: 2080 30 | crateyear_end: 2100 31 | options_allowoverwrite: 32 | climate_data_file: "%CLIMATE_DATA_FILE%" 33 | include_in_workflow: 34 | - "wf1f" 35 | - "wf2f" 36 | 37 | larmip: 38 | module_set: "larmip" 39 | module: "AIS" 40 | options_allowoverwrite: 41 | climate_data_file: "%CLIMATE_DATA_FILE%" 42 | include_in_workflow: 43 | - "wf2f" 44 | 45 | ar5glaciers: 46 | module_set: "ipccar5" 47 | module: "glaciers" 48 | options: 49 | gmip: 2 50 | 
"""Set up the DSCIM-FACTS-EPA input/output directories.

Downloads the zipped model input data from Google Cloud Storage, extracts it
into ``./input``, and writes the default run configuration to
``generated_conf.yml``.
"""
import yaml
import os
from pathlib import Path
import zipfile
from tqdm import *
import requests

base = os.getcwd()
# Renamed from `input`/`output`: the originals shadowed the `input` builtin.
input_dir = Path(base) / "input"
output_dir = Path(base) / "output"

def makedir(path):
    """Create `path` (including parents) if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)


climate_inputs = input_dir / "climate"
econ_inputs = input_dir / "econ"
damage_functions = input_dir / "damage_functions"


# Default dscim run configuration; all paths point at the directories above.
conf_base = {'mortality_version': 1,
             'coastal_version': '0.20',
             'rff_climate': {'gases': ['CO2_Fossil', 'CH4', 'N2O'],
                             'gmsl_path': '',
                             'gmst_path': '',
                             'gmst_fair_path': str(climate_inputs / 'gmst_pulse.nc4'),
                             'gmsl_fair_path': str(climate_inputs / 'gmsl_pulse.nc4'),
                             'damages_pulse_conversion_path': str(climate_inputs / 'conversion_v5.03_Feb072022.nc4'),
                             'ecs_mask_path': None,
                             'emission_scenarios': None},
             'paths': {'rff_damage_function_library': str(damage_functions)},
             'rffdata': {'socioec_output': str(econ_inputs),
                         'pulse_years': [2020, 2030, 2040, 2050, 2060, 2070, 2080]},
             'sectors': {'coastal_v0.20': {'formula': 'damages ~ -1 + gmsl + np.power(gmsl, 2)'},
                         'agriculture': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'mortality_v1': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'energy': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'labor': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'AMEL_m1': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'CAMEL_m1_c0.20': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2) + gmsl + np.power(gmsl, 2)'}},
             'save_path': str(output_dir),
             'gas_conversions': {
                 'CO2_Fossil': 2.72916487e-10,
                 'CH4': 2.50000000e-08,
                 'N2O': 6.36480131e-07}}

# Download inputs from the internet
print("Downloading input files...")
name = 'dscim-v20231211_inputs.zip'
url = 'https://storage.googleapis.com/climateimpactlab-scc-tool/dscim-facts-epa_input_data/' + name
with requests.get(url, stream=True) as r:
    r.raise_for_status()
    # Content-Length may be absent; fall back to an indeterminate progress bar
    total = int(r.headers.get('Content-Length', 0)) or None
    with open(name, 'wb') as f, tqdm(total=total) as pbar:
        for chunk in r.iter_content(chunk_size=8192):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                pbar.update(len(chunk))
print("")
print("Unzipping input files...")
with zipfile.ZipFile(Path(base) / name, 'r') as zip_ref:
    for member in tqdm(zip_ref.infolist()):
        try:
            zip_ref.extract(member, Path(base))
        except zipfile.error as e:
            # Report members that fail to extract instead of silently skipping
            print(f"Warning: could not extract {member.filename}: {e}")

if os.path.exists(Path(base) / name):
    os.remove(Path(base) / name)
else:
    print("Download Failed")

# The archive unpacks to ./inputs; move it to the expected ./input location
os.rename(Path(base) / 'inputs', input_dir)

with open('generated_conf.yml', 'w') as outfile:
    yaml.dump(conf_base, outfile, default_flow_style=False)
'/rff.control.control/output/rff.control.control.total.workflow.wf2f.global.nc')) 51 | 52 | nsamps = len(control.samples.values) 53 | 54 | pulse_gas = [] 55 | for pulse_year, gas in list(product(pulse_years,gases)): 56 | gas_exp = gas.replace('_','.') 57 | pulse = ((0.5 * xr.open_dataset(facts_dir + f'/rff.{pulse_year}.{gas_exp}/output/rff.{pulse_year}.{gas_exp}.total.workflow.wf1f.global.nc') + 58 | 0.5 * xr.open_dataset(facts_dir + f'/rff.{pulse_year}.{gas_exp}/output/rff.{pulse_year}.{gas_exp}.total.workflow.wf2f.global.nc')) 59 | .rename({'samples':'runid','sea_level_change':'pulse_gmsl','years':'year'}) 60 | .assign_coords({'runid':np.arange(1,nsamps + 1),'gas':gas, 'pulse_year':int(pulse_year)}) 61 | .expand_dims(['gas','pulse_year']) 62 | ) 63 | pulse_gas = pulse_gas + [pulse,] 64 | pulse = xr.combine_by_coords(pulse_gas) 65 | 66 | pulse = (pulse 67 | .squeeze(drop = True) 68 | .drop(['lat','lon']) 69 | ) 70 | 71 | control = (control 72 | .squeeze(drop = True) 73 | .rename({'samples':'runid','sea_level_change':'control_gmsl','years':'year'}) 74 | .assign_coords({'runid':np.arange(1,nsamps + 1)}) 75 | .drop(['lat','lon'])) 76 | 77 | gmsl_ds = xr.merge([control,pulse])/10 78 | 79 | save = Path(os.getcwd()) 80 | save = save.parent.absolute() / 'input' / 'climate' 81 | 82 | gmsl_ds.to_netcdf(save / gmsl_pulse, encoding = {"control_gmsl":{"dtype":"float64"},"pulse_gmsl":{"dtype":"float64"}}) 83 | -------------------------------------------------------------------------------- /scripts/create_config.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import os 3 | from pathlib import Path 4 | from datetime import datetime 5 | import sys 6 | import re 7 | import argparse 8 | 9 | # Create argument parser 10 | parser = argparse.ArgumentParser(description='Create config from gmsl and gmst pulse files') 11 | 12 | # Add named arguments for the lists 13 | parser.add_argument('--gmsl_file', nargs=1, help='Path to GMSL 
"""Create a dscim-facts-epa run config from GMSL and GMST pulse files.

Validates nothing beyond argument presence; writes a dated YAML config whose
filename is suffixed with an increasing counter if one already exists today.
"""
import yaml
import os
from pathlib import Path
from datetime import datetime
import sys
import re
import argparse

# Create argument parser
parser = argparse.ArgumentParser(description='Create config from gmsl and gmst pulse files')

# Add named arguments for the lists
parser.add_argument('--gmsl_file', nargs=1, help='Path to GMSL pulse file')
parser.add_argument('--gmst_file', nargs=1, help='Path to GMST pulse file')
parser.add_argument('--pulse_years', nargs='*', help='List of pulse years')
parser.add_argument('--gases', nargs='*', help='List of gases')

# Parse the command line arguments
args = parser.parse_args()

# Access the lists using the argument names
gmsl_pulsename = args.gmsl_file
gmst_pulsename = args.gmst_file

if args.pulse_years:
    pulse_years = list(map(int, args.pulse_years))
else:
    print("No pulse years specified")
    print("Defaulting to 2020 pulse year")
    pulse_years = [2020]

if args.gases:
    gases = args.gases
else:
    print("No gases specified")
    print("Defaulting to CO2_Fossil")
    gases = ['CO2_Fossil']

print(pulse_years)
print(gases)

# Capture the date once so day/month/year cannot straddle midnight between calls
now = datetime.now()
currentDay = now.day
currentMonth = now.month
currentYear = now.year


# We may want to read these files in to make sure they exist/have the correct format

base = os.getcwd()
# `input_dir`/`output_dir` rather than `input`/`output`, which shadow builtins
input_dir = Path(base) / "input"
output_dir = Path(base) / "output"


climate_inputs = input_dir / "climate"
econ_inputs = input_dir / "econ"
damage_functions = input_dir / "damage_functions"

conf_base = {'mortality_version': 1,
             'coastal_version': '0.20',
             'rff_climate': {'gases': gases,
                             'gmsl_path': '',
                             'gmst_path': '',
                             'gmst_fair_path': str(climate_inputs / gmst_pulsename[0]),
                             'gmsl_fair_path': str(climate_inputs / gmsl_pulsename[0]),
                             'damages_pulse_conversion_path': str(climate_inputs / 'conversion_v5.03_Feb072022.nc4'),
                             'ecs_mask_path': None,
                             'emission_scenarios': None},
             'paths': {'rff_damage_function_library': str(damage_functions)},
             'rffdata': {'socioec_output': str(econ_inputs),
                         'pulse_years': pulse_years},
             'sectors': {'coastal_v0.20': {'formula': 'damages ~ -1 + gmsl + np.power(gmsl, 2)'},
                         'agriculture': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'mortality_v1': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'energy': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'labor': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'AMEL_m1': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2)'},
                         'CAMEL_m1_c0.20': {'formula': 'damages ~ -1 + anomaly + np.power(anomaly, 2) + gmsl + np.power(gmsl, 2)'}},
             'save_path': str(output_dir),
             'gas_conversions': {
                 'CO2_Fossil': 2.72916487e-10,
                 'CH4': 2.50000000e-08,
                 'N2O': 6.36480131e-07}}


# Avoid clobbering a config created earlier today: find the highest existing
# numeric suffix and use the next one
stem = f"facts_conf_{currentDay}-{currentMonth}-{currentYear}"
if os.path.exists(f"{stem}.yaml"):
    files = os.listdir('.')

    def find_filenum(f):
        # Raw string with escaped dot; the original pattern used '\d' inside a
        # non-raw literal (invalid escape, DeprecationWarning) and an unescaped '.'
        num = re.findall(rf"{stem}_(\d+)\.yaml", f)
        return int(num[0]) if num else 1

    i = max(map(find_filenum, files)) + 1

    config_file = f"{stem}_{i}.yaml"
else:
    config_file = f"{stem}.yaml"


with open(config_file, 'w') as outfile:
    yaml.dump(conf_base, outfile, default_flow_style=False)

print(f"Saved config to {config_file}")
print(f"To run SC-GHGs, use the command")
print(f"python command_line_scghg.py {config_file}")
"""Create FACTS experiment directories and climate inputs for each pulse-year/gas run.

For every (pulse_year, gas) combination plus the control, copies the template
config/location/workflow files into a ``rff.<year>.<gas>`` experiment directory
inside the FACTS repo and writes the GSAT/OHC netCDF inputs derived from the
dscim pulse files.
"""
import os
import xarray as xr
import numpy as np
import argparse
from datetime import datetime
from itertools import product
from pathlib import Path
import shutil

# Create argument parser
parser = argparse.ArgumentParser(description='Process two lists.')

# Add named arguments for the lists
parser.add_argument('--facts_repo', nargs=1, help='Path to the FACTS repo')
# Help text fixed: it was a copy-paste of --facts_repo's
parser.add_argument('--dscim_repo', nargs=1, help='Path to the dscim-facts-epa repo')
parser.add_argument('--pulse_years', nargs='*', help='List of pulse years')
parser.add_argument('--gases', nargs='*', help='List of gases')


# Parse the command line arguments
args = parser.parse_args()

# Access the lists using the argument names.
# Default to a 2020 CO2_Fossil pulse when unspecified, consistent with
# format_facts.py and create_config.py; the original crashed with a TypeError
# in product(None, ...) when either flag was omitted.
if args.pulse_years:
    pulse_years = args.pulse_years
else:
    print("No pulse years specified")
    print("Defaulting to 2020 pulse year")
    pulse_years = [2020]

if args.gases:
    gases = args.gases
else:
    print("No gases specified")
    print("Defaulting to CO2_Fossil")
    gases = ['CO2_Fossil']

facts_dir = Path(args.facts_repo[0])
dscim_dir = Path(args.dscim_repo[0])
print("Facts_dir:", facts_dir)
print("pulse_years:", pulse_years)
print("gases:", gases)

scenario = 'ssp245'
nsamps = 10000
proj_years = np.arange(1750, 2501)

# Experiment templates shipped with this repo (loop-invariant, so hoisted)
template_dir = Path(os.getcwd()) / 'template'

for pulse_year, gas in list(product(pulse_years, gases)) + [('control', 'control'),]:
    # FACTS does not accept underscores in experiment names
    gas_exp = gas.replace('_', '.')

    run_dir = facts_dir / f'rff.{pulse_year}.{gas_exp}'
    input_dir = run_dir / "input"
    os.makedirs(run_dir, exist_ok=True)
    os.makedirs(input_dir, exist_ok=True)
    files = ['config.yml', 'location.lst', 'workflows.yml']
    for i in files:
        shutil.copyfile(template_dir / i, run_dir / i)

    attrs = {"Source": "RFF",
             "Date Created": str(datetime.now()),
             "Description": (
                 " Simulations based on parameters developed here: https://github.com/chrisroadmap/ar6/tree/main/notebooks."
                 " Parameters obtained from: https://zenodo.org/record/5513022#.YVW1HZpByUk."),
             "Scenario": scenario,
             "Method": (
                 "Temperature and ocean heat content were returned from fair.foward.fair_scm() in emission-driven mode."),
             "Note": "Code provided by Kelly McCusker of Rhodium Group Climate Impact Lab and adapted for use in FACTS."
             }

    temp_file = xr.open_dataset(dscim_dir / 'scripts' / 'input' / 'climate' / 'gmst_pulse.nc4')
    ohc_file = xr.open_dataset(dscim_dir / 'scripts' / 'input' / 'climate' / 'ohc_pulse.nc4')
    temp_file.close()
    ohc_file.close()
    if pulse_year == 'control':
        # Control run: use the no-pulse variables; FACTS expects a 'locations'
        # dimension, with -1 denoting the global series
        tempds = (temp_file
                  .rename(control_temperature='surface_temperature', year='years', runid='samples')
                  .assign_coords(locations=-1)
                  .expand_dims("locations")
                  ).surface_temperature.to_dataset()

        ohcds = (ohc_file
                 .rename(control_ocean_heat_content='ocean_heat_content', year='years', runid='samples')
                 .assign_coords(locations=-1)
                 .expand_dims("locations")
                 ).ocean_heat_content.to_dataset()
    else:
        # Pulse run: select this experiment's pulse year and gas
        tempds = (temp_file
                  .rename(pulse_temperature='surface_temperature', year='years', runid='samples')
                  .sel(pulse_year=int(pulse_year), gas=gas, drop=True)
                  .assign_coords(locations=-1)
                  .expand_dims("locations")
                  ).surface_temperature.to_dataset()

        ohcds = (ohc_file
                 .rename(pulse_ocean_heat_content='ocean_heat_content', year='years', runid='samples')
                 .sel(pulse_year=int(pulse_year), gas=gas, drop=True)
                 .assign_coords(locations=-1)
                 .expand_dims("locations")
                 ).ocean_heat_content.to_dataset()

    temps = tempds.surface_temperature.values
    ohcs = ohcds.ocean_heat_content.values
    # Write the datasets to netCDF
    tempds.to_netcdf(input_dir / "gsat.nc4", encoding={"surface_temperature": {"dtype": "float64", "zlib": True, "complevel": 4}})
    ohcds.to_netcdf(input_dir / "ohc.nc4", encoding={"ocean_heat_content": {"dtype": "float64", "zlib": True, "complevel": 4}})

    # create a single netCDF file that is compatible with modules expecting parameters organized in a certain fashion
    pooledds = xr.Dataset({"surface_temperature": (("years", "samples"), temps[0, ::, ::].transpose(), {"units": "degC"}),
                           "ocean_heat_content": (("years", "samples"), ohcs[0, ::, ::].transpose(), {"units": "J"})},
                          coords={"years": proj_years, "samples": np.arange(nsamps) + 1}, attrs=attrs)
    pooledds.to_netcdf(input_dir / "climate.nc4", mode='w', group=scenario, engine='netcdf4', format='NETCDF4',
                       encoding={"ocean_heat_content": {"dtype": "float64", "zlib": True, "complevel": 4},
                                 "surface_temperature": {"dtype": "float64", "zlib": True, "complevel": 4}})
    yearsds = xr.Dataset({"year": proj_years})
    yearsds.to_netcdf(input_dir / "climate.nc4", engine='netcdf4', format='NETCDF4', mode='a')
6 | 7 | ## Run cases 8 | 9 | By default, DSCIM-FACTS-EPA can run SC-GHGs for carbon dioxide, methane, and nitrous oxide for pulse years 2020-2080 in 10 year increments for the Resources for the Future (RFF) emissions scenarios. For alternative gases or pulse years the user will need to provide new GMST and GMSL trajectories. The user can provide these trajectories directly, or can use the DSCIM-FACTS-EPA FACTS runner to generate GMSL from ocean heat content (OHC) and GMST. The intended use cases of this repository are thus: 10 | 11 | 1. The user wants to generate the Climate Impact Lab (CIL) RFF SC-GHGs themselves. 12 | 2. The user has GMST and GMSL files following the guidelines below and wants to use the CIL damage functions to generate SC-GHGs based on those files. 13 | 3. The user has GMST and OHC files following the guidelines below (usually directly from a simple climate model, such as FaIR) and wants to generate GMSL files from FACTS. 14 | 15 | ```mermaid 16 | flowchart LR 17 | 18 | A[1. Default] --> D(Setup) 19 | D --> |1.| E{Running SC-GHGs} 20 | 21 | B[2. GMST/GMSL] --> D(Setup) 22 | D --> |2.| F(Formatting GMST/GMSL files) 23 | F --> |2.| H(Creating a run config) 24 | H --> E{Running SC-GHGs} 25 | 26 | C[3. GMST/OHC] --> D(Setup) 27 | D --> |3.| F(Formatting GMST/GMSL files) 28 | F --> |3.| G(Running FACTS) 29 | G --> E{Running SC-GHGs} 30 | ``` 31 | 32 | 33 | ## Setup 34 | 35 | To begin, we assume you have a system with `conda` available from the command line, and some familiarity with it. A conda distribution is available from [miniconda](https://docs.conda.io/en/latest/miniconda.html), [Anaconda](https://www.anaconda.com/), or [mamba](https://mamba.readthedocs.io/en/latest/). This helps to ensure that required software packages are correctly compiled and installed, replicating the analysis environment. 
If you are using conda, we recommend following [this](https://www.anaconda.com/blog/a-faster-conda-for-a-growing-community) guide to speed up environment solve time. 36 | 37 | Begin in the `dscim-facts-epa` project directory, which can be downloaded and unzipped, or cloned with `git` in a terminal. For example: 38 | 39 | ```bash 40 | git clone https://github.com/ClimateImpactLab/dscim-facts-epa.git 41 | ``` 42 | 43 | Next, from within the root directory of `dscim-facts-epa`, set up a conda environment for this analysis. This replicates the software environment used for analysis. With `conda` from the command line this is 44 | 45 | ```bash 46 | conda env create -f environment.yml 47 | ``` 48 | 49 | and then activate the environment with 50 | 51 | ```bash 52 | conda activate dscim-facts-epa 53 | ``` 54 | 55 | Be sure that all commands and analyses are run from this conda environment. 56 | 57 | With the environment set up and active, the next step is downloading the required DSCIM-FACTS-EPA input data into the local directory. Assuming you are in the `dscim-facts-epa/scripts` directory, from the command line run: 58 | 59 | ```bash 60 | python directory_setup.py 61 | ``` 62 | 63 | Note that this will download several gigabytes of data and may take several minutes, depending on your connection speed. 64 | 65 | ## Formatting files 66 | 67 | To ensure that both `FACTS` and `dscim-facts-epa` can read new GMST, GMSL, and OHC files, a strict format must be adopted. 68 | 1. We require that there be a control and pulse version of the appropriate variable: 69 | - For GMST, these are `control_temperature` and `pulse_temperature` 70 | - For GMSL, these are `control_gmsl` and `pulse_gmsl` 71 | - For OHC, these are `control_ocean_heat_content` and `pulse_ocean_heat_content` 72 | 2. Any combination of gases and pulse years can be supplied. SC-GHGs will then be runnable for those gases and pulse years. 73 | 3. We expect `year` to be at minimum from 2000-2300. 
Climate inputs are automatically made relative to 2001-2010 in `dscim-facts-epa` to be consistent with the damage functions. 74 | 4. The `runid` dimension corresponds to the FaIR parameters and RFF-SPs crosswalk specified for EPA's September 2022 draft technical report, "Report on the Social Cost of Greenhouse Gases: Estimates Incorporating Recent Scientific Advances". Thus, each runid is associated with an RFF-SP index and a climate parameter index. We expect 10000 `runids` from 1 to 10000 75 | 76 | ### GMST 77 | ![gmst_pulse_720](https://github.com/ClimateImpactLab/dscim-facts-epa/assets/5862128/9631c307-6cb0-417f-9e1c-4835d5293c05) 78 | 79 | ### GMSL 80 | ![gmsl_pulse_720](https://github.com/ClimateImpactLab/dscim-facts-epa/assets/5862128/6335e4ae-0be2-4370-b001-75767c817197) 81 | 82 | ### OHC 83 | ![ohc_pulse_720](https://github.com/ClimateImpactLab/dscim-facts-epa/assets/5862128/f980274b-bc85-45fd-a7af-8b93003a919f) 84 | 85 | ## Creating a `dscim-facts-epa` run config 86 | 87 | If you already have alternative GMSL and GMST files, it is recommended to run them through the `create_config.py` script to ensure that the files are formatted correctly. In addition, this script will generate a config that will allow you to directly begin running `dscim-facts-epa` using the user-specified GMST and GMSL inputs, gases, and pulse_years. To run this script, you will need to specify your correctly formatted gmst and gmsl files: 88 | 89 | ```bash 90 | python create_config.py --gmst_file GMST_filename.nc4 --gmsl_file GMSL_filename.nc4 --pulse_years pulseyear1 pulseyear2 ... --gases gas1 gas2 ... 91 | ``` 92 | 93 | Description of arguments: 94 | - `--gmst_file`: The name of your GMST file placed in `dscim-facts-epa/scripts/input/climate` 95 | - `--gmsl_file`: The name of your GMSL file placed in `dscim-facts-epa/scripts/input/climate` 96 | - `--pulse_years` (optional -- default: 2020): Space delimited pulse years. 
Pulse years must be included in the coordinates of your gmst/gmsl files 97 | - `--gases` (optional -- default: "CO2_Fossil"): Space delimited gases. Gases must be included in the coordinates of your gmst/gmsl files 98 | 99 | Once this config is created, you can proceed to the **Running SC-GHGs** step. 100 | 101 | ## Running FACTS 102 | 103 | If you will be running FACTS, ensure you have followed the **Formatting GMST/GMSL files** section above. 104 | 105 | We recommend installing or cloning FACTS v1.1.1 found [here](https://github.com/radical-collaboration/facts/releases/tag/v1.1.1). To get started with FACTS, follow the [FACTS quick start instructions](https://fact-sealevel.readthedocs.io/en/latest/quickstart.html). If you are running on a Linux machine (quickstart Section 1.1), proceed to the **Not Docker** section below. If you are running in a Container (quickstart Section 1.2), proceed to the **Docker** section below. We recommend reading these sections before following the FACTS quickstart. Note that to run `facts` for DSCIM-FACTS-EPA, you will *not* need to set up the `emulandice` module in facts. 106 | 107 | ### Docker 108 | 109 | Once you have reached step 3 of section 1.2 in the FACTS quickstart, come back here and use the following `docker` command: 110 | 111 | ```bash 112 | docker run -it --volume=$HOME/facts:/opt/facts --volume=$HOME/dscim-facts-epa:/opt/dscim-facts-epa -w /opt/dscim-facts-epa/scripts/facts.runs facts 113 | ``` 114 | Replace `$HOME/dscim-facts-epa` and `$HOME/facts` with the path to your cloned or downloaded `dscim-facts-epa` repository and facts repository, respectively. This command will start the container, mounting the `dscim-facts-epa` directory and the `facts` directory. Once the container is running, your working directory will be `/opt/dscim-facts-epa/scripts/facts.runs`. 115 | 116 | Now proceed to the **Running the bash script** step. 
117 | 118 | ### Not Docker 119 | 120 | To run FACTS outside of a docker, the user can use the `dscim-facts-epa` environment installed above. Activate the environment by typing `conda activate dscim-facts-epa` and install an additional python package: 121 | 122 | ```bash 123 | pip install radical.entk==1.41.0 124 | ``` 125 | 126 | and proceed to the next section. 127 | 128 | ### Running the bash script 129 | 130 | The user must now make modifications to the `scripts/facts.runs/facts_runs.sh` script to ensure all files are found and run specifications are set. Those changes are: 131 | - on line 6 of the script, change `pulse_years` to the desired pulse years to be run by FACTS 132 | - on line 7, change `gas` to the desired gases to be run by FACTS 133 | 134 | If not running in the Docker Container: 135 | - on line 8, change `facts_dir` to where you have cloned your FACTS repository 136 | - on line 9, change `dscim_facts_epa_dir` to where you have cloned this repository 137 | 138 | Assuming you are in the `dscim-facts-epa/scripts/facts.runs` folder run: 139 | 140 | ```bash 141 | bash facts_runs.sh 142 | ``` 143 | 144 | Running FACTS is a relatively memory-intensive and disk-space-intensive process. To successfully run FACTS, you will need a moderately powerful workstation (or server) with at least 32 gigabytes of computer RAM. By default, FACTS uses two CPU cores and is not particularly sensitive to clock speed or number of CPU cores. In addition, FACTS currently requires around 30 gigabytes of disk space per pulse year-gas, which means that 3 gases and 7 pulse years (a total of 22 runs including the control) will require approximately 660 gigabytes of disk space. Alternatively, one can run subsets of runs at a time and clear memory in between. To clear memory after a run has been completed, remove the subdirectories in the `~/radical.pilot.sandbox` folder. 

Note that the more pulse year and gas dimensions your input climate files have, the longer this run will take as pulse year-gas combinations are run in sequence. On a fast machine, each combination can take approximately 10 minutes, meaning that for a run of 3 gases for 7 pulse years, the run will take 220 minutes. The run script will create the appropriate number of FACTS "experiments" (22 in the example case), run through them, and concatenate the outputs into the format expected by `dscim-facts-epa`.

If a docker was used, exit it once the run is complete using the `exit` command.

## Running SC-GHGs

After setting up the dscim-facts-epa environment and input data, activate the environment by typing `conda activate dscim-facts-epa`. You can run SC-GHG calculations under different conditions with or without a config file.

Assuming you are in the `dscim-facts-epa/scripts` folder, if you want to run the cil-spec SC-GHGs, you can run:
```bash
python command_line_scghg.py
```

Alternatively, if you have run FACTS, or are using a gmsl file of your own, you can run:
```bash
python command_line_scghg.py name_of_config.yml
```

and follow the on-screen prompts. When the selector is a caret, you may only select one option. Use the arrow keys on your keyboard to highlight your desired option and press enter to submit. When you are presented with `X` and `o` selectors, you may use the spacebar to select (`X`) or deselect (`o`), then press enter to submit once you have chosen your desired number of parameters. Once you have completed all of the options, the DSCIM run will begin.

### Command line options

Below is a short summary of what each command line option does.
To view a more detailed description of what the run parameters do, see the [Documentation](https://impactlab.org/research/dscim-user-manual-version-092023-epa) for Data-driven Spatial Climate Impact Model (DSCIM). 169 | 170 | #### Sector 171 | 172 | The user may only select one sector per run. Sectors represent the combined SC-GHG or partial SC-GHGs of the chosen sector. 173 | 174 | #### Discount rate 175 | 176 | These runs use endogenous Ramsey discounting that are targeted to begin at the chosen near-term discount rate(s). 177 | 178 | #### Pulse years 179 | 180 | Pulse year represents the SC-GHG for a pulse of greenhouse gas (GHG) emitted in the chosen pulse year(s). 181 | 182 | #### Domain of damages 183 | 184 | The default is a global SC-GHG accounting for global damages in response to a pulse of GHG. The user has the option to instead limit damages to those occurring directly within the territorial United States. This is only a partial accounting of the cost of climate change to U.S. citizens and residents because it excludes international transmission mechanisms, like trade, cross-border investment and migration, damage to the assets of U.S. citizens and residents outside the United States, or consideration of how GHG emission reduction activity within the United States impacts emissions in other countries. 185 | 186 | #### Optional files 187 | 188 | By default, the script will produce the expected SC-GHGs as a `.csv`. The user also has the option to save the full distribution of 10,000 SC-GHGs -- across emissions, socioeconomics, and climate uncertainty -- as a `.csv`, and the option to save global consumption net of baseline climate damages ("global_consumption_no_pulse") as a netcdf `.nc4` file. 189 | 190 | ## Further Information 191 | 192 | #### Input Files 193 | These files are installed during the above Setup process and take up 4.65 GB of disk space. 
194 | 195 | Climate 196 | - Global mean surface temperature (GMST) trajectories output from FaIR: gmst_pulse.nc 197 | - Global mean sea level (GMSL) trajectories derived from FaIR GMST: gmsl_pulse.zarr 198 | - Conversion factors to convert SC-GHGs to $/tonne of GHG: conversion_v5.03_Feb072022.nc4 199 | 200 | Econ 201 | - RFF USA aggregated GDP and population trajectories: rff_USA_socioeconomics.nc4 202 | - RFF global aggregated GDP and population trajectories: rff_global_socioeconomics.nc4 203 | 204 | Damage Functions 205 | - Files containing a set of damage function coefficients for each RFF draw for each economic sector and valuation choice. 206 | - RFF damage function emulator weights: damage_function_weights.nc4 207 | 208 | -------------------------------------------------------------------------------- /scripts/command_line_scghg.py: -------------------------------------------------------------------------------- 1 | import xarray as xr 2 | import dscim 3 | import yaml 4 | from dscim.menu.simple_storage import Climate, EconVars 5 | import pandas as pd 6 | import numpy as np 7 | from itertools import product 8 | from pathlib import Path 9 | import inquirer 10 | from pyfiglet import Figlet 11 | from pathlib import Path 12 | import os 13 | import re 14 | import subprocess 15 | from datetime import date 16 | import sys 17 | 18 | # EDIT this line upon each release of dscim-facts-epa 19 | VERSION = "0.1.0" 20 | 21 | args = sys.argv 22 | if len(args) == 1: 23 | conf_name = "generated_conf.yml" 24 | else: 25 | conf_name = args[1] 26 | 27 | 28 | 29 | master = Path(os.getcwd()) / conf_name 30 | 31 | # For configs that are generated in the FACTS docker, file structure is relative to the docker paths. 
# This function serves to ensure that configs generated in the FACTS docker have their filepaths
# converted to paths that are relative to this script
def read_replace_conf(master):
    """Read the run config at ``master`` and parse it as YAML.

    Any occurrence of ``/opt/dscim-facts-epa`` (the mount point used inside
    the FACTS docker container) is rewritten to the parent of the current
    working directory before parsing, so docker-generated configs work when
    run directly on the host.

    Raises FileNotFoundError with a setup hint when the config is missing.
    """
    try:
        with open(master, "r") as stream:
            docker_replace = stream.read().replace('/opt/dscim-facts-epa',
                                                   str(Path(os.getcwd()).parent.absolute()))
            conf = yaml.safe_load(docker_replace)
    except FileNotFoundError:
        raise FileNotFoundError("Please run directory_setup.py or place the config in your current working directory")
    return(conf)

conf = read_replace_conf(master)

# Sector version strings, used to build the combined (CAMEL) sector name
coastal_v = str(conf["coastal_version"])
mortality_v = str(conf["mortality_version"])
CAMEL_v = f"CAMEL_m{mortality_v}_c{coastal_v}"

# Maps "<eta>_<rho>" strings to human-readable discount-rate labels
discount_conversion_dict = {'1.016010255_9.149608e-05': '1.5% Ramsey',
                            '1.244459066_0.00197263997': '2.0% Ramsey',
                            '1.421158116_0.00461878399': '2.5% Ramsey'}
# Maps internal gas names to the names used in output filenames/coords
gas_conversion_dict = {'CO2_Fossil':'CO2',
                       'N2O':'N2O',
                       'CH4':'CH4'}

# Gases from the config that have no special output name pass through unchanged
for gas in conf['gas_conversions'].keys():
    if gas not in gas_conversion_dict.keys():
        gas_conversion_dict[gas] = gas

def makedir(path):
    """Create ``path`` (including parents) if it does not already exist."""
    # exist_ok=True makes this race-safe; the previous exists()-then-create
    # pattern could raise if another process created the directory in between.
    os.makedirs(path, exist_ok=True)


def generate_meta(menu_item):
    """Collect run metadata for a DSCIM menu object into a flat dict.

    Gathers machine name, git commit, and the scalar attributes of the menu
    object and its climate/econ members, then normalizes them into strings
    suitable for writing as file attributes.

    NOTE(review): reads the module-level ``terr_us`` assigned by the CLI
    prompts below; calling this function before the prompts run would raise
    NameError — confirm before reusing this module as a library.
    """
    # find machine name
    machine_name = os.getenv("HOSTNAME")
    if machine_name is None:
        try:
            machine_name = os.uname()[1]
        except AttributeError:
            machine_name = "unknown"

    # find git commit hash
    try:
        gitlabel = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
    except subprocess.CalledProcessError:
        gitlabel = "unknown"

    meta = {"Author": "Climate Impact Lab",
            "Date Created": date.today().strftime("%d/%m/%Y"),
            "Units": "2020 PPP-adjusted USD"}

    # Pull non-array attributes off the menu object and its climate/econ members
    for attr_dict in [
        vars(menu_item),
        vars(vars(menu_item)["climate"]),
        vars(vars(menu_item)["econ_vars"]),
    ]:
        meta.update(
            {
                k: v
                for k, v in attr_dict.items()
                if (type(v) not in [xr.DataArray, xr.Dataset, pd.DataFrame])
                and k not in ["damage_function", "logger"]
            }
        )

    # update with git hash and machine name
    meta.update(dict(machine=machine_name, commit=gitlabel,url=f"https://github.com/ClimateImpactLab/dscim-epa/commit/{gitlabel}"))

    # convert to strs
    meta = {k: v if type(v) in [int, float] else str(v) for k, v in meta.items()}


    # exclude irrelevant attrs
    irrelevant_keys = ['econ_vars',
                       'climate',
                       'subset_dict',
                       'filename_suffix',
                       'ext_subset_start_year',
                       'ext_subset_end_year',
                       'ext_end_year',
                       'ext_method',
                       'clip_gmsl',
                       'scenario_dimensions',
                       'scc_quantiles',
                       'quantreg_quantiles',
                       'quantreg_weights',
                       'full_uncertainty_quantiles',
                       'extrap_formula',
                       'fair_dims',
                       'sector_path',
                       'save_files',
                       'save_path',
                       'delta',
                       'histclim',
                       'ce_path',
                       'gmst_path',
                       'gmsl_path']
    for k in irrelevant_keys:
        if k in meta.keys():
            del meta[k]

    # adjust attrs
    meta['emission_scenarios'] = 'RFF-SPv2'
    meta['damagefunc_base_period'] = meta.pop('base_period')
    meta['socioeconomics_path'] = meta.pop('path')
    # 'gases' was stringified above (e.g. "['CO2_Fossil', 'CH4']"); recover the
    # list by splitting on quotes and dropping the bracket/comma fragments
    meta['gases'] = meta['gases'].split("'")
    meta['gases'] = [e for e in meta['gases'] if e not in (', ','[',']')]
    meta['gases'] = [gas_conversion_dict[gas] for gas in meta['gases']]

    if meta['sector']=='CAMEL_m1_c0.20':
        meta['sector'] = 'combined'
    else:
        meta['sector'] = re.split("_",meta['sector'])[0]

    if terr_us:
        meta.update(discounting_socioeconomics_path = f"{conf['rffdata']['socioec_output']}/rff_global_socioeconomics.nc4")

    return meta


# Merge attrs
def merge_meta(attrs,meta):
    """Fold one run's ``meta`` dict into the accumulated ``attrs`` dict.

    On the first call (empty ``attrs``) this copies ``meta`` in wholesale.
    Afterwards, any key whose new value is not already represented is turned
    into (or appended to) a list of values, so attrs records every distinct
    setting seen across runs. Mutates and returns ``attrs``.
    """
    if len(attrs)==0:
        attrs.update(meta)
    else:
        for meta_keys in attrs.keys():
            if str(meta[meta_keys]) not in str(attrs[meta_keys]):
                if type(attrs[meta_keys])!=list:
                    update = [attrs[meta_keys]]
                    update.append(meta[meta_keys])
                    attrs[meta_keys] = update
                else:
                    attrs[meta_keys].append(meta[meta_keys])
    return attrs
################################################################################

# Function for one run of SCGHGs
def epa_scghg(sector = "CAMEL_m1_c0.20",
              terr_us = False,
              eta = 2.0,
              rho = 0.0,
              pulse_year = 2020,
              discount_type = "euler_ramsey",
              menu_option = "risk_aversion",
              conf = conf):
    """Compute one set of SC-GHGs for a single sector/discounting/pulse-year.

    Parameters
    ----------
    sector : str
        Sector name as it appears in the damage-function library.
    terr_us : bool
        If True, damages are limited to the territorial U.S. (discounting
        still uses global socioeconomics).
    eta, rho : float
        Ramsey discounting parameters.
    pulse_year : int
        Year of the GHG pulse.
    discount_type, menu_option : str
        Only "euler_ramsey"/"risk_aversion" are supported.
    conf : dict
        Parsed run config; its "global_parameters" entry is overwritten here.

    Returns
    -------
    list
        [adjustments (xr.Dataset of scghg + adjustment_factor),
         gcnp (global consumption no pulse, 2020 USD), meta (dict)].
    """

    if menu_option != "risk_aversion":
        raise Exception("DSCIM-EPA provides only 'risk_aversion' SCGHGs")

    # Manually add other config parameters that are not meant to change run to run
    conf["global_parameters"] = {'fair_aggregation': ["uncollapsed"],
                                 'subset_dict': {'ssp': []},
                                 'weitzman_parameter': [0.5],
                                 'save_files': []}

    # there can be a molecular conversion bundled in with the conversion factor, so it isn't always just tonnes
    # for example with carbon vs carbon dioxide conversion
    gas_conversion_factors = {
        "coords": {
            "gas": { "dims":"gas", "data":list(conf['gas_conversions'].keys()), "attrs": {"units": "tonnes"}}
        },
        "dims":"gas",
        "data":list(conf['gas_conversions'].values())}

    # This class allows us to bypass reading in the pulse conversion netcdf file
    # and instead set up pulse conversions from a user config file.
    class Climate2(Climate):
        @property
        def conversion(self):
            """Conversion factors to turn the pulse units
            into the appropriate units for an SCC calculation"""

            conversion = xr.DataArray.from_dict(gas_conversion_factors)
            return conversion

    # Read in U.S. and global socioeconomic files
    if terr_us:
        econ_terr_us = EconVars(
            path_econ=f"{conf['rffdata']['socioec_output']}/rff_USA_socioeconomics.nc4"
        )
        # List of kwargs to add to kwargs read in from the config file for direct territorial U.S. damages
        add_kwargs = {
            "econ_vars": econ_terr_us,
            "climate_vars": Climate2(**conf["rff_climate"], pulse_year=pulse_year),
            # sector[:-4] strips the "_USA" suffix added by the CLI below
            "formula": conf["sectors"][sector if not terr_us else sector[:-4]]["formula"],
            "discounting_type": discount_type,
            "sector": sector,
            "ce_path": None,
            "save_path": None,
            "eta": eta,
            "rho": rho,
            "damage_function_path": Path(conf['paths']['rff_damage_function_library']) / sector,
            "ecs_mask_path": None,
            "ecs_mask_name": None,
            "fair_dims":[],
        }

        # An extra set of kwargs is needed when running U.S. SCGHGs
        # Combine config kwargs with the add_kwargs for direct territorial U.S. damages
        kwargs_terr_us = conf["global_parameters"].copy()
        for k, v in add_kwargs.items():
            assert (
                k not in kwargs_terr_us.keys()
            ), f"{k} already set in config. Please check `global_parameters`."
            kwargs_terr_us.update({k: v})


    econ_glob = EconVars(
        path_econ=f"{conf['rffdata']['socioec_output']}/rff_global_socioeconomics.nc4"
    )

    # This class allows for a shorter naming convention for the damage function files (rounding etas and rhos in the filename)
    class RiskAversionRecipe(dscim.menu.risk_aversion.RiskAversionRecipe):
        @property
        def damage_function_coefficients(self) -> xr.Dataset:
            """
            Load damage function coefficients if the coefficients are provided by the user.
            Otherwise, compute them.
            """
            if self.damage_function_path is not None:
                return xr.open_dataset(
                    f"{self.damage_function_path}/{self.NAME}_{self.discounting_type}_eta{round(self.eta,3)}_rho{round(self.rho,3)}_dfc.nc4"
                )
            else:
                return self.damage_function["params"]

    # List of kwargs to add to kwargs read in from the config file for global discounting and damages
    add_kwargs = {
        "econ_vars": econ_glob,
        "climate_vars": Climate2(**conf["rff_climate"], pulse_year=pulse_year),
        "formula": conf["sectors"][sector if not terr_us else sector[:-4]]["formula"],
        "discounting_type": discount_type,
        "sector": sector,
        "ce_path": None,
        "save_path": None,
        "eta": eta,
        "rho": rho,
        "damage_function_path": Path(conf['paths']['rff_damage_function_library']) / [sector if not terr_us else sector[:-4]][0],
        "ecs_mask_path": None,
        "ecs_mask_name": None,
        "fair_dims":[],
    }

    # Combine config kwargs with the add_kwargs for global discounting and damages
    kwargs_global = conf["global_parameters"].copy()
    for k, v in add_kwargs.items():
        assert (
            k not in kwargs_global.keys()
        ), f"{k} already set in config. Please check `global_parameters`."
        kwargs_global.update({k: v})

    # For both territorial U.S. and global SCGHGs, endogenous Ramsey discounting based on global socioeconomics is used
    menu_item_global = RiskAversionRecipe(**kwargs_global)
    df = menu_item_global.uncollapsed_discount_factors

    # Compute damages for global or U.S. runs
    if terr_us:
        menu_item_terr_us = RiskAversionRecipe(**kwargs_terr_us)
        md = menu_item_terr_us.uncollapsed_marginal_damages
    else:
        md = menu_item_global.uncollapsed_marginal_damages

    # The 113.648/112.29 deflates the SCGHGs from 2019 dollars to 2020 dollars
    conv_2019to2020 = 113.648/112.29

    # Compute SCGHGs
    # Multiplying marginal damages by discount factors and summing across years creates the SCGHGs
    scghgs = (
        (md.rename(marginal_damages = 'scghg') * df.rename(discount_factor = 'scghg'))
        .sum("year")* conv_2019to2020
    )

    # Code to calculate epa-spec adjustment factors
    gcnp = menu_item_global.global_consumption_no_pulse.rename('gcnp')

    # Isolate population from socioeconomics
    pop = xr.open_dataset(f"{conf['rffdata']['socioec_output']}/rff_global_socioeconomics.nc4").sel(region = 'world', drop = True).pop

    # Calculate global consumption no pulse per population
    a = xr.merge([pop, gcnp])
    ypv = a.gcnp/a.pop

    # Create adjustment factor using adjustment.factor = (ypc^-eta)/mean(ypc^-eta)
    c = np.power(ypv, -eta).sel(year = pulse_year, drop = True)
    adj = (c/c.mean()).rename('adjustment_factor')

    # Merge adjustments with uncollapsed scghgs
    adjustments = xr.merge([scghgs,adj.to_dataset()])

    # generate attrs
    if terr_us:
        meta = generate_meta(menu_item_terr_us)
    else:
        meta = generate_meta(menu_item_global)

    return([adjustments, gcnp* conv_2019to2020, meta])

# Function to perform multiple runs of SCGHGs and combine into one file to save out
def epa_scghgs(sectors,
               terr_us,
               etas_rhos,
               risk_combos = (('risk_aversion', 'euler_ramsey'),),
               pulse_years = (2020,2030,2040,2050,2060,2070,2080),
               gcnp = False,
               uncollapsed = False,
               conf = conf):
    """Run epa_scghg over every requested combination and save CSV/netcdf output.

    Parameters
    ----------
    sectors : list of str
    terr_us : bool
        Territorial-U.S. damages if True, global otherwise.
    etas_rhos : list of [eta, rho] pairs
    risk_combos : iterable of (menu_option, discount_type) pairs
        Fix: the previous default ``(('risk_aversion', 'euler_ramsey'))`` was a
        single tuple (the parentheses do not nest), so iterating it yielded the
        two strings and ``j[0]``/``j[1]`` indexed characters. The trailing
        comma makes it a 1-tuple of pairs, matching what the CLI passes.
    pulse_years : iterable of int
    gcnp : bool
        Also save global consumption no pulse as netcdf.
    uncollapsed : bool
        Also save the full 10,000-draw SC-GHG distributions.
    conf : dict
        Parsed run config (save paths, gases, climate settings).
    """
    attrs={}

    # Nested for loops to run each combination of SCGHGs requested
    # Each run of the outer loop saves one set of SCGHGs
    # The inner loop combines all SCGHG runs for that file
    for j, pulse_year in product(risk_combos, pulse_years):
        # These arrays will be populated with data arrays to be combined
        all_arrays_uscghg = []
        all_arrays_gcnp = []

        discount_type= j[1]
        menu_option = j[0]
        for i, sector in product(etas_rhos, sectors):

            if re.split("_",sector)[0]=="CAMEL":
                sector_short = "combined"
            else:
                sector_short = re.split("_",sector)[0]

            eta = i[0]
            rho = i[1]

            print(f"Calculating {'territorial U.S.' if terr_us else 'global'} {sector_short} scghgs {'and gcnp' if gcnp else ''} \n discount rate: {discount_conversion_dict[str(eta) + '_' + str(rho)]} \n pulse year: {pulse_year}")
            df_single_scghg, df_single_gcnp, meta = epa_scghg(sector = sector,
                                                              terr_us = terr_us,
                                                              discount_type = discount_type,
                                                              menu_option = menu_option,
                                                              eta = eta,
                                                              rho = rho,
                                                              pulse_year = pulse_year,
                                                              conf = conf)

            # Creates new coordinates to differentiate between runs
            # For SCGHGs
            df_scghg = df_single_scghg.assign_coords(discount_rate = discount_conversion_dict[str(eta) + "_" + str(rho)], menu_option = menu_option, sector = sector_short)
            df_scghg_expanded = df_scghg.expand_dims(['discount_rate','menu_option', 'sector'])
            if 'simulation' in df_scghg_expanded.dims:
                df_scghg_expanded = df_scghg_expanded.drop_vars('simulation')
            all_arrays_uscghg = all_arrays_uscghg + [df_scghg_expanded]

            # For global consumption no pulse
            df_gcnp = df_single_gcnp.assign_coords(discount_rate = discount_conversion_dict[str(eta) + "_" + str(rho)], menu_option = menu_option, sector = sector_short)
            df_gcnp_expanded = df_gcnp.expand_dims(['discount_rate','menu_option', 'sector'])
            if 'simulation' in df_gcnp_expanded.dims:
                df_gcnp_expanded = df_gcnp_expanded.drop_vars('simulation')
            all_arrays_gcnp = all_arrays_gcnp + [df_gcnp_expanded]

            attrs = merge_meta(attrs,meta)

        print("Processing...")
        df_full_scghg = xr.combine_by_coords(all_arrays_uscghg)
        df_full_gcnp = xr.combine_by_coords(all_arrays_gcnp)

        # Changes coordinate names of gases
        df_full_scghg = df_full_scghg.assign_coords(gas=[gas_conversion_dict[gas] for gas in df_full_scghg.gas.values])

        # Splits SCGHGs by gas and saves them out separately
        # For uncollapsed SCGHGs
        if conf_name != "generated_conf.yml":
            # raw string: '\.' is an invalid escape sequence in newer Python
            conf_savename = re.split(r'\.', conf_name)[0] + "-"
        else:
            conf_savename = ""
        gases = conf["rff_climate"]["gases"]
        if uncollapsed:
            for gas in gases:
                out_dir = Path(conf['save_path']) / f"{'territorial_us' if terr_us else 'global'}_scghgs" / 'full_distributions' / gas
                makedir(out_dir)
                # NOTE(review): .reindex() with no arguments is a no-op;
                # .reset_index() may have been intended — left unchanged
                uncollapsed_gas_scghgs = df_full_scghg.sel(gas = gas_conversion_dict[gas], drop = True).to_dataframe().reindex()
                print(f"Saving {'territorial U.S.' if terr_us else 'global'} uncollapsed {sector_short} sc-{gas} \n pulse year: {pulse_year}")
                uncollapsed_gas_scghgs.to_csv(out_dir / f"{conf_savename}sc-{gas}-dscim-{sector_short}-{pulse_year}-n10000.csv")
                attrs_save = attrs.copy()
                attrs_save['gases'] = gas
                with open(out_dir / f"{conf_savename}attributes-{gas}-{sector_short}.txt", 'w') as f:
                    for key, value in attrs_save.items():
                        f.write('%s:%s\n' % (key, value))

        # Applies the adjustment factor to convert to certainty equivalent SCGHGs
        df_full_scghg = (df_full_scghg.adjustment_factor * df_full_scghg.scghg).mean(dim = 'runid')

        # Splits and saves collapsed SCGHGs
        for gas in gases:
            out_dir = Path(conf['save_path']) / f"{'territorial_us' if terr_us else 'global'}_scghgs"
            makedir(out_dir)
            collapsed_gas_scghg = df_full_scghg.sel(gas = gas_conversion_dict[gas], drop = True).rename('scghg').to_dataframe().reindex()
            print(f"Saving {'territorial U.S.' if terr_us else 'global'} collapsed {sector_short} sc-{gas} \n pulse year: {pulse_year}")
            collapsed_gas_scghg.to_csv(out_dir / f"{conf_savename}sc-{gas}-dscim-{sector_short}-{pulse_year}.csv")

        # Creates attribute files
        with open(out_dir / f"attributes-{sector_short}.txt", 'w') as f:
            for key, value in attrs.items():
                f.write('%s:%s\n' % (key, value))

    # Saves global consumption no pulse
    # Fewer GCNPs are saved because they vary across fewer dimensions than SCGHGs
    if gcnp:
        out_dir = Path(conf['save_path']) / 'gcnp'
        makedir(out_dir)
        df_full_gcnp.attrs=attrs
        print(f"Saving {sector_short} global consumption no pulse (gcnp)")
        df_full_gcnp.to_netcdf(out_dir / f"{conf_savename}gcnp-dscim-{sector_short}.nc4")
        print(f"gcnp is available in {str(out_dir)}")

    print(f"{'territorial_us' if terr_us else 'global'}_scghgs are available in {str(Path(conf['save_path']))}/{'territorial_us' if terr_us else 'global'}_scghgs")


# Command line interface for DSCIM-epa runs
f = Figlet(font='slant', width=100)
print(f.renderText('DSCIM'))
print(f"... dscim-facts-epa version {VERSION} ...")

pulse_years = conf["rffdata"]["pulse_years"]
pulse_year_choices = [(str(i), i) for i in pulse_years]
questions = [
    inquirer.List("sector",
                  message= 'Select sector',
                  choices= [
                      ('Combined',CAMEL_v),
                      ('Coastal',"coastal_v" + coastal_v),
                      ('Agriculture','agriculture'),
                      ('Mortality',"mortality_v" + mortality_v),
                      ('Energy','energy'),
                      ('Labor','labor'),
                  ],
                  default = [CAMEL_v]),
    inquirer.Checkbox("eta_rhos",
                      message= 'Select discount rates',
                      choices= [
                          (
                              '1.5% Ramsey',
                              [1.016010255, 9.149608e-05]
                          ),
                          (
                              '2.0% Ramsey',
                              [1.244459066, 0.00197263997]
                          ),
                          (
                              '2.5% Ramsey',
                              [1.421158116, 0.00461878399]
                          ),
                      ],
                      default = [[1.016010255, 9.149608e-05],
                                 [1.244459066, 0.00197263997],
                                 [1.421158116, 0.00461878399]]),
    inquirer.Checkbox("pulse_year",
                      message= 'Select pulse years',
                      choices= pulse_year_choices,
                      default = pulse_years),
    inquirer.List("U.S.",
                  message= 'Select valuation type',
                  choices= [
                      ('Global',False),
                      ('Territorial U.S.',True)
                  ]),
    inquirer.Checkbox("files",
                      message= 'Optional files to save (will increase runtime substantially)',
                      choices= [
                          (
                              'Global consumption no pulse',
                              'gcnp'
                          ),
                          (
                              'Uncollapsed scghgs',
                              'uncollapsed'
                          ),
                      ])

]

answers = inquirer.prompt(questions)
etas_rhos = answers['eta_rhos']
sector = [answers['sector']]
pulse_years = answers['pulse_year']
terr_us = answers['U.S.']
gcnp = True if 'gcnp' in answers['files'] else False
uncollapsed = True if 'uncollapsed' in answers['files'] else False

# Territorial-U.S. runs use the "_USA" damage-function variants
if terr_us:
    sector = [i + "_USA" for i in sector]

if len(etas_rhos) == 0:
    raise ValueError('You must select at least one eta, rho combination')

risk_combos = [['risk_aversion', 'euler_ramsey']] # Default
gases = ['CO2_Fossil', 'CH4', 'N2O'] # Default
epa_scghgs(sector,
           terr_us,
           etas_rhos,
           risk_combos,
           pulse_years=pulse_years,
           gcnp = gcnp,
           uncollapsed = uncollapsed,
           conf = conf)


print(f"Full results are available in {str(Path(conf['save_path']))}")