├── runtime.txt ├── requirements.txt ├── Procfile ├── notebooks ├── utils │ ├── cime.py │ ├── __init__.py │ ├── config.py │ ├── utils_units.py │ ├── compare_ts_and_hist.py │ ├── utils.py │ ├── PlotTypeClass.py │ ├── Plotting.py │ └── CaseClass.py ├── run_all.py ├── dashboard.yaml ├── run_notebooks.sh ├── plot_suite_maps_0095_1deg.ipynb ├── diag_metadata.yaml ├── compare_ts_and_hist_003.ipynb ├── plot_suite_maps_0001_003.ipynb ├── plot_suite_maps_0001_004.ipynb ├── trend_maps.003.ipynb ├── trend_maps.004.ipynb ├── plot_suite_003.ipynb ├── plot_suite_004.ipynb ├── compare_ts_and_hist_004.ipynb ├── plot_suite_1deg.ipynb └── gen_csv.ipynb ├── .github └── workflows │ ├── verify_pre-commit.yaml │ └── ci.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── environments └── environment.yaml ├── tests ├── test_utils_units.py ├── xr_ds_ex.py └── test_utils.py ├── data_reshaping ├── pop.h_t13.sh ├── cice.h_t13.sh ├── cice.h1_t13.sh ├── pop.h.nyear1_t13.sh ├── pop.h.nday1_t13.sh └── run_all.py └── README.md /runtime.txt: -------------------------------------------------------------------------------- 1 | python-3.9.2 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | hvplot 2 | jupyterlab 3 | fsspec 4 | psutil 5 | aiohttp 6 | git+https://github.com/andersy005/panelify.git 7 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | web: panel serve --address="0.0.0.0" --port=$PORT notebooks/Interactive_Dashboard.ipynb --allow-websocket-origin=hires-cesm-analysis.dokku.projectpythia.org --stats-log-frequency 100000 --mem-log-frequency 100000 2 | -------------------------------------------------------------------------------- /notebooks/utils/cime.py: -------------------------------------------------------------------------------- 1 | """ 2 | methods specific to CIME, but independent of models/components that are run with CIME 3 | """ 4 | 5 | import subprocess 6 | 7 | 8 | def cime_xmlquery(caseroot, varname): 9 | """run CIME's xmlquery for varname in the directory caseroot, return the value""" 10 | return subprocess.check_output( 11 | ["./xmlquery", "--value", varname], cwd=caseroot 12 | ).decode() 13 | -------------------------------------------------------------------------------- /notebooks/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # make methods available for usage externally and in notebooks 2 | 3 | from .CaseClass import CaseClass 4 | from .Plotting import ( 5 | compare_fields_at_lat_lon, 6 | plot_dict_with_date_keys, 7 | summary_plot_global_ts, 8 | summary_plot_histogram, 9 | summary_plot_maps, 10 | trend_plot, 11 | ) 12 | from .utils import ( 13 | gen_output_roots_from_caseroot, 14 | get_varnames_from_metadata_list, 15 | timeseries_and_history_comparison, 16 | generate_plot_catalog, 17 | ) 18 | -------------------------------------------------------------------------------- /.github/workflows/verify_pre-commit.yaml: -------------------------------------------------------------------------------- 1 | name: Run all pre-commit checks one more time 2 | 3 | on: 4 | push: 5 | branches: "*" 6 | pull_request: 7 | branches: master 8 | 9 | jobs: 10 | pre-commit: 11 | name: pre-commit 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: checkout 16 | uses: actions/checkout@v2 17 | 18 | - 
name: set up python 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: 3.8 22 | 23 | - name: Run pre-commit 24 | uses: pre-commit/action@v2.0.0 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/.ipynb_checkpoints 2 | **/__pycache__ 3 | notebooks/logs 4 | data_reshaping/logs 5 | 6 | # ignore images directory, except for the image catalog 7 | # complicated, but it works... https://stackoverflow.com/a/16318111 8 | notebooks/images/* 9 | !notebooks/images/g.e22.G1850ECO_JRA_HR.TL319_t13.003 10 | !notebooks/images/g.e22.G1850ECO_JRA_HR.TL319_t13.004 11 | !notebooks/images/g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001 12 | notebooks/images/g.e22.G1850ECO_JRA_HR.TL319_t13.003/* 13 | notebooks/images/g.e22.G1850ECO_JRA_HR.TL319_t13.004/* 14 | notebooks/images/g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001/* 15 | !notebooks/images/*/png_catalog.csv 16 | -------------------------------------------------------------------------------- /notebooks/run_all.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | This script is intended for developers to rerun all dask-free notebooks without 4 | launching JupyterHub or a jupyter lab session. 5 | It relies on the run_notebooks function. 6 | """ 7 | 8 | import os 9 | 10 | # For now, plot_suite and trend_maps don't run with nbconvert 11 | # It may be NCAR_jobqueue related... 12 | notebooks = [] 13 | notebooks.append("Sanity\ Check.ipynb") 14 | notebooks.append("Pull\ info\ from\ logs.ipynb") 15 | notebooks.append(f"compare_ts_and_hist_*.ipynb") 16 | notebooks.append(f"plot_suite_maps_*.ipynb") 17 | 18 | cmd = "./run_notebooks.sh " + " ".join(notebooks) 19 | os.system(cmd) 20 | -------------------------------------------------------------------------------- /notebooks/utils/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper functions to find files in the various directories 3 | """ 4 | 5 | import os 6 | 7 | 8 | ################################################################################ 9 | 10 | 11 | def add_first_date_and_reformat(date_list): 12 | new_list = [] 13 | for date in date_list: 14 | year = int(date[:4]) 15 | month = int(date[4:6]) 16 | day = int(date[6:]) 17 | if len(new_list) == 0: 18 | if day > 1: 19 | first_date = f"{year:04}-{month:02}-{(day-1):02}" 20 | else: 21 | first_date = "first" 22 | new_list.append(first_date) 23 | new_list.append(f"{year:04}-{month:02}-{day:02}") 24 | return new_list 25 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v3.2.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-yaml 8 | 9 | - repo: https://github.com/ambv/black 10 | rev: 19.10b0 # consistent with conda environment 11 | hooks: 12 | - id: black 13 | args: [] 14 | 15 | - repo: https://github.com/deathbeds/prenotebook 16 | rev: f5bdb72a400f1a56fe88109936c83aa12cc349fa 17 | hooks: 18 | - id: prenotebook 19 | args: 20 | [ 21 | '--keep-output', 22 | '--keep-metadata', 23 | '--keep-execution-count', 24 | '--keep-empty', 25 | ] 26 | -------------------------------------------------------------------------------- /notebooks/dashboard.yaml: 
-------------------------------------------------------------------------------- 1 | summary_map: 2 | keys: 3 | - "casename" 4 | - "varname" 5 | - "date" 6 | - "apply_log10" 7 | - "sel_dict" 8 | column_widget_types: 9 | date: "discrete_slider" 10 | 11 | time_series: 12 | keys: 13 | - "casename" 14 | - "varname" 15 | - "time_period" 16 | - "sel_dict" 17 | 18 | histogram: 19 | keys: 20 | - "casename" 21 | - "varname" 22 | - "time_period" 23 | - "apply_log10" 24 | - "sel_dict" 25 | column_widget_types: 26 | time_period: "discrete_slider" 27 | 28 | trend_hist: 29 | keys: 30 | - "casename" 31 | - "varname" 32 | - "time_period" 33 | - "sel_dict" 34 | 35 | trend_map: 36 | keys: 37 | - "casename" 38 | - "varname" 39 | - "time_period" 40 | - "sel_dict" 41 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: 3 | push: 4 | branches: 5 | - "*" 6 | pull_request: 7 | branches: 8 | - "master" 9 | 10 | jobs: 11 | build: 12 | name: Test on ubuntu 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | steps: 17 | - name: checkout 18 | uses: actions/checkout@v2 19 | 20 | - name: Install conda 21 | uses: conda-incubator/setup-miniconda@v2 22 | with: 23 | auto-update-conda: true 24 | activate-environment: hires-marbl 25 | environment-file: environments/environment.yaml 26 | auto-activate-base: false 27 | 28 | - name: Show conda environment 29 | shell: bash -l {0} 30 | run: conda list 31 | 32 | - name: Run Tests 33 | shell: bash -l {0} 34 | run: pytest -v tests/ 35 | -------------------------------------------------------------------------------- /environments/environment.yaml: -------------------------------------------------------------------------------- 1 | name: hires-marbl 2 | channels: 3 | - conda-forge 4 | - nodefaults 5 | dependencies: 6 | - aiohttp 7 | - bokeh 8 | - bottleneck 9 | - cartopy 10 | - cf-units 11 | - cftime 12 | - cmocean 13 | - dask-jobqueue>=0.7.2 14 | - dask-labextension 15 | - dask-mpi 16 | - dask==2021.7.0 17 | - distributed==2021.7.0 18 | - eofs 19 | - esmpy 20 | - fsspec 21 | - hvplot 22 | - intake 23 | - intake-esm 24 | - ipykernel 25 | - ipywidgets 26 | - jupyter-server-proxy 27 | - jupyterlab>=3 28 | - matplotlib==3.4.2 29 | - metpy 30 | - nc-time-axis 31 | - ncar-jobqueue 32 | - netcdf4 33 | - nodejs 34 | - numba 35 | - numpy 36 | - pandas 37 | - pint 38 | - pip 39 | - pop-tools 40 | - pre-commit 41 | - pytest 42 | - python=3.9 43 | - scipy 44 | - seaborn 45 | - seawater 46 | - statsmodels 47 | - toolz 48 | - tqdm 49 | - watermark 50 | - xarray==0.18.2 51 | - xesmf 52 | - xgcm 53 | - xhistogram 54 | - xrft 55 | - zarr 56 | - pip 57 | - pip: 58 | - -r ../requirements.txt 59 | -------------------------------------------------------------------------------- /notebooks/utils/utils_units.py: -------------------------------------------------------------------------------- 1 | """ 2 | utility functions related to units 3 | """ 4 | 5 | import re 6 | 7 | from pint import UnitRegistry 8 | import xarray as xr 9 | 10 | 11 | def conv_units(da, units_out, units_scalef=None): 12 | """ 13 | return a copy of da, with units converted to units_out 14 | """ 15 | # use apply_ufunc to preserve dask-ness of da 16 | func = lambda values: _conv_units_np( 17 | values, da.attrs["units"], units_out, units_scalef 18 | ) 19 | da_out = xr.apply_ufunc( 20 | func, da, keep_attrs=True, dask="parallelized", 
output_dtypes=[da.dtype] 21 | ) 22 | da_out.attrs["units"] = units_out 23 | da_out.encoding = da.encoding 24 | return da_out 25 | 26 | 27 | def _clean_units(units): 28 | """replace some troublesome unit terms with acceptable replacements""" 29 | replacements = { 30 | "kgC": "kg", 31 | "gC": "g", 32 | "gC13": "g", 33 | "gC14": "g", 34 | "gN": "g", 35 | "unitless": "1", 36 | "years": "common_years", 37 | "yr": "common_year", 38 | "meq": "mmol", 39 | "neq": "nmol", 40 | } 41 | units_split = re.split(r"( |\(|\)|\^|\*|/|-[0-9]+|[0-9]+)", units) 42 | units_split_repl = [ 43 | replacements[token] if token in replacements else token for token in units_split 44 | ] 45 | return "".join(units_split_repl) 46 | 47 | 48 | def _conv_units_np(values, units_in, units_out, units_scalef=None): 49 | """ 50 | return a copy of numpy array values, with units converted from units_in to units_out 51 | """ 52 | ureg = UnitRegistry() 53 | values_in_pint = ureg.Quantity(values, ureg(_clean_units(units_in))) 54 | if units_scalef is not None: 55 | values_in_pint *= ureg(_clean_units(units_scalef)) 56 | values_out_pint = values_in_pint.to(_clean_units(units_out)) 57 | return values_out_pint.magnitude 58 | -------------------------------------------------------------------------------- /tests/test_utils_units.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import pytest 6 | import xarray as xr 7 | import numpy as np 8 | 9 | sys.path.append(os.path.abspath(os.path.join("notebooks", "utils"))) 10 | sys.path.append(os.path.abspath("tests")) 11 | from utils_units import _clean_units, conv_units 12 | from xr_ds_ex import xr_ds_ex 13 | 14 | nyrs = 3 15 | var_const = False 16 | 17 | 18 | @pytest.mark.parametrize( 19 | "units_in, units_out", 20 | [ 21 | # basic example, straight from dictionary 22 | ("years", "common_years"), 23 | # ensure 'gC' in 'degC' doesn't get converted 24 | ("degC", "degC"), 25 | # matches within expressions 26 | ("gN leaf/m^2", "g leaf/m^2"), # LNC 27 | ("gC/m^2/s", "g/m^2/s"), # AR 28 | ("meq/m^3", "mmol/m^3"), # ALK 29 | ( 30 | "(centimeter^2)(meq/m^3 cm/s)", 31 | "(centimeter^2)(mmol/m^3 cm/s)", 32 | ), # ALK_RIV_FLUX integral 33 | ("neq/cm3", "nmol/cm3"), # ABIO_ALK_SURF 34 | ("degC*cm/s", "degC*cm/s"), # T_FLUX_EXCH_INTRF 35 | ("days since 0001-01-01 00:00:00", "days since 0001-01-01 00:00:00"), # time 36 | # multiple matches 37 | ("gC/gN", "g/g"), # LEAFCN 38 | ], 39 | ) 40 | def test_clean_units(units_in, units_out): 41 | assert _clean_units(units_in) == units_out 42 | 43 | 44 | @pytest.mark.parametrize("apply_chunk", [True, False]) 45 | @pytest.mark.parametrize("add_encoding", [True, False]) 46 | def test_conv_units(apply_chunk, add_encoding): 47 | da = xr_ds_ex()["var_ex"] 48 | da.attrs["units"] = "kg" 49 | da.attrs["long_name"] = "var_ex" 50 | if apply_chunk: 51 | da = da.chunk({"time": 12}) 52 | if add_encoding: 53 | da.encoding["_FillValue"] = None 54 | 55 | da_out = conv_units(da, "g") 56 | 57 | assert da_out.attrs["units"] == "g" 58 | assert da_out.encoding == da.encoding 59 | assert da_out.chunks == da.chunks 60 | assert np.all(da_out.values == 1000.0 * da.values) 61 | -------------------------------------------------------------------------------- /notebooks/utils/compare_ts_and_hist.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | A script to verify that converting from history files to time series worked as 
expected 4 | """ 5 | 6 | import xarray as xr 7 | from . import CaseClass 8 | 9 | 10 | def compare_ts_and_hist( 11 | casename, output_roots, stream, year, exclude_vars=["time_bound", "time_bounds"], 12 | ): 13 | """ 14 | Generate a CaseClass object from a given casename. For a given stream 15 | and year, open the history files from the case. Then loop through the 16 | variables (excluding time_bound in POP and time_bounds in CICE) and 17 | verify that those fields are available in time series. 18 | """ 19 | # Set some defaults to pass to open_mfdataset, then apply kwargs argument 20 | open_mfdataset_kwargs = dict() 21 | # data_vars="minimal", to avoid introducing time dimension to time-invariant fields 22 | open_mfdataset_kwargs["data_vars"] = "minimal" 23 | # compat="override", to skip var consistency checks (for speed) 24 | open_mfdataset_kwargs["compat"] = "override" 25 | # coords="minimal", because coords cannot be default="different" if compat="override" 26 | open_mfdataset_kwargs["coords"] = "minimal" 27 | # parallel=True to open files in parallel 28 | open_mfdataset_kwargs["parallel"] = True 29 | 30 | found_all = True 31 | 32 | case = CaseClass.CaseClass(casename, output_roots) 33 | # Return if no time series is available 34 | if not case.check_for_year_in_timeseries_files(year, stream): 35 | return "no time series" 36 | 37 | # Return if no history files are available 38 | history_filenames = case.get_history_files(year, stream) 39 | if len(history_filenames) == 0: 40 | return "no history" 41 | 42 | # Open history files to build dataset 43 | ds_hist = xr.open_mfdataset(history_filenames, **open_mfdataset_kwargs) 44 | vars_to_check = [ 45 | var 46 | for var in ds_hist.data_vars 47 | if "time" in ds_hist[var].coords and not var in exclude_vars 48 | ] 49 | 50 | # Look for each variable in time series 51 | for var in vars_to_check: 52 | if len(case.get_timeseries_files(year, stream, var)) == 0: 53 | print(f"No time series files for {var} in year {year:04}") 54 | found_all = False 55 | 56 | # Return "same" if all variables were found, otherwise return "datasets differ" 57 | if not found_all: 58 | return "datasets differ" 59 | return "same" 60 | 61 | 62 | ######################## 63 | 64 | if __name__ == "__main__": 65 | print("Feature not implemented yet") 66 | -------------------------------------------------------------------------------- /tests/xr_ds_ex.py: -------------------------------------------------------------------------------- 1 | """function for example xarray.Dataset object""" 2 | 3 | import cftime 4 | import numpy as np 5 | import xarray as xr 6 | 7 | days_1yr = np.array( 8 | [31.0, 28.0, 31.0, 30.0, 31.0, 30.0, 31.0, 31.0, 30.0, 31.0, 30.0, 31.0] 9 | ) 10 | 11 | 12 | def gen_time_bounds_values(nyrs=3): 13 | """return numpy array of values of month boundaries""" 14 | time_edges = np.insert(np.cumsum(np.tile(days_1yr, nyrs)), 0, 0) 15 | return np.stack((time_edges[:-1], time_edges[1:]), axis=1) 16 | 17 | 18 | def xr_ds_ex(decode_times=True, nyrs=3, var_const=True, time_mid=True): 19 | """return an example xarray.Dataset object, useful for testing functions""" 20 | 21 | # set up values for Dataset, nyrs yrs of analytic monthly values 22 | time_bounds_values = gen_time_bounds_values(nyrs) 23 | if time_mid: 24 | time_values = 0.5 * time_bounds_values[:, 0] + 0.5 * time_bounds_values[:, 1] 25 | else: 26 | time_values = 0.25 * time_bounds_values[:, 0] + 0.75 * time_bounds_values[:, 1] 27 | time_values_yr = time_values / 365.0 28 | if var_const: 29 | var_values = 
np.ones_like(time_values_yr) 30 | else: 31 | var_values = np.sin(np.pi * time_values_yr) * np.exp(-0.1 * time_values_yr) 32 | 33 | time_units = "days since 0001-01-01" 34 | calendar = "noleap" 35 | 36 | if decode_times: 37 | time_values = cftime.num2date(time_values, time_units, calendar) 38 | time_bounds_values = cftime.num2date(time_bounds_values, time_units, calendar) 39 | 40 | # create Dataset, including time_bounds 41 | time_var = xr.DataArray( 42 | time_values, 43 | name="time", 44 | dims="time", 45 | coords={"time": time_values}, 46 | attrs={"bounds": "time_bounds"}, 47 | ) 48 | if not decode_times: 49 | time_var.attrs["units"] = time_units 50 | time_var.attrs["calendar"] = calendar 51 | time_bounds = xr.DataArray( 52 | time_bounds_values, 53 | name="time_bounds", 54 | dims=("time", "d2"), 55 | coords={"time": time_var}, 56 | ) 57 | var = xr.DataArray( 58 | var_values, name="var_ex", dims="time", coords={"time": time_var} 59 | ) 60 | ds = var.to_dataset() 61 | days_in_month = xr.DataArray( 62 | np.tile(days_1yr, nyrs).squeeze(), 63 | name="days_in_month", 64 | dims="time", 65 | coords={"time": time_var}, 66 | ) 67 | ds = xr.merge([ds, time_bounds, days_in_month]) 68 | 69 | if decode_times: 70 | ds.time.encoding["units"] = time_units 71 | ds.time.encoding["calendar"] = calendar 72 | 73 | return ds 74 | -------------------------------------------------------------------------------- /notebooks/run_notebooks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | usage () { 4 | echo "$0 NOTEBOOK [NOTEBOOK2 ... NOTEBOOKN]" 5 | echo "Submit job(s) to run all notebooks on casper node via jupyter nbconvert" 6 | echo "" 7 | echo "For each specified file, the full call is:" 8 | echo "jupyter nbconvert --to notebook --inplace --ExecutePreprocessor.kernel_name=python \\ 9 | --ExecutePreprocessor.timeout=3600 --execute NOTEBOOK" 10 | echo "" 11 | echo "Output from the pbs job is written in the logs/ directory," 12 | echo "which will be created if it does not exist." 13 | } 14 | 15 | ######################### 16 | 17 | # Function that creates a temporary script 18 | # that is submitted via qsub 19 | submit_pbs_script () { 20 | 21 | nbname=`echo ${notebook} | sed -e "s/ /_/g"` 22 | 23 | echo "running ${notebook}.ipynb..." 24 | cat > ${nbname}.sub << EOF 25 | #!/bin/bash 26 | # 27 | #PBS -N ${nbname} 28 | #PBS -A P93300606 29 | #PBS -l select=1:ncpus=1:mem=100G 30 | #PBS -l walltime=6:00:00 31 | #PBS -q casper 32 | #PBS -j oe 33 | #PBS -m ea 34 | 35 | ${set_env} 36 | jupyter nbconvert --to notebook --inplace --ExecutePreprocessor.kernel_name=python \\ 37 | --ExecutePreprocessor.timeout=3600 --execute "${notebook}.ipynb" 38 | EOF 39 | 40 | qsub ${nbname}.sub 41 | rm -f ${nbname}.sub 42 | } 43 | 44 | ######################## 45 | 46 | # Function that creates a temporary script 47 | # that is submitted via sbatch 48 | submit_slurm_script () { 49 | 50 | nbname=`echo ${notebook} | sed -e "s/ /_/g"` 51 | 52 | echo "running ${notebook}.ipynb..." 
53 | cat > ${nbname}.sub << EOF 54 | #!/bin/bash 55 | # 56 | #SBATCH -n 16 57 | #SBATCH -N 1 58 | #SBATCH --ntasks-per-node=16 59 | #SBATCH -t 6:00:00 60 | #SBATCH -p dav 61 | #SBATCH -J ${nbname} 62 | #SBATCH --account=P93300606 63 | #SBATCH --mem 100G 64 | #SBATCH -e logs/${nbname}.err.%J 65 | #SBATCH -o logs/${nbname}.out.%J 66 | #SBATCH --mail-type=ALL 67 | #SBATCH --mail-user=${USER}@ucar.edu 68 | #SBATCH -m block 69 | 70 | ${set_env} 71 | jupyter nbconvert --to notebook --inplace --ExecutePreprocessor.kernel_name=python \\ 72 | --ExecutePreprocessor.timeout=3600 --execute "${notebook}.ipynb" 73 | EOF 74 | 75 | sbatch ${nbname}.sub 76 | rm -f ${nbname}.sub 77 | } 78 | 79 | ######################### 80 | 81 | if [ $# == 0 ]; then 82 | usage 83 | exit 1 84 | fi 85 | 86 | for args in "$@" 87 | do 88 | if [ "$args" == "-h" ] || [ "$args" == "--help" ]; then 89 | usage 90 | exit 0 91 | fi 92 | done 93 | 94 | # not sure why conda activate doesn't work but source activate does... 95 | set_env="export PATH=/glade/work/${USER}/miniconda3/bin/:$PATH ; source activate hires-marbl || exit -1" 96 | 97 | # make sure log directory exists 98 | mkdir -p logs 99 | 100 | for notebook_full in "$@" 101 | do 102 | if [ ! -f "${notebook_full}" ]; then 103 | echo "WARNING: can not find ${notebook_full}" 104 | continue 105 | fi 106 | notebook=`echo ${notebook_full} | cut -d '.' -f 1` 107 | submit_pbs_script $notebook 108 | done 109 | -------------------------------------------------------------------------------- /data_reshaping/pop.h_t13.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | # 3 | #SBATCH -n 64 4 | #SBATCH -N 4 5 | #SBATCH --ntasks-per-node=16 6 | #SBATCH -t 24:00:00 7 | #SBATCH -p dav 8 | #SBATCH -J Pop.h_t13 9 | #SBATCH --account=P93300606 10 | #SBATCH --mem 100G 11 | #SBATCH -e logs/Pop.h_t13.err.%J 12 | #SBATCH -o logs/Pop.h_t13.out.%J 13 | #SBATCH -m block 14 | # 15 | module purge 16 | conda deactivate || echo "conda not loaded" 17 | # 18 | # PARSE COMMAND LINE ARGUMENTS 19 | CASE=${1} ; export CASE 20 | ARCHIVE_ROOT=${2} 21 | YEAR=${3} 22 | echo "Reshaping year ${YEAR} for ${CASE}..." 23 | # 24 | cd /glade/p/cesm/postprocessing_dav/cesm-env2/bin 25 | . activate 26 | # 27 | module load intel/17.0.1 28 | module load ncarenv 29 | module load ncarcompilers 30 | module load impi 31 | module load netcdf/4.6.1 32 | module load nco/4.7.4 33 | module load ncl/6.4.0 34 | # 35 | HIST=pop.h ; export HIST 36 | # 37 | PATH=/glade/p/cesm/postprocessing_dav/cesm-env2/bin:/usr/local/bin:${PATH} ; export PATH 38 | # 39 | NCKS=`which ncks` ; export NCKS 40 | PROCHOST=`hostname`;export PROCHOST 41 | # 42 | BASEDIR=/glade/u/home/strandwg/CCP_Processing_Suite 43 | LOCALDSK=${ARCHIVE_ROOT}/${CASE} ; export LOCALDSK 44 | PROCBASE=/glade/scratch/$USER/T13/${CASE} ; export PROCBASE 45 | # 46 | HTYP=`echo $HIST | cut -d'.' -f1` ; export HTYP 47 | case "$HTYP" in 48 | cam2 | cam ) 49 | COMP_NAME=atm ;; 50 | cism ) 51 | COMP_NAME=glc ;; 52 | clm2 ) 53 | COMP_NAME=lnd ;; 54 | pop ) 55 | COMP_NAME=ocn ;; 56 | rtm | mosart ) 57 | COMP_NAME=rof ;; 58 | cice | csim ) 59 | COMP_NAME=ice ;; 60 | * ) 61 | echo "Unable to continue because "$HIST" not known." 62 | exit 1 ;; 63 | esac 64 | # 65 | LOCAL_HIST=${LOCALDSK}/${COMP_NAME}/hist ; export LOCAL_HIST 66 | LOCAL_PROC=${PROCBASE}/${HIST}/proc ; export LOCAL_PROC 67 | CACHEDIR=${LOCAL_PROC}/COMPLETED ; export CACHEDIR 68 | # 69 | VERBOSITY=0 ; export VERBOSITY 70 | PREFIX="${CACHEDIR}/${CASE}.${HIST}." 
; export PREFIX 71 | NCFORMAT=netcdf4c ; export NCFORMAT ; export NCFORMAT 72 | # 73 | if [ ! -d $LOCAL_PROC ] ; then 74 | mkdir -p $LOCAL_PROC 75 | fi 76 | if [ ! -d $CACHEDIR ] ; then 77 | mkdir -p $CACHEDIR 78 | fi 79 | # 80 | cd $LOCAL_PROC 81 | ln -s -f $BASEDIR/run_slice2series_dav Transpose_Data 82 | # 83 | rm -f ${CASE}.${HIST}.*nc 84 | if [ ! -f ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} ] ; then 85 | ln -s -f ${LOCAL_HIST}/${CASE}.${HIST}.${YEAR}*nc . 86 | NHISTF=`/bin/ls ${CASE}.${HIST}.${YEAR}*nc | wc -l` 87 | if [ $NHISTF -eq 12 ] ; then 88 | OUTTIME="${YEAR}01-${YEAR}12" 89 | SUFFIX=".${OUTTIME}.nc" ; export SUFFIX 90 | echo -n "TS transpose_data start: " ; date 91 | ./Transpose_Data 92 | if [ $? -ne 0 ] ; then 93 | echo "Transpose_Data failed" 94 | exit 1 95 | fi 96 | echo -n "TS transpose_data end : " ; date 97 | touch ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} 98 | else 99 | echo "File count mismatch on "${CASE}"."${HIST}"."${YEAR}": "${NHISTF}" instead of 12" 100 | fi 101 | fi 102 | # 103 | echo -n "TS COMPLETE: " ; date 104 | # 105 | exit 106 | -------------------------------------------------------------------------------- /data_reshaping/cice.h_t13.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | # 3 | #SBATCH -n 64 4 | #SBATCH -N 4 5 | #SBATCH --ntasks-per-node=16 6 | #SBATCH -t 24:00:00 7 | #SBATCH -p dav 8 | #SBATCH -J Cice.h_t13 9 | #SBATCH --account=P93300606 10 | #SBATCH --mem 100G 11 | #SBATCH -e logs/Cice.h_t13.err.%J 12 | #SBATCH -o logs/Cice.h_t13.out.%J 13 | #SBATCH -m block 14 | # 15 | module purge 16 | conda deactivate || echo "conda not loaded" 17 | # 18 | # PARSE COMMAND LINE ARGUMENTS 19 | CASE=${1} ; export CASE 20 | ARCHIVE_ROOT=${2} 21 | YEAR=${3} 22 | echo "Reshaping year ${YEAR} for ${CASE}..." 23 | # 24 | cd /glade/p/cesm/postprocessing_dav/cesm-env2/bin 25 | . activate 26 | # 27 | module load intel/17.0.1 28 | module load ncarenv 29 | module load ncarcompilers 30 | module load impi 31 | module load netcdf/4.6.1 32 | module load nco/4.7.4 33 | module load ncl/6.4.0 34 | # 35 | HIST=cice.h ; export HIST 36 | # 37 | PATH=/glade/p/cesm/postprocessing_dav/cesm-env2/bin:/usr/local/bin:${PATH} ; export PATH 38 | # 39 | NCKS=`which ncks` ; export NCKS 40 | PROCHOST=`hostname`;export PROCHOST 41 | # 42 | BASEDIR=/glade/u/home/strandwg/CCP_Processing_Suite 43 | LOCALDSK=${ARCHIVE_ROOT}/${CASE} ; export LOCALDSK 44 | PROCBASE=/glade/scratch/$USER/T13/${CASE} ; export PROCBASE 45 | # 46 | HTYP=`echo $HIST | cut -d'.' -f1` ; export HTYP 47 | case "$HTYP" in 48 | cam2 | cam ) 49 | COMP_NAME=atm ;; 50 | cism ) 51 | COMP_NAME=glc ;; 52 | clm2 ) 53 | COMP_NAME=lnd ;; 54 | pop ) 55 | COMP_NAME=ocn ;; 56 | rtm | mosart ) 57 | COMP_NAME=rof ;; 58 | cice | csim ) 59 | COMP_NAME=ice ;; 60 | * ) 61 | echo "Unable to continue because "$HIST" not known." 62 | exit 1 ;; 63 | esac 64 | # 65 | LOCAL_HIST=${LOCALDSK}/${COMP_NAME}/hist ; export LOCAL_HIST 66 | LOCAL_PROC=${PROCBASE}/${HIST}/proc ; export LOCAL_PROC 67 | CACHEDIR=${LOCAL_PROC}/COMPLETED ; export CACHEDIR 68 | # 69 | VERBOSITY=0 ; export VERBOSITY 70 | PREFIX="${CACHEDIR}/${CASE}.${HIST}." ; export PREFIX 71 | NCFORMAT=netcdf4c ; export NCFORMAT ; export NCFORMAT 72 | # 73 | if [ ! -d $LOCAL_PROC ] ; then 74 | mkdir -p $LOCAL_PROC 75 | fi 76 | if [ ! -d $CACHEDIR ] ; then 77 | mkdir -p $CACHEDIR 78 | fi 79 | # 80 | cd $LOCAL_PROC 81 | ln -s -f $BASEDIR/run_slice2series_dav Transpose_Data 82 | # 83 | rm -f ${CASE}.${HIST}.*nc 84 | if [ ! 
-f ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} ] ; then 85 | ln -s -f ${LOCAL_HIST}/${CASE}.${HIST}.${YEAR}*nc . 86 | NHISTF=`/bin/ls ${CASE}.${HIST}.${YEAR}*nc | wc -l` 87 | if [ $NHISTF -eq 12 ] ; then 88 | OUTTIME="${YEAR}01-${YEAR}12" 89 | SUFFIX=".${OUTTIME}.nc" ; export SUFFIX 90 | echo -n "TS transpose_data start: " ; date 91 | ./Transpose_Data 92 | if [ $? -ne 0 ] ; then 93 | echo "Transpose_Data failed" 94 | exit 1 95 | fi 96 | echo -n "TS transpose_data end : " ; date 97 | touch ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} 98 | else 99 | echo "File count mismatch on "${CASE}"."${HIST}"."${YEAR}": "${NHISTF}" instead of 12" 100 | fi 101 | fi 102 | # 103 | echo -n "TS COMPLETE: " ; date 104 | # 105 | exit 106 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [](https://github.com/marbl-ecosys/HiRes-CESM-analysis/actions?query=workflow%3A%22Continuous+Integration%22) 3 | [](https://github.com/marbl-ecosys/HiRes-CESM-analysis/actions?query=workflow%3A%22Run+all+pre-commit+checks+one+more+time%22) 4 | 5 | # HiRes-CESM Analysis 6 | 7 | - [HiRes-CESM Analysis](#hires-cesm-analysis) 8 | - [For Developers](#for-developers) 9 | - [Keep your conda environment up to date](#keep-your-conda-environment-up-to-date) 10 | - [Use `pre-commit` to test code before committing](#use-pre-commit-to-test-code-before-committing) 11 | - [Run `pytest` after modifying python in `utils/`](#run-pytest-after-modifying-python-in-utils) 12 | 13 | This repository is building a set of tools for analyzing BGC output in a high-resolution POP run. 14 | 15 | ## For Developers 16 | 17 | A few recommended practices to incorporate in your development sandbox: 18 | 19 | ### Keep your conda environment up to date 20 | 21 | The first time you check out this repository, run 22 | 23 | ``` 24 | $ conda env create -f environments/environment.yaml 25 | ``` 26 | 27 | If you notice the YAML file has changed after you fetch changes from github, 28 | update the environment with 29 | 30 | ``` 31 | $ conda env update -f environments/environment.yaml 32 | ``` 33 | 34 | If the `env update` command fails, you can remove the environment and re-create it: 35 | 36 | ``` 37 | $ conda env remove --name hires-marbl 38 | $ conda env create -f environments/environment.yaml 39 | ``` 40 | 41 | ### Use `pre-commit` to test code before committing 42 | 43 | Please take advantage of the pre-commit package to ensure that `black` is run before committing: 44 | 45 | ``` 46 | $ pre-commit install --install-hooks # set up pre-commit 47 | $ pre-commit run -a # check all the files currently in the repo 48 | ``` 49 | 50 | The pre-commit package is already installed via the `hires-marbl` conda environment. 51 | There is a github action to run these checks on all pull requests, 52 | but running them locally via pre-commit will reduce the number of failed actions. 53 | NOTE: for some reason, to properly install `pre-commit` on the CISL systems, 54 | the above command must be run from `casper` rather than `cheyenne`. 55 | 56 | Note that pre-commit creates a virtual environment using specific tags of each package. 57 | As newer versions of `black` become available on `conda-forge`, we will update the pre-commit environment. 58 | 59 | ### Run `pytest` after modifying python in `utils/` 60 | 61 | To test some of the python code in `notebooks/utils/`, run `pytest`. 
62 | These tests can be run from the top level of this repository by running 63 | 64 | ``` 65 | $ pytest tests/ 66 | ``` 67 | 68 | If you add new code to this directory, 69 | consider writing small tests to ensure it is running as expected. 70 | -------------------------------------------------------------------------------- /data_reshaping/cice.h1_t13.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | # 3 | #SBATCH -n 64 4 | #SBATCH -N 4 5 | #SBATCH --ntasks-per-node=16 6 | #SBATCH -t 24:00:00 7 | #SBATCH -p dav 8 | #SBATCH -J Cice.h1_t13 9 | #SBATCH --account=P93300606 10 | #SBATCH --mem 100G 11 | #SBATCH -e logs/Cice.h1_t13.err.%J 12 | #SBATCH -o logs/Cice.h1_t13.out.%J 13 | #SBATCH -m block 14 | # 15 | module purge 16 | conda deactivate || echo "conda not loaded" 17 | # 18 | # PARSE COMMAND LINE ARGUMENTS 19 | CASE=${1} ; export CASE 20 | ARCHIVE_ROOT=${2} 21 | YEAR=${3} 22 | echo "Reshaping year ${YEAR} for ${CASE}..." 23 | # 24 | cd /glade/p/cesm/postprocessing_dav/cesm-env2/bin 25 | . activate 26 | # 27 | module load intel/17.0.1 28 | module load ncarenv 29 | module load ncarcompilers 30 | module load impi 31 | module load netcdf/4.6.1 32 | module load nco/4.7.4 33 | module load ncl/6.4.0 34 | # 35 | HIST=cice.h1 ; export HIST 36 | # 37 | PATH=/glade/p/cesm/postprocessing_dav/cesm-env2/bin:/usr/local/bin:${PATH} ; export PATH 38 | # 39 | NCKS=`which ncks` ; export NCKS 40 | PROCHOST=`hostname`;export PROCHOST 41 | # 42 | BASEDIR=/glade/u/home/strandwg/CCP_Processing_Suite 43 | LOCALDSK=${ARCHIVE_ROOT}/${CASE} ; export LOCALDSK 44 | PROCBASE=/glade/scratch/$USER/T13/${CASE} ; export PROCBASE 45 | # 46 | HTYP=`echo $HIST | cut -d'.' -f1` ; export HTYP 47 | case "$HTYP" in 48 | cam2 | cam ) 49 | COMP_NAME=atm ;; 50 | cism ) 51 | COMP_NAME=glc ;; 52 | clm2 ) 53 | COMP_NAME=lnd ;; 54 | pop ) 55 | COMP_NAME=ocn ;; 56 | rtm | mosart ) 57 | COMP_NAME=rof ;; 58 | cice | csim ) 59 | COMP_NAME=ice ;; 60 | * ) 61 | echo "Unable to continue because "$HIST" not known." 62 | exit 1 ;; 63 | esac 64 | # 65 | LOCAL_HIST=${LOCALDSK}/${COMP_NAME}/hist ; export LOCAL_HIST 66 | LOCAL_PROC=${PROCBASE}/${HIST}/proc ; export LOCAL_PROC 67 | CACHEDIR=${LOCAL_PROC}/COMPLETED ; export CACHEDIR 68 | # 69 | VERBOSITY=0 ; export VERBOSITY 70 | PREFIX="${CACHEDIR}/${CASE}.${HIST}." ; export PREFIX 71 | NCFORMAT=netcdf4c ; export NCFORMAT ; export NCFORMAT 72 | # 73 | if [ ! -d $LOCAL_PROC ] ; then 74 | mkdir -p $LOCAL_PROC 75 | fi 76 | if [ ! -d $CACHEDIR ] ; then 77 | mkdir -p $CACHEDIR 78 | fi 79 | # 80 | cd $LOCAL_PROC 81 | ln -s -f $BASEDIR/run_slice2series_dav Transpose_Data 82 | # 83 | rm -f ${CASE}.${HIST}.*nc 84 | if [ ! -f ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} ] ; then 85 | ln -s -f ${LOCAL_HIST}/${CASE}.${HIST}.${YEAR}*nc . 86 | NHISTF=`/bin/ls ${CASE}.${HIST}.${YEAR}*nc | wc -l` 87 | if [ $NHISTF -eq 365 ] ; then 88 | OUTTIME="${YEAR}0101-${YEAR}1231" 89 | SUFFIX=".${OUTTIME}.nc" ; export SUFFIX 90 | echo -n "TS transpose_data start: " ; date 91 | ./Transpose_Data 92 | if [ $? 
-ne 0 ] ; then 93 | echo "Transpose_Data failed" 94 | exit 1 95 | fi 96 | echo -n "TS transpose_data end : " ; date 97 | touch ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} 98 | else 99 | echo "File count mismatch on "${CASE}"."${HIST}"."${YEAR}": "${NHISTF}" instead of 365" 100 | fi 101 | fi 102 | # 103 | echo -n "TS COMPLETE: " ; date 104 | # 105 | exit 106 | -------------------------------------------------------------------------------- /data_reshaping/pop.h.nyear1_t13.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | # 3 | #SBATCH -n 64 4 | #SBATCH -N 4 5 | #SBATCH --ntasks-per-node=16 6 | #SBATCH -t 24:00:00 7 | #SBATCH -p dav 8 | #SBATCH -J Pop.h.nyear1_t13 9 | #SBATCH --account=P93300606 10 | #SBATCH --mem 100G 11 | #SBATCH -e logs/Pop.h.nyear1_t13.err.%J 12 | #SBATCH -o logs/Pop.h.nyear1_t13.out.%J 13 | #SBATCH -m block 14 | # 15 | module purge 16 | conda deactivate || echo "conda not loaded" 17 | # 18 | # PARSE COMMAND LINE ARGUMENTS 19 | CASE=${1} ; export CASE 20 | ARCHIVE_ROOT=${2} 21 | YEAR=${3} 22 | echo "Reshaping year ${YEAR} for ${CASE}..." 23 | # 24 | cd /glade/p/cesm/postprocessing_dav/cesm-env2/bin 25 | . activate 26 | # 27 | module load intel/17.0.1 28 | module load ncarenv 29 | module load ncarcompilers 30 | module load impi 31 | module load netcdf/4.6.1 32 | module load nco/4.7.4 33 | module load ncl/6.4.0 34 | # 35 | HIST=pop.h.nyear1 ; export HIST 36 | # 37 | PATH=/glade/p/cesm/postprocessing_dav/cesm-env2/bin:/usr/local/bin:${PATH} ; export PATH 38 | # 39 | NCKS=`which ncks` ; export NCKS 40 | PROCHOST=`hostname`;export PROCHOST 41 | # 42 | BASEDIR=/glade/u/home/strandwg/CCP_Processing_Suite 43 | LOCALDSK=${ARCHIVE_ROOT}/${CASE} ; export LOCALDSK 44 | PROCBASE=/glade/scratch/$USER/T13/${CASE} ; export PROCBASE 45 | # 46 | HTYP=`echo $HIST | cut -d'.' -f1` ; export HTYP 47 | case "$HTYP" in 48 | cam2 | cam ) 49 | COMP_NAME=atm ;; 50 | cism ) 51 | COMP_NAME=glc ;; 52 | clm2 ) 53 | COMP_NAME=lnd ;; 54 | pop ) 55 | COMP_NAME=ocn ;; 56 | rtm | mosart ) 57 | COMP_NAME=rof ;; 58 | cice | csim ) 59 | COMP_NAME=ice ;; 60 | * ) 61 | echo "Unable to continue because "$HIST" not known." 62 | exit 1 ;; 63 | esac 64 | # 65 | LOCAL_HIST=${LOCALDSK}/${COMP_NAME}/hist ; export LOCAL_HIST 66 | LOCAL_PROC=${PROCBASE}/${HIST}/proc ; export LOCAL_PROC 67 | CACHEDIR=${LOCAL_PROC}/COMPLETED ; export CACHEDIR 68 | # 69 | VERBOSITY=0 ; export VERBOSITY 70 | PREFIX="${CACHEDIR}/${CASE}.${HIST}." ; export PREFIX 71 | NCFORMAT=netcdf4c ; export NCFORMAT ; export NCFORMAT 72 | # 73 | if [ ! -d $LOCAL_PROC ] ; then 74 | mkdir -p $LOCAL_PROC 75 | fi 76 | if [ ! -d $CACHEDIR ] ; then 77 | mkdir -p $CACHEDIR 78 | fi 79 | # 80 | cd $LOCAL_PROC 81 | ln -s -f $BASEDIR/run_slice2series_dav Transpose_Data 82 | # 83 | rm -f ${CASE}.${HIST}.*nc 84 | if [ ! -f ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} ] ; then 85 | ln -s -f ${LOCAL_HIST}/${CASE}.${HIST}.${YEAR}*nc . 86 | NHISTF=`/bin/ls ${CASE}.${HIST}.${YEAR}*nc | wc -l` 87 | if [ $NHISTF -eq 1 ] ; then 88 | OUTTIME="${YEAR}-${YEAR}" 89 | SUFFIX=".${OUTTIME}.nc" ; export SUFFIX 90 | echo -n "TS transpose_data start: " ; date 91 | ./Transpose_Data 92 | if [ $? 
-ne 0 ] ; then 93 | echo "Transpose_Data failed" 94 | exit 1 95 | fi 96 | echo -n "TS transpose_data end : " ; date 97 | touch ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} 98 | else 99 | echo "File count mismatch on "${CASE}"."${HIST}"."${YEAR}": "${NHISTF}" instead of 1" 100 | fi 101 | fi 102 | # 103 | echo -n "TS COMPLETE: " ; date 104 | # 105 | exit 106 | -------------------------------------------------------------------------------- /data_reshaping/pop.h.nday1_t13.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | # 3 | #SBATCH -n 64 4 | #SBATCH -N 4 5 | #SBATCH --ntasks-per-node=16 6 | #SBATCH -t 24:00:00 7 | #SBATCH -p dav 8 | #SBATCH -J Pop.h.nday1_t13 9 | #SBATCH --account=P93300606 10 | #SBATCH --mem 100G 11 | #SBATCH -e logs/Pop.h.nday1_t13.err.%J 12 | #SBATCH -o logs/Pop.h.nday1_t13.out.%J 13 | #SBATCH -m block 14 | # 15 | module purge 16 | conda deactivate || echo "conda not loaded" 17 | # 18 | # PARSE COMMAND LINE ARGUMENTS 19 | CASE=${1} ; export CASE 20 | ARCHIVE_ROOT=${2} 21 | YEAR=${3} 22 | echo "Reshaping year ${YEAR} for ${CASE}..." 23 | # 24 | cd /glade/p/cesm/postprocessing_dav/cesm-env2/bin 25 | . activate 26 | # 27 | module load intel/17.0.1 28 | module load ncarenv 29 | module load ncarcompilers 30 | module load impi 31 | module load netcdf/4.6.1 32 | module load nco/4.7.4 33 | module load ncl/6.4.0 34 | # 35 | HIST=pop.h.nday1 ; export HIST 36 | # 37 | PATH=/glade/p/cesm/postprocessing_dav/cesm-env2/bin:/usr/local/bin:${PATH} ; export PATH 38 | # 39 | NCKS=`which ncks` ; export NCKS 40 | PROCHOST=`hostname`;export PROCHOST 41 | # 42 | BASEDIR=/glade/u/home/strandwg/CCP_Processing_Suite 43 | LOCALDSK=${ARCHIVE_ROOT}/${CASE} ; export LOCALDSK 44 | PROCBASE=/glade/scratch/$USER/T13/${CASE} ; export PROCBASE 45 | # 46 | HTYP=`echo $HIST | cut -d'.' -f1` ; export HTYP 47 | case "$HTYP" in 48 | cam2 | cam ) 49 | COMP_NAME=atm ;; 50 | cism ) 51 | COMP_NAME=glc ;; 52 | clm2 ) 53 | COMP_NAME=lnd ;; 54 | pop ) 55 | COMP_NAME=ocn ;; 56 | rtm | mosart ) 57 | COMP_NAME=rof ;; 58 | cice | csim ) 59 | COMP_NAME=ice ;; 60 | * ) 61 | echo "Unable to continue because "$HIST" not known." 62 | exit 1 ;; 63 | esac 64 | # 65 | LOCAL_HIST=${LOCALDSK}/${COMP_NAME}/hist ; export LOCAL_HIST 66 | LOCAL_PROC=${PROCBASE}/${HIST}/proc ; export LOCAL_PROC 67 | CACHEDIR=${LOCAL_PROC}/COMPLETED ; export CACHEDIR 68 | # 69 | VERBOSITY=0 ; export VERBOSITY 70 | PREFIX="${CACHEDIR}/${CASE}.${HIST}." ; export PREFIX 71 | NCFORMAT=netcdf4c ; export NCFORMAT ; export NCFORMAT 72 | # 73 | if [ ! -d $LOCAL_PROC ] ; then 74 | mkdir -p $LOCAL_PROC 75 | fi 76 | if [ ! -d $CACHEDIR ] ; then 77 | mkdir -p $CACHEDIR 78 | fi 79 | # 80 | cd $LOCAL_PROC 81 | ln -s -f $BASEDIR/run_slice2series_dav Transpose_Data 82 | # 83 | rm -f ${CASE}.${HIST}.*nc 84 | if [ ! -f ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} ] ; then 85 | ln -s -f ${LOCAL_HIST}/${CASE}.${HIST}.${YEAR}*nc . 86 | NHISTF=`/bin/ls ${CASE}.${HIST}.${YEAR}*nc | wc -l` 87 | if [ $NHISTF -eq 12 ] ; then 88 | OUTTIME="${YEAR}0101-${YEAR}1231" 89 | SUFFIX=".${OUTTIME}.nc" ; export SUFFIX 90 | echo -n "TS transpose_data start: " ; date 91 | ./Transpose_Data 92 | if [ $? 
-ne 0 ] ; then 93 | echo "Transpose_Data failed" 94 | exit 1 95 | fi 96 | echo -n "TS transpose_data end : " ; date 97 | touch ${LOCAL_PROC}/.DONE.${CASE}.${HIST}.${YEAR} 98 | else 99 | echo "File count mismatch on "${CASE}"."${HIST}"."${YEAR}": "${NHISTF}" instead of 12" 100 | fi 101 | fi 102 | # 103 | echo -n "TS COMPLETE: " ; date 104 | # 105 | exit 106 | -------------------------------------------------------------------------------- /data_reshaping/run_all.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | 4 | 5 | def _parse_args(): 6 | """ Parse command line arguments """ 7 | 8 | import argparse 9 | 10 | parser = argparse.ArgumentParser( 11 | description="Submit scripts to reshape highres BGC output", 12 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 13 | ) 14 | 15 | # Required: specify year 16 | parser.add_argument( 17 | "-y", 18 | "--years", 19 | action="store", 20 | dest="years", 21 | type=int, 22 | required=True, 23 | nargs="+", 24 | help="Year of run to convert to time series", 25 | ) 26 | 27 | # Optional: which case to convert 28 | # if this tool is made public, drop the default and require case as well 29 | parser.add_argument( 30 | "-c", 31 | "--case", 32 | action="store", 33 | dest="case", 34 | type=str, 35 | default="g.e22.G1850ECO_JRA_HR.TL319_t13.004", 36 | help="Suffix of case to convert to time series", 37 | ) 38 | 39 | # Optional: location of DOUT_S_ROOT 40 | archive_default = os.path.join( 41 | os.sep, "glade", "scratch", os.environ["USER"], "archive" 42 | ) 43 | parser.add_argument( 44 | "-a", 45 | "--archive-root", 46 | action="store", 47 | dest="archive_root", 48 | type=str, 49 | default=archive_default, 50 | help="base of DOUT_S_ROOT", 51 | ) 52 | 53 | # Optional: specify which scripts to run 54 | parser.add_argument( 55 | "-s", 56 | "--scripts", 57 | action="store", 58 | dest="scripts", 59 | type=str, 60 | nargs="+", 61 | default=[ 62 | "pop.h_t13.sh", 63 | "pop.h.nday1_t13.sh", 64 | "cice.h_t13.sh", 65 | "pop.h.nyear1_t13.sh", 66 | "cice.h1_t13.sh", 67 | ], 68 | help="Scripts to submit to slurm", 69 | ) 70 | 71 | # Optional: is this a dry-run? If so, don't submit anything 72 | parser.add_argument( 73 | "-d", 74 | "--dry-run", 75 | action="store_true", 76 | dest="dryrun", 77 | help="If true, do not actually submit job", 78 | ) 79 | 80 | # Optional: By default, slurm will email users when jobs start and finish 81 | parser.add_argument( 82 | "--no-mail", 83 | action="store_false", 84 | dest="send_mail", 85 | help="If true, send SLURM emails to {user}@ucar.edu", 86 | ) 87 | 88 | return parser.parse_args() 89 | 90 | 91 | ################### 92 | 93 | if __name__ == "__main__": 94 | args = _parse_args() 95 | case = args.case 96 | archive_root = args.archive_root 97 | mail_opt = ( 98 | f"--mail-type=ALL --mail-user={os.environ['USER']}@ucar.edu" 99 | if args.send_mail 100 | else "--mail-type=NONE" 101 | ) 102 | 103 | for yr in args.years: 104 | year = f"{yr:04}" 105 | for script in args.scripts: 106 | print(f"Submitting {script} for year {year} of {case}...") 107 | cmd = f"sbatch {mail_opt} --dependency=singleton {script} {case} {archive_root} {year}" 108 | if not args.dryrun: 109 | # note: the --dependency=singleton option means only one job per job name 110 | # Some jobs had been crashing, and I think it was due to temporary 111 | # files clobbering each other? But only having one pop.h_t13.sh job 112 | # at a time seems to have prevented these issues. 
113 | os.system(cmd) 114 | else: 115 | print(f"Command to run: {cmd}") 116 | -------------------------------------------------------------------------------- /notebooks/plot_suite_maps_0095_1deg.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import yaml\n", 11 | "\n", 12 | "import utils\n", 13 | "\n", 14 | "%matplotlib inline" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 2, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "casename = \"g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\"\n", 24 | "year = 95\n", 25 | "campaign_root = os.path.join(\n", 26 | " os.path.sep,\n", 27 | " \"glade\",\n", 28 | " \"campaign\",\n", 29 | " \"cesm\",\n", 30 | " \"development\",\n", 31 | " \"bgcwg\",\n", 32 | " \"projects\",\n", 33 | " \"1deg_cocco_JRA\",\n", 34 | " \"cases\",\n", 35 | ")\n", 36 | "\n", 37 | "# Set up CaseClass object\n", 38 | "case = utils.CaseClass(casename, os.path.join(campaign_root, casename))" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 3, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "with open(\"diag_metadata.yaml\", mode=\"r\") as fptr:\n", 48 | " diag_metadata_list = yaml.safe_load(fptr)\n", 49 | "\n", 50 | "varnames = utils.get_varnames_from_metadata_list(diag_metadata_list)" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": 4, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "def summary_plots(ds, diag_metadata, save_pngs=False):\n", 60 | " varname = diag_metadata[\"varname\"]\n", 61 | " print(varname)\n", 62 | " da = ds[varname].isel(diag_metadata.get(\"isel_dict\"))\n", 63 | "\n", 64 | " utils.summary_plot_maps(\n", 65 | " ds,\n", 66 | " da,\n", 67 | " diag_metadata,\n", 68 | " save_pngs=save_pngs,\n", 69 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 70 | " )" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 5, 76 | "metadata": {}, 77 | "outputs": [ 78 | { 79 | "name": "stdout", 80 | "output_type": "stream", 81 | "text": [ 82 | "Datasets contain a total of 12 time samples\n", 83 | "Last average written at 0096-01-01 00:00:00\n", 84 | "POC_FLUX_100m\n", 85 | "CaCO3_FLUX_100m\n", 86 | "FG_CO2\n", 87 | "DpCO2\n", 88 | "PH\n", 89 | "spChl\n", 90 | "photoC_sp_zint\n", 91 | "coccoChl\n", 92 | "photoC_cocco_zint\n", 93 | "diatChl\n", 94 | "photoC_diat_zint\n", 95 | "diazChl\n", 96 | "photoC_diaz_zint\n", 97 | "NHx_SURFACE_EMIS\n", 98 | "NH4\n", 99 | "O2_ZMIN\n", 100 | "O2_ZMIN_DEPTH\n", 101 | "O2\n", 102 | "PO4\n", 103 | "PO4\n", 104 | "NO3\n", 105 | "NO3\n", 106 | "SiO3\n", 107 | "SiO3\n" 108 | ] 109 | } 110 | ], 111 | "source": [ 112 | "stream = \"pop.h\"\n", 113 | "ds = case.gen_dataset(varnames, stream, start_year=year, end_year=year)\n", 114 | "for diag_metadata in diag_metadata_list:\n", 115 | " # ds = case.gen_dataset(\n", 116 | " # diag_metadata[\"varname\"], stream, start_year=year, end_year=year\n", 117 | " # )\n", 118 | " summary_plots(ds, diag_metadata, save_pngs=True)" 119 | ] 120 | } 121 | ], 122 | "metadata": { 123 | "kernelspec": { 124 | "display_name": "Python [conda env:hires-marbl]", 125 | "language": "python", 126 | "name": "conda-env-hires-marbl-py" 127 | }, 128 | "language_info": { 129 | "codemirror_mode": { 130 | "name": "ipython", 131 | "version": 3 132 | }, 133 | "file_extension": ".py", 134 | 
"mimetype": "text/x-python", 135 | "name": "python", 136 | "nbconvert_exporter": "python", 137 | "pygments_lexer": "ipython3", 138 | "version": "3.7.8" 139 | } 140 | }, 141 | "nbformat": 4, 142 | "nbformat_minor": 4 143 | } 144 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import pytest 6 | import cftime 7 | import numpy as np 8 | import xarray as xr 9 | 10 | sys.path.append(os.path.abspath(os.path.join("notebooks"))) 11 | sys.path.append(os.path.abspath("tests")) 12 | from utils.utils import time_year_plus_frac, time_set_mid, repl_coord, round_sig 13 | from xr_ds_ex import gen_time_bounds_values, xr_ds_ex 14 | 15 | nyrs = 300 16 | var_const = False 17 | 18 | 19 | @pytest.mark.parametrize("decode_times1", [True, False]) 20 | @pytest.mark.parametrize("decode_times2", [True, False]) 21 | @pytest.mark.parametrize("apply_chunk1", [True, False]) 22 | def test_repl_coord(decode_times1, decode_times2, apply_chunk1): 23 | ds1 = time_set_mid(xr_ds_ex(decode_times1, nyrs=nyrs, var_const=var_const), "time") 24 | if apply_chunk1: 25 | ds1 = ds1.chunk({"time": 12}) 26 | 27 | # change time:bounds attribute variable rename corresponding variable 28 | tb_name_old = ds1["time"].attrs["bounds"] 29 | tb_name_new = tb_name_old + "_new" 30 | ds1["time"].attrs["bounds"] = tb_name_new 31 | ds1 = ds1.rename({tb_name_old: tb_name_new}) 32 | 33 | # verify that repl_coord on xr_ds_ex gives same results as 34 | # 1) executing time_set_mid 35 | # 2) manually changing bounds 36 | ds2 = repl_coord( 37 | "time", ds1, xr_ds_ex(decode_times2, nyrs=nyrs, var_const=var_const) 38 | ) 39 | assert ds2.identical(ds1) 40 | 41 | assert ds2["time"].encoding == ds1["time"].encoding 42 | assert ds2["time"].chunks == ds1["time"].chunks 43 | 44 | 45 | @pytest.mark.parametrize("decode_times", [True, False]) 46 | @pytest.mark.parametrize("deep", [True, False]) 47 | @pytest.mark.parametrize("apply_chunk", [True, False]) 48 | def test_time_set_mid(decode_times, deep, apply_chunk): 49 | ds = xr_ds_ex(decode_times, nyrs=nyrs, var_const=var_const, time_mid=False) 50 | if apply_chunk: 51 | ds = ds.chunk({"time": 12}) 52 | 53 | mid_month_values = gen_time_bounds_values(nyrs).mean(axis=1) 54 | if decode_times: 55 | time_encoding = ds["time"].encoding 56 | expected_values = cftime.num2date( 57 | mid_month_values, time_encoding["units"], time_encoding["calendar"] 58 | ) 59 | else: 60 | expected_values = mid_month_values 61 | 62 | ds_out = time_set_mid(ds, "time", deep) 63 | 64 | assert ds_out.attrs == ds.attrs 65 | assert ds_out.encoding == ds.encoding 66 | assert ds_out.chunks == ds.chunks 67 | 68 | for varname in ds.variables: 69 | assert ds_out[varname].attrs == ds[varname].attrs 70 | assert ds_out[varname].encoding == ds[varname].encoding 71 | assert ds_out[varname].chunks == ds[varname].chunks 72 | if varname == "time": 73 | assert np.all(ds_out[varname].values == expected_values) 74 | else: 75 | assert np.all(ds_out[varname].values == ds[varname].values) 76 | assert (ds_out[varname].data is ds[varname].data) == (not deep) 77 | 78 | # verify that values are independent of ds being chunked in time 79 | ds_chunk = xr_ds_ex( 80 | decode_times, nyrs=nyrs, var_const=var_const, time_mid=False 81 | ).chunk({"time": 6}) 82 | ds_chunk_out = time_set_mid(ds_chunk, "time") 83 | assert ds_chunk_out.identical(ds_out) 84 | 85 | 86 | 
@pytest.mark.parametrize("decode_times", [True, False]) 87 | def test_time_year_plus_frac(decode_times): 88 | ds = xr_ds_ex(decode_times, nyrs=nyrs, var_const=var_const) 89 | 90 | # call time_year_plus_frac to ensure that it doesn't raise an exception 91 | ty = time_year_plus_frac(ds, "time") 92 | 93 | 94 | @pytest.mark.parametrize( 95 | "x, ndigits, expected", 96 | [ 97 | (0.0, 1, 0.0), 98 | (0.0, 2, 0.0), 99 | (1.25, 1, 1.0), 100 | (1.25, 3, 1.25), 101 | (12.5, 1, 10.0), 102 | (12.5, 2, 12.0), # round to even 103 | (12.5, 3, 12.5), 104 | (12.5, 4, 12.5), 105 | (13.5, 1, 10.0), 106 | (13.5, 2, 14.0), # round to even 107 | (13.5, 3, 13.5), 108 | (13.52, 3, 13.5), 109 | (13.48, 3, 13.5), 110 | (13.5, 4, 13.5), 111 | ], 112 | ) 113 | def test_round_sig(x, ndigits, expected): 114 | assert round_sig(x, ndigits) == expected 115 | -------------------------------------------------------------------------------- /notebooks/diag_metadata.yaml: -------------------------------------------------------------------------------- 1 | - varname: POC_FLUX_100m 2 | spatial_op: integrate 3 | integral_unit_conv: (12 g)/(mol) # convert from mol to g C 4 | integral_display_units: Pg / yr 5 | display_units: mol / m^2 / yr 6 | map_vmin: 1.0e-2 7 | map_vmax: 10.0 8 | apply_log10: True 9 | 10 | - varname: CaCO3_FLUX_100m 11 | spatial_op: integrate 12 | integral_unit_conv: (12 g)/(mol) # convert from mol to g C 13 | integral_display_units: Pg / yr 14 | display_units: mol / m^2 / yr 15 | map_vmin: 1.0e-3 16 | map_vmax: 3.0 17 | apply_log10: True 18 | 19 | - varname: FG_CO2 20 | spatial_op: integrate 21 | integral_unit_conv: (12 g)/(mol) # convert from mol to g C 22 | integral_display_units: Pg / yr 23 | display_units: mol / m^2 / yr 24 | map_vmin: -10.0 25 | map_vmax: 10.0 26 | 27 | - varname: DpCO2 28 | spatial_op: average 29 | map_vmin: -75.0 30 | map_vmax: 75.0 31 | 32 | - varname: PH 33 | spatial_op: average 34 | map_vmin: 8.0 35 | map_vmax: 8.4 36 | 37 | - varname: spChl 38 | isel_dict: 39 | z_t_150m: 0 40 | spatial_op: average 41 | map_vmin: 1.0e-3 42 | map_vmax: 1.0e-1 43 | apply_log10: True 44 | 45 | - varname: photoC_sp_zint 46 | spatial_op: integrate 47 | integral_unit_conv: (12 g)/(mol) # convert from mol to g C 48 | integral_display_units: Pg / yr 49 | display_units: mol / m^2 / yr 50 | map_vmin: 1.0e-2 51 | map_vmax: 15.0 52 | apply_log10: True 53 | 54 | - varname: coccoChl 55 | isel_dict: 56 | z_t_150m: 0 57 | spatial_op: average 58 | map_vmin: 1.0e-3 59 | map_vmax: 1.0 60 | apply_log10: True 61 | 62 | - varname: photoC_cocco_zint 63 | spatial_op: integrate 64 | integral_unit_conv: (12 g)/(mol) # convert from mol to g C 65 | integral_display_units: Pg / yr 66 | display_units: mol / m^2 / yr 67 | map_vmin: 1.0e-2 68 | map_vmax: 15.0 69 | apply_log10: True 70 | 71 | - varname: diatChl 72 | isel_dict: 73 | z_t_150m: 0 74 | spatial_op: average 75 | map_vmin: 1.0e-3 76 | map_vmax: 10.0 77 | apply_log10: True 78 | 79 | - varname: photoC_diat_zint 80 | spatial_op: integrate 81 | integral_unit_conv: (12 g)/(mol) # convert from mol to g C 82 | integral_display_units: Pg / yr 83 | display_units: mol / m^2 / yr 84 | map_vmin: 1.0e-2 85 | map_vmax: 15.0 86 | apply_log10: True 87 | 88 | - varname: diazChl 89 | isel_dict: 90 | z_t_150m: 0 91 | spatial_op: average 92 | map_vmin: 1.0e-4 93 | map_vmax: 1.0e-2 94 | apply_log10: True 95 | 96 | - varname: photoC_diaz_zint 97 | spatial_op: integrate 98 | integral_unit_conv: (12 g)/(mol) # convert from mol to g C 99 | integral_display_units: Pg / yr 100 | display_units: mol / 
m^2 / yr 101 | map_vmin: 1.0e-2 102 | map_vmax: 1.0 103 | apply_log10: True 104 | 105 | - varname: NHx_SURFACE_EMIS 106 | spatial_op: integrate 107 | integral_unit_conv: (14 g)/(mol) # convert from mol to g N 108 | integral_display_units: Tg yr^-1 109 | display_units: mol / m^2 / yr 110 | map_vmin: 1.0e-5 111 | map_vmax: 0.01 112 | apply_log10: True 113 | 114 | - varname: NH4 115 | isel_dict: 116 | z_t: 0 117 | spatial_op: average 118 | map_vmin: 1.0e-3 119 | map_vmax: 3.0 120 | apply_log10: True 121 | 122 | - varname: O2_ZMIN 123 | spatial_op: average 124 | map_vmin: -5.0 125 | map_vmax: 50.0 126 | 127 | - varname: O2_ZMIN_DEPTH 128 | spatial_op: average 129 | display_units: m 130 | map_vmin: 0.0 131 | map_vmax: 1000.0 132 | 133 | - varname: O2 134 | isel_dict: 135 | z_t: 28 136 | spatial_op: average 137 | map_vmin: 1.0 138 | map_vmax: 300.0 139 | apply_log10: True 140 | 141 | - varname: PO4 142 | isel_dict: 143 | z_t: 0 144 | spatial_op: average 145 | map_vmin: 1.0e-2 146 | map_vmax: 2.2 147 | apply_log10: True 148 | 149 | - varname: PO4 150 | isel_dict: 151 | z_t: 28 152 | spatial_op: average 153 | map_vmin: 0.5 154 | map_vmax: 3.5 155 | 156 | - varname: NO3 157 | isel_dict: 158 | z_t: 0 159 | spatial_op: average 160 | map_vmin: 1.0e-2 161 | map_vmax: 35.0 162 | apply_log10: True 163 | 164 | - varname: NO3 165 | isel_dict: 166 | z_t: 28 167 | spatial_op: average 168 | map_vmin: 10.0 169 | map_vmax: 35.0 170 | 171 | - varname: SiO3 172 | isel_dict: 173 | z_t: 0 174 | spatial_op: average 175 | map_vmin: 1.0 176 | map_vmax: 75.0 177 | apply_log10: True 178 | 179 | - varname: SiO3 180 | isel_dict: 181 | z_t: 28 182 | spatial_op: average 183 | map_vmin: 0.0 184 | map_vmax: 100.0 185 | -------------------------------------------------------------------------------- /notebooks/compare_ts_and_hist_003.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "\n", 11 | "import utils" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": {}, 18 | "outputs": [ 19 | { 20 | "name": "stdout", 21 | "output_type": "stream", 22 | "text": [ 23 | "Checking year 0001...\n", 24 | "... checking stream pop.h.nyear1 ...\n", 25 | "Skipping stream pop.h.nyear1 for year 0001 because there are no history files\n", 26 | "... checking stream pop.h.nday1 ...\n", 27 | "Skipping stream pop.h.nday1 for year 0001 because there are no history files\n", 28 | "... checking stream pop.h ...\n", 29 | "Skipping stream pop.h for year 0001 because there are no history files\n", 30 | "... checking stream cice.h ...\n", 31 | "Skipping stream cice.h for year 0001 because there are no history files\n", 32 | "Could not find time series for all variables in year 0001\n", 33 | "----\n", 34 | "Checking year 0002...\n", 35 | "... checking stream pop.h.nyear1 ...\n", 36 | "Skipping stream pop.h.nyear1 for year 0002 because there are no history files\n", 37 | "... checking stream pop.h.nday1 ...\n", 38 | "Skipping stream pop.h.nday1 for year 0002 because there are no history files\n", 39 | "... checking stream pop.h ...\n", 40 | "Skipping stream pop.h for year 0002 because there are no history files\n", 41 | "... checking stream cice.h1 ...\n", 42 | "Skipping stream cice.h1 for year 0002 because there are no history files\n", 43 | "... 
checking stream cice.h ...\n", 44 | "Skipping stream cice.h for year 0002 because there are no history files\n", 45 | "Could not find time series for all variables in year 0002\n", 46 | "----\n", 47 | "Checking year 0003...\n", 48 | "... checking stream pop.h.nyear1 ...\n", 49 | "Skipping stream pop.h.nyear1 for year 0003 because there are no history files\n", 50 | "... checking stream pop.h.nday1 ...\n", 51 | "Skipping stream pop.h.nday1 for year 0003 because there are no history files\n", 52 | "... checking stream pop.h ...\n", 53 | "Skipping stream pop.h for year 0003 because there are no history files\n", 54 | "... checking stream cice.h1 ...\n", 55 | "Skipping stream cice.h1 for year 0003 because there are no history files\n", 56 | "... checking stream cice.h ...\n", 57 | "Skipping stream cice.h for year 0003 because there are no history files\n", 58 | "Could not find time series for all variables in year 0003\n", 59 | "----\n", 60 | "Checking year 0004...\n", 61 | "... checking stream pop.h.nyear1 ...\n", 62 | "Skipping stream pop.h.nyear1 for year 0004 because there are no history files\n", 63 | "... checking stream pop.h.nday1 ...\n", 64 | "Skipping stream pop.h.nday1 for year 0004 because there are no history files\n", 65 | "... checking stream pop.h ...\n", 66 | "Skipping stream pop.h for year 0004 because there are no history files\n", 67 | "... checking stream cice.h1 ...\n", 68 | "Skipping stream cice.h1 for year 0004 because there are no history files\n", 69 | "... checking stream cice.h ...\n", 70 | "Skipping stream cice.h for year 0004 because there are no history files\n", 71 | "Could not find time series for all variables in year 0004\n", 72 | "----\n", 73 | "Checking year 0005...\n", 74 | "... checking stream pop.h.nyear1 ...\n", 75 | "Could not find time series for year 0005\n", 76 | "CPU times: user 207 ms, sys: 86.5 ms, total: 294 ms\n", 77 | "Wall time: 1.39 s\n" 78 | ] 79 | } 80 | ], 81 | "source": [ 82 | "%%time\n", 83 | "\n", 84 | "casename = \"g.e22.G1850ECO_JRA_HR.TL319_t13.003\"\n", 85 | "\n", 86 | "# Directories to search for netCDF files\n", 87 | "caseroot = os.path.join(os.sep, \"glade\", \"work\", \"mlevy\", \"hi-res_BGC_JRA\", \"cases\")\n", 88 | "campaign_root = os.path.join(os.sep, \"glade\", \"campaign\", \"cesm\", \"development\", \"bgcwg\", \"projects\", \"hi-res_JRA\", \"cases\")\n", 89 | "output_roots = [os.path.join(campaign_root, casename, \"output\")]\n", 90 | "output_roots += utils.gen_output_roots_from_caseroot(os.path.join(caseroot, casename))\n", 91 | "\n", 92 | "utils.timeseries_and_history_comparison(casename, output_roots)" 93 | ] 94 | } 95 | ], 96 | "metadata": { 97 | "kernelspec": { 98 | "display_name": "Python [conda env:hires-marbl]", 99 | "language": "python", 100 | "name": "conda-env-hires-marbl-py" 101 | }, 102 | "language_info": { 103 | "codemirror_mode": { 104 | "name": "ipython", 105 | "version": 3 106 | }, 107 | "file_extension": ".py", 108 | "mimetype": "text/x-python", 109 | "name": "python", 110 | "nbconvert_exporter": "python", 111 | "pygments_lexer": "ipython3", 112 | "version": "3.7.8" 113 | } 114 | }, 115 | "nbformat": 4, 116 | "nbformat_minor": 4 117 | } 118 | -------------------------------------------------------------------------------- /notebooks/plot_suite_maps_0001_003.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "execution": {} 8 | }, 9 | "outputs": [], 10 | "source": [ 11 
| "import os\n", 12 | "import yaml\n", 13 | "\n", 14 | "import utils\n", 15 | "\n", 16 | "%matplotlib inline" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "casename = \"g.e22.G1850ECO_JRA_HR.TL319_t13.003\"\n", 26 | "year = 1\n", 27 | "campaign_root = os.path.join(\n", 28 | " os.sep,\n", 29 | " \"glade\",\n", 30 | " \"campaign\",\n", 31 | " \"cesm\",\n", 32 | " \"development\",\n", 33 | " \"bgcwg\",\n", 34 | " \"projects\",\n", 35 | " \"hi-res_JRA\",\n", 36 | " \"cases\",\n", 37 | ")\n", 38 | "\n", 39 | "# Set up CaseClass object\n", 40 | "case = utils.CaseClass(\n", 41 | " casename, os.path.join(campaign_root, casename, \"output\")\n", 42 | ")" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 3, 48 | "metadata": { 49 | "execution": {} 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "with open(\"diag_metadata.yaml\", mode=\"r\") as fptr:\n", 54 | " diag_metadata_list = yaml.safe_load(fptr)\n", 55 | "\n", 56 | "# varnames = utils.get_varnames_from_metadata_list(diag_metadata_list)" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": 4, 62 | "metadata": { 63 | "execution": {} 64 | }, 65 | "outputs": [], 66 | "source": [ 67 | "def summary_plots(ds, diag_metadata, save_pngs=False):\n", 68 | " varname = diag_metadata[\"varname\"]\n", 69 | " print(varname)\n", 70 | " da = ds[varname].isel(diag_metadata.get(\"isel_dict\"))\n", 71 | "\n", 72 | " utils.summary_plot_maps(\n", 73 | " ds,\n", 74 | " da,\n", 75 | " diag_metadata,\n", 76 | " save_pngs=save_pngs,\n", 77 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 78 | " )" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": 5, 84 | "metadata": { 85 | "execution": {} 86 | }, 87 | "outputs": [ 88 | { 89 | "name": "stdout", 90 | "output_type": "stream", 91 | "text": [ 92 | "Datasets contain a total of 12 time samples\n", 93 | "Last average written at 0002-01-01 00:00:00\n", 94 | "POC_FLUX_100m\n", 95 | "Datasets contain a total of 12 time samples\n", 96 | "Last average written at 0002-01-01 00:00:00\n", 97 | "CaCO3_FLUX_100m\n", 98 | "Datasets contain a total of 12 time samples\n", 99 | "Last average written at 0002-01-01 00:00:00\n", 100 | "FG_CO2\n", 101 | "Datasets contain a total of 12 time samples\n", 102 | "Last average written at 0002-01-01 00:00:00\n", 103 | "DpCO2\n", 104 | "Datasets contain a total of 12 time samples\n", 105 | "Last average written at 0002-01-01 00:00:00\n", 106 | "PH\n", 107 | "Datasets contain a total of 12 time samples\n", 108 | "Last average written at 0002-01-01 00:00:00\n", 109 | "spChl\n", 110 | "Datasets contain a total of 12 time samples\n", 111 | "Last average written at 0002-01-01 00:00:00\n", 112 | "photoC_sp_zint\n", 113 | "Datasets contain a total of 12 time samples\n", 114 | "Last average written at 0002-01-01 00:00:00\n", 115 | "coccoChl\n", 116 | "Datasets contain a total of 12 time samples\n", 117 | "Last average written at 0002-01-01 00:00:00\n", 118 | "photoC_cocco_zint\n", 119 | "Datasets contain a total of 12 time samples\n", 120 | "Last average written at 0002-01-01 00:00:00\n", 121 | "diatChl\n", 122 | "Datasets contain a total of 12 time samples\n", 123 | "Last average written at 0002-01-01 00:00:00\n", 124 | "photoC_diat_zint\n", 125 | "Datasets contain a total of 12 time samples\n", 126 | "Last average written at 0002-01-01 00:00:00\n", 127 | "diazChl\n", 128 | "Datasets contain a total of 12 time samples\n", 129 | 
"Last average written at 0002-01-01 00:00:00\n", 130 | "photoC_diaz_zint\n", 131 | "Datasets contain a total of 12 time samples\n", 132 | "Last average written at 0002-01-01 00:00:00\n", 133 | "NHx_SURFACE_EMIS\n", 134 | "Datasets contain a total of 12 time samples\n", 135 | "Last average written at 0002-01-01 00:00:00\n", 136 | "NH4\n", 137 | "Datasets contain a total of 12 time samples\n", 138 | "Last average written at 0002-01-01 00:00:00\n", 139 | "O2_ZMIN\n", 140 | "Datasets contain a total of 12 time samples\n", 141 | "Last average written at 0002-01-01 00:00:00\n", 142 | "O2_ZMIN_DEPTH\n", 143 | "Datasets contain a total of 12 time samples\n", 144 | "Last average written at 0002-01-01 00:00:00\n", 145 | "O2\n", 146 | "Datasets contain a total of 12 time samples\n", 147 | "Last average written at 0002-01-01 00:00:00\n", 148 | "PO4\n", 149 | "Datasets contain a total of 12 time samples\n", 150 | "Last average written at 0002-01-01 00:00:00\n", 151 | "PO4\n", 152 | "Datasets contain a total of 12 time samples\n", 153 | "Last average written at 0002-01-01 00:00:00\n", 154 | "NO3\n", 155 | "Datasets contain a total of 12 time samples\n", 156 | "Last average written at 0002-01-01 00:00:00\n", 157 | "NO3\n", 158 | "Datasets contain a total of 12 time samples\n", 159 | "Last average written at 0002-01-01 00:00:00\n", 160 | "SiO3\n", 161 | "Datasets contain a total of 12 time samples\n", 162 | "Last average written at 0002-01-01 00:00:00\n", 163 | "SiO3\n" 164 | ] 165 | } 166 | ], 167 | "source": [ 168 | "stream = \"pop.h\"\n", 169 | "# ds = case.gen_dataset(varnames, stream, start_year=year, end_year=year)\n", 170 | "for diag_metadata in diag_metadata_list:\n", 171 | " ds = case.gen_dataset(\n", 172 | " diag_metadata[\"varname\"], stream, start_year=year, end_year=year\n", 173 | " )\n", 174 | " summary_plots(ds, diag_metadata, save_pngs=True)" 175 | ] 176 | } 177 | ], 178 | "metadata": { 179 | "kernelspec": { 180 | "display_name": "Python [conda env:hires-marbl]", 181 | "language": "python", 182 | "name": "conda-env-hires-marbl-py" 183 | }, 184 | "language_info": { 185 | "codemirror_mode": { 186 | "name": "ipython", 187 | "version": 3 188 | }, 189 | "file_extension": ".py", 190 | "mimetype": "text/x-python", 191 | "name": "python", 192 | "nbconvert_exporter": "python", 193 | "pygments_lexer": "ipython3", 194 | "version": "3.7.8" 195 | } 196 | }, 197 | "nbformat": 4, 198 | "nbformat_minor": 4 199 | } 200 | -------------------------------------------------------------------------------- /notebooks/plot_suite_maps_0001_004.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "execution": {} 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import os\n", 12 | "import yaml\n", 13 | "\n", 14 | "import utils\n", 15 | "\n", 16 | "%matplotlib inline" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "casename = \"g.e22.G1850ECO_JRA_HR.TL319_t13.004\"\n", 26 | "year = 1\n", 27 | "campaign_root = os.path.join(\n", 28 | " os.sep,\n", 29 | " \"glade\",\n", 30 | " \"campaign\",\n", 31 | " \"cesm\",\n", 32 | " \"development\",\n", 33 | " \"bgcwg\",\n", 34 | " \"projects\",\n", 35 | " \"hi-res_JRA\",\n", 36 | " \"cases\",\n", 37 | ")\n", 38 | "\n", 39 | "# Set up CaseClass object\n", 40 | "case = utils.CaseClass(\n", 41 | " casename, os.path.join(campaign_root, casename, \"output\")\n", 
42 | ")" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 3, 48 | "metadata": { 49 | "execution": {} 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "with open(\"diag_metadata.yaml\", mode=\"r\") as fptr:\n", 54 | " diag_metadata_list = yaml.safe_load(fptr)\n", 55 | "\n", 56 | "# varnames = utils.get_varnames_from_metadata_list(diag_metadata_list)" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": 4, 62 | "metadata": { 63 | "execution": {} 64 | }, 65 | "outputs": [], 66 | "source": [ 67 | "def summary_plots(ds, diag_metadata, save_pngs=False):\n", 68 | " varname = diag_metadata[\"varname\"]\n", 69 | " print(varname)\n", 70 | " da = ds[varname].isel(diag_metadata.get(\"isel_dict\"))\n", 71 | "\n", 72 | " utils.summary_plot_maps(\n", 73 | " ds,\n", 74 | " da,\n", 75 | " diag_metadata,\n", 76 | " save_pngs=save_pngs,\n", 77 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 78 | " )" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": 5, 84 | "metadata": { 85 | "execution": {} 86 | }, 87 | "outputs": [ 88 | { 89 | "name": "stdout", 90 | "output_type": "stream", 91 | "text": [ 92 | "Datasets contain a total of 12 time samples\n", 93 | "Last average written at 0002-01-01 00:00:00\n", 94 | "POC_FLUX_100m\n", 95 | "Datasets contain a total of 12 time samples\n", 96 | "Last average written at 0002-01-01 00:00:00\n", 97 | "CaCO3_FLUX_100m\n", 98 | "Datasets contain a total of 12 time samples\n", 99 | "Last average written at 0002-01-01 00:00:00\n", 100 | "FG_CO2\n", 101 | "Datasets contain a total of 12 time samples\n", 102 | "Last average written at 0002-01-01 00:00:00\n", 103 | "DpCO2\n", 104 | "Datasets contain a total of 12 time samples\n", 105 | "Last average written at 0002-01-01 00:00:00\n", 106 | "PH\n", 107 | "Datasets contain a total of 12 time samples\n", 108 | "Last average written at 0002-01-01 00:00:00\n", 109 | "spChl\n", 110 | "Datasets contain a total of 12 time samples\n", 111 | "Last average written at 0002-01-01 00:00:00\n", 112 | "photoC_sp_zint\n", 113 | "Datasets contain a total of 12 time samples\n", 114 | "Last average written at 0002-01-01 00:00:00\n", 115 | "coccoChl\n", 116 | "Datasets contain a total of 12 time samples\n", 117 | "Last average written at 0002-01-01 00:00:00\n", 118 | "photoC_cocco_zint\n", 119 | "Datasets contain a total of 12 time samples\n", 120 | "Last average written at 0002-01-01 00:00:00\n", 121 | "diatChl\n", 122 | "Datasets contain a total of 12 time samples\n", 123 | "Last average written at 0002-01-01 00:00:00\n", 124 | "photoC_diat_zint\n", 125 | "Datasets contain a total of 12 time samples\n", 126 | "Last average written at 0002-01-01 00:00:00\n", 127 | "diazChl\n", 128 | "Datasets contain a total of 12 time samples\n", 129 | "Last average written at 0002-01-01 00:00:00\n", 130 | "photoC_diaz_zint\n", 131 | "Datasets contain a total of 12 time samples\n", 132 | "Last average written at 0002-01-01 00:00:00\n", 133 | "NHx_SURFACE_EMIS\n", 134 | "Datasets contain a total of 12 time samples\n", 135 | "Last average written at 0002-01-01 00:00:00\n", 136 | "NH4\n", 137 | "Datasets contain a total of 12 time samples\n", 138 | "Last average written at 0002-01-01 00:00:00\n", 139 | "O2_ZMIN\n", 140 | "Datasets contain a total of 12 time samples\n", 141 | "Last average written at 0002-01-01 00:00:00\n", 142 | "O2_ZMIN_DEPTH\n", 143 | "Datasets contain a total of 12 time samples\n", 144 | "Last average written at 0002-01-01 00:00:00\n", 145 | "O2\n", 146 | 
"Datasets contain a total of 12 time samples\n", 147 | "Last average written at 0002-01-01 00:00:00\n", 148 | "PO4\n", 149 | "Datasets contain a total of 12 time samples\n", 150 | "Last average written at 0002-01-01 00:00:00\n", 151 | "PO4\n", 152 | "Datasets contain a total of 12 time samples\n", 153 | "Last average written at 0002-01-01 00:00:00\n", 154 | "NO3\n", 155 | "Datasets contain a total of 12 time samples\n", 156 | "Last average written at 0002-01-01 00:00:00\n", 157 | "NO3\n", 158 | "Datasets contain a total of 12 time samples\n", 159 | "Last average written at 0002-01-01 00:00:00\n", 160 | "SiO3\n", 161 | "Datasets contain a total of 12 time samples\n", 162 | "Last average written at 0002-01-01 00:00:00\n", 163 | "SiO3\n" 164 | ] 165 | } 166 | ], 167 | "source": [ 168 | "stream = \"pop.h\"\n", 169 | "# ds = case.gen_dataset(varnames, stream, start_year=year, end_year=year)\n", 170 | "for diag_metadata in diag_metadata_list:\n", 171 | " ds = case.gen_dataset(\n", 172 | " diag_metadata[\"varname\"], stream, start_year=year, end_year=year\n", 173 | " )\n", 174 | " summary_plots(ds, diag_metadata, save_pngs=True)" 175 | ] 176 | } 177 | ], 178 | "metadata": { 179 | "kernelspec": { 180 | "display_name": "Python [conda env:hires-marbl]", 181 | "language": "python", 182 | "name": "conda-env-hires-marbl-py" 183 | }, 184 | "language_info": { 185 | "codemirror_mode": { 186 | "name": "ipython", 187 | "version": 3 188 | }, 189 | "file_extension": ".py", 190 | "mimetype": "text/x-python", 191 | "name": "python", 192 | "nbconvert_exporter": "python", 193 | "pygments_lexer": "ipython3", 194 | "version": "3.7.8" 195 | } 196 | }, 197 | "nbformat": 4, 198 | "nbformat_minor": 4 199 | } 200 | -------------------------------------------------------------------------------- /notebooks/trend_maps.003.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import glob\n", 10 | "import os\n", 11 | "\n", 12 | "import dask.distributed\n", 13 | "import matplotlib.pyplot as plt\n", 14 | "import ncar_jobqueue\n", 15 | "import xarray as xr\n", 16 | "\n", 17 | "import utils\n", 18 | "from utils.utils import time_set_mid\n", 19 | "\n", 20 | "%matplotlib inline\n", 21 | "%load_ext autoreload\n", 22 | "%autoreload 2\n" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 2, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "casename = \"g.e22.G1850ECO_JRA_HR.TL319_t13.003\"\n", 32 | "campaign_root = os.path.join(\n", 33 | " os.sep,\n", 34 | " \"glade\",\n", 35 | " \"campaign\",\n", 36 | " \"cesm\",\n", 37 | " \"development\",\n", 38 | " \"bgcwg\",\n", 39 | " \"projects\",\n", 40 | " \"hi-res_JRA\",\n", 41 | " \"cases\",\n", 42 | ")\n", 43 | "\n", 44 | "# Set up CaseClass object\n", 45 | "case = utils.CaseClass(\n", 46 | " casename, os.path.join(campaign_root, casename, \"output\")\n", 47 | ")" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 3, 53 | "metadata": {}, 54 | "outputs": [ 55 | { 56 | "name": "stdout", 57 | "output_type": "stream", 58 | "text": [ 59 | "dashboard_link=https://jupyterhub.hpc.ucar.edu/stable/user/mlevy/proxy/8787/status\n" 60 | ] 61 | } 62 | ], 63 | "source": [ 64 | "cluster = ncar_jobqueue.NCARCluster(\n", 65 | " cores=2, memory=\"64 GB\", processes=2, walltime=\"6:00:00\"\n", 66 | ")\n", 67 | "cluster.scale(n=8) # n = number of workers\n", 68 | 
"print(f\"dashboard_link={cluster.dashboard_link}\")\n", 69 | "client = dask.distributed.Client(cluster)" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 4, 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | "Datasets contain a total of 36 time samples\n", 82 | "Last average written at 0005-01-01 00:00:00\n" 83 | ] 84 | } 85 | ], 86 | "source": [ 87 | "stream = \"pop.h\"\n", 88 | "varnames = [\"PO4\", \"NO3\", \"SiO3\", \"O2\", \"DIC\", \"ALK\"]\n", 89 | "ds_4d = case.gen_dataset(\n", 90 | " varnames,\n", 91 | " stream,\n", 92 | " start_year=2,\n", 93 | " end_year=4,\n", 94 | ")\n", 95 | "\n", 96 | "ds_3d = ds_4d.isel(z_t=28).chunk({\"time\": 36, \"nlat\": 300, \"nlon\": 900})" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": 5, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "za_dir = f\"/glade/campaign/cesm/development/bgcwg/projects/hi-res_JRA/cases/{casename}/output/ocn/proc/za\"\n", 106 | "ds_list = []\n", 107 | "for var in varnames:\n", 108 | " filename_pattern = f\"{za_dir}/za_{casename}.pop.h.{var}.??????-??????.nc\"\n", 109 | " za_filenames = glob.glob(filename_pattern)\n", 110 | " za_filenames.sort()\n", 111 | " ds_tmp = xr.open_mfdataset(\n", 112 | " za_filenames,\n", 113 | " data_vars=\"minimal\",\n", 114 | " compat=\"override\",\n", 115 | " coords=\"minimal\",\n", 116 | " ).chunk({\"basins\": 1, \"time\": 36, \"z_t\": 62, \"lat_t\": 2400})\n", 117 | " ds_list.append(\n", 118 | " time_set_mid(ds_tmp, \"time\").assign_coords(\n", 119 | " {\"basins\": [\"Global\", \"Pacific\", \"Indian\", \"Atlantic\"]}\n", 120 | " )\n", 121 | " )\n", 122 | "ds_za = xr.merge(ds_list, compat=\"override\", join=\"left\")\n", 123 | "ds_za.attrs = ds_3d.attrs" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 6, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "def trend_plots(varname, map_vminmax=None, za_vminmax=None, save_pngs=False):\n", 133 | " da = ds_3d[varname]\n", 134 | " utils.trend_plot(\n", 135 | " ds_3d,\n", 136 | " da,\n", 137 | " vmin=-map_vminmax,\n", 138 | " vmax=map_vminmax,\n", 139 | " save_pngs=save_pngs,\n", 140 | " isel_dict={\"z_t\": 0},\n", 141 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 142 | " )\n", 143 | "\n", 144 | " for basin_ind in range(ds_za.dims[\"basins\"]):\n", 145 | " da = ds_za[varname].isel(basins=basin_ind)\n", 146 | " utils.trend_plot(\n", 147 | " ds_za,\n", 148 | " da,\n", 149 | " vmin=-za_vminmax,\n", 150 | " vmax=za_vminmax,\n", 151 | " invert_yaxis=True,\n", 152 | " save_pngs=save_pngs,\n", 153 | " isel_dict={\"basins\": 0},\n", 154 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 155 | " )" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 7, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "trend_plots(\"PO4\", map_vminmax=0.1, za_vminmax=0.05, save_pngs=True)" 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": 8, 170 | "metadata": {}, 171 | "outputs": [], 172 | "source": [ 173 | "trend_plots(\"NO3\", map_vminmax=1.0, za_vminmax=0.5, save_pngs=True)" 174 | ] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "execution_count": 9, 179 | "metadata": {}, 180 | "outputs": [], 181 | "source": [ 182 | "trend_plots(\"SiO3\", map_vminmax=5.0, za_vminmax=2.0, save_pngs=True)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 10, 188 | 
"metadata": {}, 189 | "outputs": [], 190 | "source": [ 191 | "trend_plots(\"O2\", map_vminmax=5.0, za_vminmax=2.0, save_pngs=True)" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": 11, 197 | "metadata": {}, 198 | "outputs": [], 199 | "source": [ 200 | "trend_plots(\"DIC\", map_vminmax=10.0, za_vminmax=5.0, save_pngs=True)" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 12, 206 | "metadata": {}, 207 | "outputs": [], 208 | "source": [ 209 | "trend_plots(\"ALK\", map_vminmax=10.0, za_vminmax=5.0, save_pngs=True)" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 13, 215 | "metadata": {}, 216 | "outputs": [], 217 | "source": [ 218 | "client.close()\n", 219 | "cluster.close()" 220 | ] 221 | } 222 | ], 223 | "metadata": { 224 | "kernelspec": { 225 | "display_name": "Python [conda env:miniconda3-hires-marbl]", 226 | "language": "python", 227 | "name": "conda-env-miniconda3-hires-marbl-py" 228 | }, 229 | "language_info": { 230 | "codemirror_mode": { 231 | "name": "ipython", 232 | "version": 3 233 | }, 234 | "file_extension": ".py", 235 | "mimetype": "text/x-python", 236 | "name": "python", 237 | "nbconvert_exporter": "python", 238 | "pygments_lexer": "ipython3", 239 | "version": "3.7.8" 240 | } 241 | }, 242 | "nbformat": 4, 243 | "nbformat_minor": 4 244 | } 245 | -------------------------------------------------------------------------------- /notebooks/trend_maps.004.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import glob\n", 11 | "\n", 12 | "import dask.distributed\n", 13 | "import matplotlib.pyplot as plt\n", 14 | "import ncar_jobqueue\n", 15 | "import xarray as xr\n", 16 | "\n", 17 | "import utils\n", 18 | "from utils.utils import time_set_mid\n", 19 | "\n", 20 | "%matplotlib inline\n", 21 | "%load_ext autoreload\n", 22 | "%autoreload 2\n" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 2, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "casename = \"g.e22.G1850ECO_JRA_HR.TL319_t13.004\"\n", 32 | "campaign_root = os.path.join(\n", 33 | " os.sep,\n", 34 | " \"glade\",\n", 35 | " \"campaign\",\n", 36 | " \"cesm\",\n", 37 | " \"development\",\n", 38 | " \"bgcwg\",\n", 39 | " \"projects\",\n", 40 | " \"hi-res_JRA\",\n", 41 | " \"cases\",\n", 42 | ")\n", 43 | "\n", 44 | "# Set up CaseClass object\n", 45 | "case = utils.CaseClass(\n", 46 | " casename, os.path.join(campaign_root, casename, \"output\")\n", 47 | ")" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 3, 53 | "metadata": {}, 54 | "outputs": [ 55 | { 56 | "name": "stdout", 57 | "output_type": "stream", 58 | "text": [ 59 | "dashboard_link=https://jupyterhub.hpc.ucar.edu/stable/user/mlevy/proxy/8787/status\n" 60 | ] 61 | } 62 | ], 63 | "source": [ 64 | "cluster = ncar_jobqueue.NCARCluster(\n", 65 | " cores=2, memory=\"64 GB\", processes=2, walltime=\"6:00:00\"\n", 66 | ")\n", 67 | "cluster.scale(n=8) # n = number of workers\n", 68 | "print(f\"dashboard_link={cluster.dashboard_link}\")\n", 69 | "client = dask.distributed.Client(cluster)" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 4, 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | "Datasets contain a total of 36 time samples\n", 82 | "Last average 
written at 0005-01-01 00:00:00\n" 83 | ] 84 | } 85 | ], 86 | "source": [ 87 | "stream = \"pop.h\"\n", 88 | "varnames = [\"PO4\", \"NO3\", \"SiO3\", \"O2\", \"DIC\", \"ALK\"]\n", 89 | "ds_4d = case.gen_dataset(\n", 90 | " varnames,\n", 91 | " stream,\n", 92 | " start_year=2,\n", 93 | " end_year=4,\n", 94 | ")\n", 95 | "\n", 96 | "ds_3d = ds_4d.isel(z_t=28).chunk({\"time\": 36, \"nlat\": 300, \"nlon\": 900})" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": 5, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "za_dir = f\"/glade/campaign/cesm/development/bgcwg/projects/hi-res_JRA/cases/{casename}/output/ocn/proc/za\"\n", 106 | "ds_list = []\n", 107 | "for var in varnames:\n", 108 | " filename_pattern = f\"{za_dir}/za_{casename}.pop.h.{var}.??????-??????.nc\"\n", 109 | " za_filenames = glob.glob(filename_pattern)\n", 110 | " za_filenames.sort()\n", 111 | " ds_tmp = xr.open_mfdataset(\n", 112 | " za_filenames,\n", 113 | " data_vars=\"minimal\",\n", 114 | " compat=\"override\",\n", 115 | " coords=\"minimal\",\n", 116 | " ).chunk({\"basins\": 1, \"time\": 36, \"z_t\": 62, \"lat_t\": 2400})\n", 117 | " ds_list.append(\n", 118 | " time_set_mid(ds_tmp, \"time\").assign_coords(\n", 119 | " {\"basins\": [\"Global\", \"Pacific\", \"Indian\", \"Atlantic\"]}\n", 120 | " )\n", 121 | " )\n", 122 | "ds_za = xr.merge(ds_list, compat=\"override\", join=\"left\")\n", 123 | "ds_za.attrs = ds_3d.attrs" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 6, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "def trend_plots(varname, map_vminmax=None, za_vminmax=None, save_pngs=False):\n", 133 | " # da = ds_4d[varname][:, 28, :, :].chunk({\"time\": 36, \"nlat\": 300, \"nlon\": 900})\n", 134 | " da = ds_3d[varname]\n", 135 | " utils.trend_plot(\n", 136 | " ds_3d,\n", 137 | " da,\n", 138 | " vmin=-map_vminmax,\n", 139 | " vmax=map_vminmax,\n", 140 | " save_pngs=save_pngs,\n", 141 | " isel_dict={\"z_t\": 0},\n", 142 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 143 | " )\n", 144 | "\n", 145 | " for basin_ind in range(ds_za.dims[\"basins\"]):\n", 146 | " da = ds_za[varname].isel(basins=basin_ind)\n", 147 | " da = da\n", 148 | " utils.trend_plot(\n", 149 | " ds_za,\n", 150 | " da,\n", 151 | " vmin=-za_vminmax,\n", 152 | " vmax=za_vminmax,\n", 153 | " invert_yaxis=True,\n", 154 | " save_pngs=save_pngs,\n", 155 | " isel_dict={\"basins\": 0},\n", 156 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 157 | " )" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": 7, 163 | "metadata": {}, 164 | "outputs": [], 165 | "source": [ 166 | "trend_plots(\"PO4\", map_vminmax=0.1, za_vminmax=0.05, save_pngs=True)" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 8, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "trend_plots(\"NO3\", map_vminmax=1.0, za_vminmax=0.5, save_pngs=True)" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 9, 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "trend_plots(\"SiO3\", map_vminmax=5.0, za_vminmax=2.0, save_pngs=True)" 185 | ] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "execution_count": 10, 190 | "metadata": {}, 191 | "outputs": [], 192 | "source": [ 193 | "trend_plots(\"O2\", map_vminmax=5.0, za_vminmax=2.0, save_pngs=True)" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 11, 199 | "metadata": {}, 200 | "outputs": 
[], 201 | "source": [ 202 | "trend_plots(\"DIC\", map_vminmax=10.0, za_vminmax=5.0, save_pngs=True)" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": 12, 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [ 211 | "trend_plots(\"ALK\", map_vminmax=10.0, za_vminmax=5.0, save_pngs=True)" 212 | ] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "execution_count": 13, 217 | "metadata": {}, 218 | "outputs": [], 219 | "source": [ 220 | "client.close()\n", 221 | "cluster.close()" 222 | ] 223 | } 224 | ], 225 | "metadata": { 226 | "kernelspec": { 227 | "display_name": "Python [conda env:miniconda3-hires-marbl]", 228 | "language": "python", 229 | "name": "conda-env-miniconda3-hires-marbl-py" 230 | }, 231 | "language_info": { 232 | "codemirror_mode": { 233 | "name": "ipython", 234 | "version": 3 235 | }, 236 | "file_extension": ".py", 237 | "mimetype": "text/x-python", 238 | "name": "python", 239 | "nbconvert_exporter": "python", 240 | "pygments_lexer": "ipython3", 241 | "version": "3.7.8" 242 | } 243 | }, 244 | "nbformat": 4, 245 | "nbformat_minor": 4 246 | } 247 | -------------------------------------------------------------------------------- /notebooks/plot_suite_003.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import warnings\n", 11 | "\n", 12 | "import dask\n", 13 | "import ncar_jobqueue\n", 14 | "import yaml\n", 15 | "\n", 16 | "import utils\n", 17 | "\n", 18 | "%matplotlib inline" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "with open(\"diag_metadata.yaml\", mode=\"r\") as fptr:\n", 28 | " diag_metadata_list = yaml.safe_load(fptr)\n", 29 | "\n", 30 | "# varnames = utils.get_varnames_from_metadata_list(diag_metadata_list)" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 3, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "def summary_plots(ds, diag_metadata, save_pngs=False):\n", 40 | " varname = diag_metadata[\"varname\"]\n", 41 | " print(varname)\n", 42 | " da = ds[varname].isel(diag_metadata.get(\"isel_dict\"))\n", 43 | "\n", 44 | " utils.summary_plot_global_ts(\n", 45 | " ds,\n", 46 | " da,\n", 47 | " diag_metadata,\n", 48 | " time_coarsen_len=12,\n", 49 | " save_pngs=save_pngs,\n", 50 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 51 | " )\n", 52 | "\n", 53 | " utils.summary_plot_histogram(\n", 54 | " ds,\n", 55 | " da,\n", 56 | " diag_metadata,\n", 57 | " save_pngs=save_pngs,\n", 58 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 59 | " )" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 4, 65 | "metadata": {}, 66 | "outputs": [ 67 | { 68 | "name": "stdout", 69 | "output_type": "stream", 70 | "text": [ 71 | "dashboard_link=https://jupyterhub.hpc.ucar.edu/stable/user/mlevy/proxy/8787/status\n" 72 | ] 73 | } 74 | ], 75 | "source": [ 76 | "cluster = ncar_jobqueue.NCARCluster(\n", 77 | " cores=2, memory=\"64 GB\", processes=2, walltime=\"6:00:00\"\n", 78 | ")\n", 79 | "cluster.scale(n=8) # n = number of workers\n", 80 | "print(f\"dashboard_link={cluster.dashboard_link}\")" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 5, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "casename = 
\"g.e22.G1850ECO_JRA_HR.TL319_t13.003\"\n", 90 | "campaign_root = os.path.join(\n", 91 | " os.sep,\n", 92 | " \"glade\",\n", 93 | " \"campaign\",\n", 94 | " \"cesm\",\n", 95 | " \"development\",\n", 96 | " \"bgcwg\",\n", 97 | " \"projects\",\n", 98 | " \"hi-res_JRA\",\n", 99 | " \"cases\",\n", 100 | ")\n", 101 | "\n", 102 | "# Set up CaseClass object\n", 103 | "case = utils.CaseClass(\n", 104 | " casename, os.path.join(campaign_root, casename, \"output\")\n", 105 | ")" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 6, 111 | "metadata": {}, 112 | "outputs": [ 113 | { 114 | "name": "stdout", 115 | "output_type": "stream", 116 | "text": [ 117 | "Datasets contain a total of 48 time samples\n", 118 | "Last average written at 0005-01-01 00:00:00\n", 119 | "POC_FLUX_100m\n", 120 | "Datasets contain a total of 48 time samples\n", 121 | "Last average written at 0005-01-01 00:00:00\n", 122 | "CaCO3_FLUX_100m\n", 123 | "Datasets contain a total of 48 time samples\n", 124 | "Last average written at 0005-01-01 00:00:00\n", 125 | "FG_CO2\n", 126 | "Datasets contain a total of 48 time samples\n", 127 | "Last average written at 0005-01-01 00:00:00\n", 128 | "DpCO2\n", 129 | "Datasets contain a total of 48 time samples\n", 130 | "Last average written at 0005-01-01 00:00:00\n", 131 | "PH\n", 132 | "Datasets contain a total of 48 time samples\n", 133 | "Last average written at 0005-01-01 00:00:00\n", 134 | "spChl\n", 135 | "Datasets contain a total of 48 time samples\n", 136 | "Last average written at 0005-01-01 00:00:00\n", 137 | "photoC_sp_zint\n", 138 | "Datasets contain a total of 48 time samples\n", 139 | "Last average written at 0005-01-01 00:00:00\n", 140 | "coccoChl\n", 141 | "Datasets contain a total of 48 time samples\n", 142 | "Last average written at 0005-01-01 00:00:00\n", 143 | "photoC_cocco_zint\n", 144 | "Datasets contain a total of 48 time samples\n", 145 | "Last average written at 0005-01-01 00:00:00\n", 146 | "diatChl\n", 147 | "Datasets contain a total of 48 time samples\n", 148 | "Last average written at 0005-01-01 00:00:00\n", 149 | "photoC_diat_zint\n", 150 | "Datasets contain a total of 48 time samples\n", 151 | "Last average written at 0005-01-01 00:00:00\n", 152 | "diazChl\n", 153 | "Datasets contain a total of 48 time samples\n", 154 | "Last average written at 0005-01-01 00:00:00\n", 155 | "photoC_diaz_zint\n", 156 | "Datasets contain a total of 48 time samples\n", 157 | "Last average written at 0005-01-01 00:00:00\n", 158 | "NHx_SURFACE_EMIS\n", 159 | "Datasets contain a total of 48 time samples\n", 160 | "Last average written at 0005-01-01 00:00:00\n", 161 | "NH4\n", 162 | "Datasets contain a total of 48 time samples\n", 163 | "Last average written at 0005-01-01 00:00:00\n", 164 | "O2_ZMIN\n", 165 | "Datasets contain a total of 48 time samples\n", 166 | "Last average written at 0005-01-01 00:00:00\n", 167 | "O2_ZMIN_DEPTH\n", 168 | "Datasets contain a total of 48 time samples\n", 169 | "Last average written at 0005-01-01 00:00:00\n", 170 | "O2\n", 171 | "Datasets contain a total of 48 time samples\n", 172 | "Last average written at 0005-01-01 00:00:00\n", 173 | "PO4\n", 174 | "Datasets contain a total of 48 time samples\n", 175 | "Last average written at 0005-01-01 00:00:00\n", 176 | "PO4\n", 177 | "Datasets contain a total of 48 time samples\n", 178 | "Last average written at 0005-01-01 00:00:00\n", 179 | "NO3\n", 180 | "Datasets contain a total of 48 time samples\n", 181 | "Last average written at 0005-01-01 00:00:00\n", 182 | "NO3\n", 183 | 
"Datasets contain a total of 48 time samples\n", 184 | "Last average written at 0005-01-01 00:00:00\n", 185 | "SiO3\n", 186 | "Datasets contain a total of 48 time samples\n", 187 | "Last average written at 0005-01-01 00:00:00\n", 188 | "SiO3\n" 189 | ] 190 | } 191 | ], 192 | "source": [ 193 | "with dask.distributed.Client(cluster) as client:\n", 194 | " stream = \"pop.h\"\n", 195 | " for diag_metadata in diag_metadata_list:\n", 196 | " ds = case.gen_dataset(diag_metadata[\"varname\"], stream)\n", 197 | " summary_plots(ds, diag_metadata, save_pngs=True)" 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 7, 203 | "metadata": {}, 204 | "outputs": [], 205 | "source": [ 206 | "cluster.close()" 207 | ] 208 | } 209 | ], 210 | "metadata": { 211 | "kernelspec": { 212 | "display_name": "Python [conda env:miniconda3-hires-marbl]", 213 | "language": "python", 214 | "name": "conda-env-miniconda3-hires-marbl-py" 215 | }, 216 | "language_info": { 217 | "codemirror_mode": { 218 | "name": "ipython", 219 | "version": 3 220 | }, 221 | "file_extension": ".py", 222 | "mimetype": "text/x-python", 223 | "name": "python", 224 | "nbconvert_exporter": "python", 225 | "pygments_lexer": "ipython3", 226 | "version": "3.7.8" 227 | } 228 | }, 229 | "nbformat": 4, 230 | "nbformat_minor": 4 231 | } 232 | -------------------------------------------------------------------------------- /notebooks/plot_suite_004.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import warnings\n", 11 | "\n", 12 | "import dask\n", 13 | "import ncar_jobqueue\n", 14 | "import yaml\n", 15 | "\n", 16 | "import utils\n", 17 | "\n", 18 | "%matplotlib inline" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "with open(\"diag_metadata.yaml\", mode=\"r\") as fptr:\n", 28 | " diag_metadata_list = yaml.safe_load(fptr)\n", 29 | "\n", 30 | "# varnames = utils.get_varnames_from_metadata_list(diag_metadata_list)" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 3, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "def summary_plots(ds, diag_metadata, save_pngs=False):\n", 40 | " varname = diag_metadata[\"varname\"]\n", 41 | " print(varname)\n", 42 | " da = ds[varname].isel(diag_metadata.get(\"isel_dict\"))\n", 43 | "\n", 44 | " utils.summary_plot_global_ts(\n", 45 | " ds,\n", 46 | " da,\n", 47 | " diag_metadata,\n", 48 | " time_coarsen_len=12,\n", 49 | " save_pngs=save_pngs,\n", 50 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 51 | " )\n", 52 | "\n", 53 | " utils.summary_plot_histogram(\n", 54 | " ds,\n", 55 | " da,\n", 56 | " diag_metadata,\n", 57 | " save_pngs=save_pngs,\n", 58 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 59 | " )" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 4, 65 | "metadata": {}, 66 | "outputs": [ 67 | { 68 | "name": "stdout", 69 | "output_type": "stream", 70 | "text": [ 71 | "dashboard_link=https://jupyterhub.hpc.ucar.edu/stable/user/mlevy/proxy/8787/status\n" 72 | ] 73 | } 74 | ], 75 | "source": [ 76 | "cluster = ncar_jobqueue.NCARCluster(\n", 77 | " cores=2, memory=\"64 GB\", processes=2, walltime=\"6:00:00\"\n", 78 | ")\n", 79 | "cluster.scale(n=8) # n = number of workers\n", 80 | 
"print(f\"dashboard_link={cluster.dashboard_link}\")" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 5, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "casename = \"g.e22.G1850ECO_JRA_HR.TL319_t13.004\"\n", 90 | "campaign_root = os.path.join(\n", 91 | " os.sep,\n", 92 | " \"glade\",\n", 93 | " \"campaign\",\n", 94 | " \"cesm\",\n", 95 | " \"development\",\n", 96 | " \"bgcwg\",\n", 97 | " \"projects\",\n", 98 | " \"hi-res_JRA\",\n", 99 | " \"cases\",\n", 100 | ")\n", 101 | "\n", 102 | "# Set up CaseClass object\n", 103 | "case = utils.CaseClass(\n", 104 | " casename, os.path.join(campaign_root, casename, \"output\")\n", 105 | ")" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 6, 111 | "metadata": {}, 112 | "outputs": [ 113 | { 114 | "name": "stdout", 115 | "output_type": "stream", 116 | "text": [ 117 | "Datasets contain a total of 204 time samples\n", 118 | "Last average written at 0018-01-01 00:00:00\n", 119 | "POC_FLUX_100m\n", 120 | "Datasets contain a total of 204 time samples\n", 121 | "Last average written at 0018-01-01 00:00:00\n", 122 | "CaCO3_FLUX_100m\n", 123 | "Datasets contain a total of 204 time samples\n", 124 | "Last average written at 0018-01-01 00:00:00\n", 125 | "FG_CO2\n", 126 | "Datasets contain a total of 204 time samples\n", 127 | "Last average written at 0018-01-01 00:00:00\n", 128 | "DpCO2\n", 129 | "Datasets contain a total of 204 time samples\n", 130 | "Last average written at 0018-01-01 00:00:00\n", 131 | "PH\n", 132 | "Datasets contain a total of 204 time samples\n", 133 | "Last average written at 0018-01-01 00:00:00\n", 134 | "spChl\n", 135 | "Datasets contain a total of 204 time samples\n", 136 | "Last average written at 0018-01-01 00:00:00\n", 137 | "photoC_sp_zint\n", 138 | "Datasets contain a total of 204 time samples\n", 139 | "Last average written at 0018-01-01 00:00:00\n", 140 | "coccoChl\n", 141 | "Datasets contain a total of 204 time samples\n", 142 | "Last average written at 0018-01-01 00:00:00\n", 143 | "photoC_cocco_zint\n", 144 | "Datasets contain a total of 204 time samples\n", 145 | "Last average written at 0018-01-01 00:00:00\n", 146 | "diatChl\n", 147 | "Datasets contain a total of 204 time samples\n", 148 | "Last average written at 0018-01-01 00:00:00\n", 149 | "photoC_diat_zint\n", 150 | "Datasets contain a total of 204 time samples\n", 151 | "Last average written at 0018-01-01 00:00:00\n", 152 | "diazChl\n", 153 | "Datasets contain a total of 204 time samples\n", 154 | "Last average written at 0018-01-01 00:00:00\n", 155 | "photoC_diaz_zint\n", 156 | "Datasets contain a total of 204 time samples\n", 157 | "Last average written at 0018-01-01 00:00:00\n", 158 | "NHx_SURFACE_EMIS\n", 159 | "Datasets contain a total of 204 time samples\n", 160 | "Last average written at 0018-01-01 00:00:00\n", 161 | "NH4\n", 162 | "Datasets contain a total of 204 time samples\n", 163 | "Last average written at 0018-01-01 00:00:00\n", 164 | "O2_ZMIN\n", 165 | "Datasets contain a total of 204 time samples\n", 166 | "Last average written at 0018-01-01 00:00:00\n", 167 | "O2_ZMIN_DEPTH\n", 168 | "Datasets contain a total of 204 time samples\n", 169 | "Last average written at 0018-01-01 00:00:00\n", 170 | "O2\n", 171 | "Datasets contain a total of 204 time samples\n", 172 | "Last average written at 0018-01-01 00:00:00\n", 173 | "PO4\n", 174 | "Datasets contain a total of 204 time samples\n", 175 | "Last average written at 0018-01-01 00:00:00\n", 176 | "PO4\n", 177 | "Datasets contain a total of 204 
time samples\n", 178 | "Last average written at 0018-01-01 00:00:00\n", 179 | "NO3\n", 180 | "Datasets contain a total of 204 time samples\n", 181 | "Last average written at 0018-01-01 00:00:00\n", 182 | "NO3\n", 183 | "Datasets contain a total of 204 time samples\n", 184 | "Last average written at 0018-01-01 00:00:00\n", 185 | "SiO3\n", 186 | "Datasets contain a total of 204 time samples\n", 187 | "Last average written at 0018-01-01 00:00:00\n", 188 | "SiO3\n" 189 | ] 190 | } 191 | ], 192 | "source": [ 193 | "with dask.distributed.Client(cluster) as client:\n", 194 | " stream = \"pop.h\"\n", 195 | " # ds = case.gen_dataset(varnames, stream)\n", 196 | " for diag_metadata in diag_metadata_list:\n", 197 | " ds = case.gen_dataset(diag_metadata[\"varname\"], stream, end_year=17)\n", 198 | " summary_plots(ds, diag_metadata, save_pngs=True)" 199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": 7, 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [ 207 | "cluster.close()" 208 | ] 209 | } 210 | ], 211 | "metadata": { 212 | "kernelspec": { 213 | "display_name": "Python [conda env:miniconda3-hires-marbl]", 214 | "language": "python", 215 | "name": "conda-env-miniconda3-hires-marbl-py" 216 | }, 217 | "language_info": { 218 | "codemirror_mode": { 219 | "name": "ipython", 220 | "version": 3 221 | }, 222 | "file_extension": ".py", 223 | "mimetype": "text/x-python", 224 | "name": "python", 225 | "nbconvert_exporter": "python", 226 | "pygments_lexer": "ipython3", 227 | "version": "3.7.8" 228 | } 229 | }, 230 | "nbformat": 4, 231 | "nbformat_minor": 4 232 | } 233 | -------------------------------------------------------------------------------- /notebooks/compare_ts_and_hist_004.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "\n", 11 | "import utils" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": {}, 18 | "outputs": [ 19 | { 20 | "name": "stdout", 21 | "output_type": "stream", 22 | "text": [ 23 | "Checking year 0001...\n", 24 | "... checking stream pop.h.nyear1 ...\n", 25 | "... checking stream pop.h.nday1 ...\n", 26 | "... checking stream pop.h ...\n", 27 | "... checking stream cice.h ...\n", 28 | "All variables available in time series for year 0001\n", 29 | "----\n", 30 | "Checking year 0002...\n", 31 | "... checking stream pop.h.nyear1 ...\n", 32 | "... checking stream pop.h.nday1 ...\n", 33 | "... checking stream pop.h ...\n", 34 | "... checking stream cice.h1 ...\n", 35 | "... checking stream cice.h ...\n", 36 | "All variables available in time series for year 0002\n", 37 | "----\n", 38 | "Checking year 0003...\n", 39 | "... checking stream pop.h.nyear1 ...\n", 40 | "... checking stream pop.h.nday1 ...\n", 41 | "... checking stream pop.h ...\n", 42 | "... checking stream cice.h1 ...\n", 43 | "... checking stream cice.h ...\n", 44 | "All variables available in time series for year 0003\n", 45 | "----\n", 46 | "Checking year 0004...\n", 47 | "... checking stream pop.h.nyear1 ...\n", 48 | "... checking stream pop.h.nday1 ...\n", 49 | "... checking stream pop.h ...\n", 50 | "... checking stream cice.h1 ...\n", 51 | "... checking stream cice.h ...\n", 52 | "All variables available in time series for year 0004\n", 53 | "----\n", 54 | "Checking year 0005...\n", 55 | "... checking stream pop.h.nyear1 ...\n", 56 | "... 
checking stream pop.h.nday1 ...\n", 57 | "... checking stream pop.h ...\n", 58 | "... checking stream cice.h1 ...\n", 59 | "... checking stream cice.h ...\n", 60 | "All variables available in time series for year 0005\n", 61 | "----\n", 62 | "Checking year 0006...\n", 63 | "... checking stream pop.h.nyear1 ...\n", 64 | "... checking stream pop.h.nday1 ...\n", 65 | "... checking stream pop.h ...\n", 66 | "... checking stream cice.h1 ...\n", 67 | "... checking stream cice.h ...\n", 68 | "All variables available in time series for year 0006\n", 69 | "----\n", 70 | "Checking year 0007...\n", 71 | "... checking stream pop.h.nyear1 ...\n", 72 | "... checking stream pop.h.nday1 ...\n", 73 | "... checking stream pop.h ...\n", 74 | "... checking stream cice.h1 ...\n", 75 | "... checking stream cice.h ...\n", 76 | "All variables available in time series for year 0007\n", 77 | "----\n", 78 | "Checking year 0008...\n", 79 | "... checking stream pop.h.nyear1 ...\n", 80 | "... checking stream pop.h.nday1 ...\n", 81 | "... checking stream pop.h ...\n", 82 | "... checking stream cice.h1 ...\n", 83 | "... checking stream cice.h ...\n", 84 | "All variables available in time series for year 0008\n", 85 | "----\n", 86 | "Checking year 0009...\n", 87 | "... checking stream pop.h.nyear1 ...\n", 88 | "... checking stream pop.h.nday1 ...\n", 89 | "... checking stream pop.h ...\n", 90 | "... checking stream cice.h1 ...\n", 91 | "... checking stream cice.h ...\n", 92 | "All variables available in time series for year 0009\n", 93 | "----\n", 94 | "Checking year 0010...\n", 95 | "... checking stream pop.h.nyear1 ...\n", 96 | "... checking stream pop.h.nday1 ...\n", 97 | "... checking stream pop.h ...\n", 98 | "... checking stream cice.h1 ...\n", 99 | "... checking stream cice.h ...\n", 100 | "All variables available in time series for year 0010\n", 101 | "----\n", 102 | "Checking year 0011...\n", 103 | "... checking stream pop.h.nyear1 ...\n", 104 | "... checking stream pop.h.nday1 ...\n", 105 | "... checking stream pop.h ...\n", 106 | "... checking stream cice.h1 ...\n", 107 | "... checking stream cice.h ...\n", 108 | "All variables available in time series for year 0011\n", 109 | "----\n", 110 | "Checking year 0012...\n", 111 | "... checking stream pop.h.nyear1 ...\n", 112 | "... checking stream pop.h.nday1 ...\n", 113 | "... checking stream pop.h ...\n", 114 | "... checking stream cice.h1 ...\n", 115 | "... checking stream cice.h ...\n", 116 | "All variables available in time series for year 0012\n", 117 | "----\n", 118 | "Checking year 0013...\n", 119 | "... checking stream pop.h.nyear1 ...\n", 120 | "... checking stream pop.h.nday1 ...\n", 121 | "... checking stream pop.h ...\n", 122 | "... checking stream cice.h1 ...\n", 123 | "... checking stream cice.h ...\n", 124 | "All variables available in time series for year 0013\n", 125 | "----\n", 126 | "Checking year 0014...\n", 127 | "... checking stream pop.h.nyear1 ...\n", 128 | "... checking stream pop.h.nday1 ...\n", 129 | "... checking stream pop.h ...\n", 130 | "... checking stream cice.h1 ...\n", 131 | "... checking stream cice.h ...\n", 132 | "All variables available in time series for year 0014\n", 133 | "----\n", 134 | "Checking year 0015...\n", 135 | "... checking stream pop.h.nyear1 ...\n", 136 | "... checking stream pop.h.nday1 ...\n", 137 | "... checking stream pop.h ...\n", 138 | "... checking stream cice.h1 ...\n", 139 | "... 
checking stream cice.h ...\n", 140 | "All variables available in time series for year 0015\n", 141 | "----\n", 142 | "Checking year 0016...\n", 143 | "... checking stream pop.h.nyear1 ...\n", 144 | "... checking stream pop.h.nday1 ...\n", 145 | "... checking stream pop.h ...\n", 146 | "... checking stream cice.h1 ...\n", 147 | "... checking stream cice.h ...\n", 148 | "All variables available in time series for year 0016\n", 149 | "----\n", 150 | "Checking year 0017...\n", 151 | "... checking stream pop.h.nyear1 ...\n", 152 | "... checking stream pop.h.nday1 ...\n", 153 | "... checking stream pop.h ...\n", 154 | "... checking stream cice.h1 ...\n", 155 | "... checking stream cice.h ...\n", 156 | "All variables available in time series for year 0017\n", 157 | "----\n", 158 | "Checking year 0018...\n", 159 | "... checking stream pop.h.nyear1 ...\n", 160 | "Could not find time series for year 0018\n", 161 | "CPU times: user 4min 46s, sys: 3min 16s, total: 8min 3s\n", 162 | "Wall time: 11min 3s\n" 163 | ] 164 | } 165 | ], 166 | "source": [ 167 | "%%time\n", 168 | "\n", 169 | "casename = \"g.e22.G1850ECO_JRA_HR.TL319_t13.004\"\n", 170 | "\n", 171 | "# Directories to search for netCDF files\n", 172 | "caseroot = os.path.join(os.sep, \"glade\", \"work\", \"mlevy\", \"hi-res_BGC_JRA\", \"cases\")\n", 173 | "campaign_root = os.path.join(os.sep, \"glade\", \"campaign\", \"cesm\", \"development\", \"bgcwg\", \"projects\", \"hi-res_JRA\", \"cases\")\n", 174 | "output_roots = [os.path.join(campaign_root, casename, \"output\")]\n", 175 | "output_roots += utils.gen_output_roots_from_caseroot(os.path.join(caseroot, casename))\n", 176 | "\n", 177 | "utils.timeseries_and_history_comparison(casename, output_roots)" 178 | ] 179 | } 180 | ], 181 | "metadata": { 182 | "kernelspec": { 183 | "display_name": "Python [conda env:hires-marbl]", 184 | "language": "python", 185 | "name": "conda-env-hires-marbl-py" 186 | }, 187 | "language_info": { 188 | "codemirror_mode": { 189 | "name": "ipython", 190 | "version": 3 191 | }, 192 | "file_extension": ".py", 193 | "mimetype": "text/x-python", 194 | "name": "python", 195 | "nbconvert_exporter": "python", 196 | "pygments_lexer": "ipython3", 197 | "version": "3.7.8" 198 | } 199 | }, 200 | "nbformat": 4, 201 | "nbformat_minor": 4 202 | } 203 | -------------------------------------------------------------------------------- /notebooks/plot_suite_1deg.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import warnings\n", 11 | "\n", 12 | "import dask\n", 13 | "import ncar_jobqueue\n", 14 | "import yaml\n", 15 | "\n", 16 | "import utils\n", 17 | "\n", 18 | "%matplotlib inline" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "with open(\"diag_metadata.yaml\", mode=\"r\") as fptr:\n", 28 | " diag_metadata_list = yaml.safe_load(fptr)\n", 29 | "\n", 30 | "# varnames = utils.get_varnames_from_metadata_list(diag_metadata_list)" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 3, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "def summary_plots(ds, diag_metadata, save_pngs=False):\n", 40 | " varname = diag_metadata[\"varname\"]\n", 41 | " print(varname)\n", 42 | " da = ds[varname].isel(diag_metadata.get(\"isel_dict\"))\n", 43 | "\n", 44 | " 
utils.summary_plot_global_ts(\n", 45 | " ds,\n", 46 | " da,\n", 47 | " diag_metadata,\n", 48 | " time_coarsen_len=12,\n", 49 | " save_pngs=save_pngs,\n", 50 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 51 | " )\n", 52 | "\n", 53 | " utils.summary_plot_histogram(\n", 54 | " ds,\n", 55 | " da,\n", 56 | " diag_metadata,\n", 57 | " save_pngs=save_pngs,\n", 58 | " savefig_kwargs={\"dpi\": 72}, # match default behavior of savefig\n", 59 | " )" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 4, 65 | "metadata": {}, 66 | "outputs": [ 67 | { 68 | "name": "stdout", 69 | "output_type": "stream", 70 | "text": [ 71 | "dashboard_link=https://jupyterhub.hpc.ucar.edu/stable/user/mlevy/proxy/36124/status\n" 72 | ] 73 | }, 74 | { 75 | "name": "stderr", 76 | "output_type": "stream", 77 | "text": [ 78 | "/glade/work/mlevy/miniconda3/envs/hires-marbl/lib/python3.7/site-packages/distributed/node.py:155: UserWarning: Port 8787 is already in use.\n", 79 | "Perhaps you already have a cluster running?\n", 80 | "Hosting the HTTP server on port 36124 instead\n", 81 | " http_address[\"port\"], self.http_server.port\n" 82 | ] 83 | } 84 | ], 85 | "source": [ 86 | "cluster = ncar_jobqueue.NCARCluster(\n", 87 | " cores=2, memory=\"64 GB\", processes=2, walltime=\"6:00:00\"\n", 88 | ")\n", 89 | "cluster.scale(n=8) # n = number of workers\n", 90 | "print(f\"dashboard_link={cluster.dashboard_link}\")" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 5, 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [ 99 | "casename = \"g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\"\n", 100 | "year = 95\n", 101 | "campaign_root = os.path.join(\n", 102 | " os.path.sep,\n", 103 | " \"glade\",\n", 104 | " \"campaign\",\n", 105 | " \"cesm\",\n", 106 | " \"development\",\n", 107 | " \"bgcwg\",\n", 108 | " \"projects\",\n", 109 | " \"1deg_cocco_JRA\",\n", 110 | " \"cases\",\n", 111 | ")\n", 112 | "\n", 113 | "# Set up CaseClass object\n", 114 | "case = utils.CaseClass(casename, os.path.join(campaign_root, casename))" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 6, 120 | "metadata": {}, 121 | "outputs": [ 122 | { 123 | "name": "stdout", 124 | "output_type": "stream", 125 | "text": [ 126 | "Datasets contain a total of 120 time samples\n", 127 | "Last average written at 0105-01-01 00:00:00\n", 128 | "POC_FLUX_100m\n", 129 | "Datasets contain a total of 120 time samples\n", 130 | "Last average written at 0105-01-01 00:00:00\n", 131 | "CaCO3_FLUX_100m\n", 132 | "Datasets contain a total of 120 time samples\n", 133 | "Last average written at 0105-01-01 00:00:00\n", 134 | "FG_CO2\n", 135 | "Datasets contain a total of 120 time samples\n", 136 | "Last average written at 0105-01-01 00:00:00\n", 137 | "DpCO2\n", 138 | "Datasets contain a total of 120 time samples\n", 139 | "Last average written at 0105-01-01 00:00:00\n", 140 | "PH\n", 141 | "Datasets contain a total of 120 time samples\n", 142 | "Last average written at 0105-01-01 00:00:00\n", 143 | "spChl\n", 144 | "Datasets contain a total of 120 time samples\n", 145 | "Last average written at 0105-01-01 00:00:00\n", 146 | "photoC_sp_zint\n", 147 | "Datasets contain a total of 120 time samples\n", 148 | "Last average written at 0105-01-01 00:00:00\n", 149 | "coccoChl\n", 150 | "Datasets contain a total of 120 time samples\n", 151 | "Last average written at 0105-01-01 00:00:00\n", 152 | "photoC_cocco_zint\n", 153 | "Datasets contain a total of 120 time samples\n", 154 | "Last average written 
at 0105-01-01 00:00:00\n", 155 | "diatChl\n", 156 | "Datasets contain a total of 120 time samples\n", 157 | "Last average written at 0105-01-01 00:00:00\n", 158 | "photoC_diat_zint\n", 159 | "Datasets contain a total of 120 time samples\n", 160 | "Last average written at 0105-01-01 00:00:00\n", 161 | "diazChl\n", 162 | "Datasets contain a total of 120 time samples\n", 163 | "Last average written at 0105-01-01 00:00:00\n", 164 | "photoC_diaz_zint\n", 165 | "Datasets contain a total of 120 time samples\n", 166 | "Last average written at 0105-01-01 00:00:00\n", 167 | "NHx_SURFACE_EMIS\n", 168 | "Datasets contain a total of 120 time samples\n", 169 | "Last average written at 0105-01-01 00:00:00\n", 170 | "NH4\n", 171 | "Datasets contain a total of 120 time samples\n", 172 | "Last average written at 0105-01-01 00:00:00\n", 173 | "O2_ZMIN\n", 174 | "Datasets contain a total of 120 time samples\n", 175 | "Last average written at 0105-01-01 00:00:00\n", 176 | "O2_ZMIN_DEPTH\n", 177 | "Datasets contain a total of 120 time samples\n", 178 | "Last average written at 0105-01-01 00:00:00\n", 179 | "O2\n", 180 | "Datasets contain a total of 120 time samples\n", 181 | "Last average written at 0105-01-01 00:00:00\n", 182 | "PO4\n", 183 | "Datasets contain a total of 120 time samples\n", 184 | "Last average written at 0105-01-01 00:00:00\n", 185 | "PO4\n", 186 | "Datasets contain a total of 120 time samples\n", 187 | "Last average written at 0105-01-01 00:00:00\n", 188 | "NO3\n", 189 | "Datasets contain a total of 120 time samples\n", 190 | "Last average written at 0105-01-01 00:00:00\n", 191 | "NO3\n", 192 | "Datasets contain a total of 120 time samples\n", 193 | "Last average written at 0105-01-01 00:00:00\n", 194 | "SiO3\n", 195 | "Datasets contain a total of 120 time samples\n", 196 | "Last average written at 0105-01-01 00:00:00\n", 197 | "SiO3\n" 198 | ] 199 | } 200 | ], 201 | "source": [ 202 | "with dask.distributed.Client(cluster) as client:\n", 203 | " stream = \"pop.h\"\n", 204 | " # ds = case.gen_dataset(varnames, stream)\n", 205 | " for diag_metadata in diag_metadata_list:\n", 206 | " ds = case.gen_dataset(\n", 207 | " diag_metadata[\"varname\"], stream, start_year=95, end_year=104\n", 208 | " )\n", 209 | " summary_plots(ds, diag_metadata, save_pngs=True)" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 7, 215 | "metadata": {}, 216 | "outputs": [], 217 | "source": [ 218 | "cluster.close()" 219 | ] 220 | } 221 | ], 222 | "metadata": { 223 | "kernelspec": { 224 | "display_name": "Python [conda env:miniconda3-hires-marbl]", 225 | "language": "python", 226 | "name": "conda-env-miniconda3-hires-marbl-py" 227 | }, 228 | "language_info": { 229 | "codemirror_mode": { 230 | "name": "ipython", 231 | "version": 3 232 | }, 233 | "file_extension": ".py", 234 | "mimetype": "text/x-python", 235 | "name": "python", 236 | "nbconvert_exporter": "python", 237 | "pygments_lexer": "ipython3", 238 | "version": "3.7.8" 239 | } 240 | }, 241 | "nbformat": 4, 242 | "nbformat_minor": 4 243 | } 244 | -------------------------------------------------------------------------------- /notebooks/utils/utils.py: -------------------------------------------------------------------------------- 1 | """utility functions""" 2 | 3 | import math 4 | 5 | import cftime 6 | import numpy as np 7 | import xarray as xr 8 | import pathlib 9 | import pandas as pd 10 | import json 11 | 12 | from .compare_ts_and_hist import compare_ts_and_hist 13 | from .cime import cime_xmlquery 14 | 15 | 
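# A minimal usage sketch of the helpers defined below (illustrative only, not part of the
# module API; the expected values follow the cases exercised in tests/test_utils.py):
#
#     round_sig(12.5, 2)             # -> 12.0  (2 significant digits; halves round to even)
#     round_sig(13.48, 3)            # -> 13.5
#     ds = time_set_mid(ds, "time")  # time values become midpoints of the time-bounds variable
#     yr = time_year_plus_frac(ds, "time")  # numpy array of year-plus-fraction-of-year values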
################################################################################ 16 | 17 | 18 | def repl_coord(coordname, ds1, ds2): 19 | """ 20 | Return copy of ds2 with coordinate coordname replaced, using coordname from ds1. 21 | Drop ds2.coordname.attrs['bounds'] in result, if ds2.coordname has bounds attribute. 22 | Add ds1.coordname.attrs['bounds'] to result, if ds1.coordname has bounds attribute. 23 | Except for coordname, the returned Dataset is a non-deep copy of ds2. 24 | """ 25 | if "bounds" in ds2[coordname].attrs: 26 | tb_name = ds2[coordname].attrs["bounds"] 27 | ds_out = ds2.drop(tb_name).assign_coords({coordname: ds1[coordname]}) 28 | else: 29 | ds_out = ds2.assign_coords({coordname: ds1[coordname]}) 30 | if "bounds" in ds1[coordname].attrs: 31 | tb_name = ds1[coordname].attrs["bounds"] 32 | ds_out = xr.merge([ds_out, ds1[tb_name]]) 33 | return ds_out 34 | 35 | 36 | ################################################################################ 37 | 38 | 39 | def time_set_mid(ds, time_name, deep=False): 40 | """ 41 | Return copy of ds with values of ds[time_name] replaced with midpoints of 42 | ds[time_name].attrs['bounds'], if bounds attribute exists. 43 | Except for time_name, the returned Dataset is a copy of ds. 44 | The copy is deep or not depending on the argument deep. 45 | """ 46 | 47 | ds_out = ds.copy(deep=deep) 48 | 49 | if "bounds" not in ds[time_name].attrs: 50 | return ds_out 51 | 52 | tb_name = ds[time_name].attrs["bounds"] 53 | tb = ds[tb_name] 54 | bounds_dim = next(dim for dim in tb.dims if dim != time_name) 55 | 56 | # Use da = da.copy(data=...), in order to preserve attributes and encoding. 57 | 58 | # If tb is an array of datetime objects then encode time before averaging. 59 | # Do this because computing the mean on datetime objects with xarray fails 60 | # if the time span is 293 or more years. 
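# (Likely hitting the ~292-year range limit of timedelta64[ns], which xarray uses internally for datetime arithmetic; averaging numeric day values via cftime, as done below, avoids that limit.)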
61 | # https://github.com/klindsay28/CESM2_coup_carb_cycle_JAMES/issues/7 62 | if tb.dtype == np.dtype("O"): 63 | units = "days since 0001-01-01" 64 | calendar = "noleap" 65 | tb_vals = cftime.date2num(ds[tb_name].values, units=units, calendar=calendar) 66 | tb_mid_decode = cftime.num2date( 67 | tb_vals.mean(axis=1), units=units, calendar=calendar 68 | ) 69 | ds_out[time_name] = ds[time_name].copy(data=tb_mid_decode) 70 | else: 71 | ds_out[time_name] = ds[time_name].copy(data=tb.mean(bounds_dim)) 72 | 73 | return ds_out 74 | 75 | 76 | ################################################################################ 77 | 78 | 79 | def time_year_plus_frac(ds, time_name): 80 | """return time variable, as numpy array of year plus fraction of year values""" 81 | 82 | # this is straightforward if time has units='days since 0000-01-01' and calendar='noleap' 83 | # so convert specification of time to that representation 84 | 85 | # get time values as an np.ndarray of cftime objects 86 | if np.dtype(ds[time_name]) == np.dtype("O"): 87 | tvals_cftime = ds[time_name].values 88 | else: 89 | tvals_cftime = cftime.num2date( 90 | ds[time_name].values, 91 | ds[time_name].attrs["units"], 92 | ds[time_name].attrs["calendar"], 93 | ) 94 | 95 | # convert cftime objects to representation mentioned above 96 | tvals_days = cftime.date2num( 97 | tvals_cftime, "days since 0000-01-01", calendar="noleap" 98 | ) 99 | 100 | return tvals_days / 365.0 101 | 102 | 103 | ################################################################################ 104 | 105 | 106 | def round_sig(x, ndigits): 107 | """round x to ndigits precision""" 108 | if x == 0: 109 | return x 110 | ndigits_offset = math.floor(math.log10(abs(x))) 111 | return round(x, ndigits - 1 - ndigits_offset) 112 | 113 | 114 | ################################################################################ 115 | 116 | 117 | def get_varnames_from_metadata_list(diag_metadata_list): 118 | varnames = [] 119 | for diag_metadata in diag_metadata_list: 120 | if diag_metadata["varname"] not in varnames: 121 | varnames.append(diag_metadata["varname"]) 122 | return varnames 123 | 124 | 125 | ################################################################################ 126 | 127 | 128 | def gen_output_roots_from_caseroot(caseroot): 129 | if type(caseroot) == str: 130 | caseroot = [caseroot] 131 | if type(caseroot) != list: 132 | raise TypeError("caseroot must be a str or list, {caseroot} is not acceptable") 133 | 134 | output_roots = [] 135 | for single_root in caseroot: 136 | vars_to_check = ["RUNDIR"] 137 | if cime_xmlquery(single_root, "DOUT_S") == "TRUE": 138 | vars_to_check.append("DOUT_S_ROOT") 139 | for xml_var_to_query in vars_to_check: 140 | output_roots.append(cime_xmlquery(single_root, xml_var_to_query)) 141 | 142 | return output_roots 143 | 144 | 145 | ################################################################################ 146 | 147 | 148 | def timeseries_and_history_comparison(casename, output_roots): 149 | for year in range(1, 62): 150 | has_ts = True 151 | found_all = True 152 | print(f"Checking year {year:04}...") 153 | for stream in ["pop.h.nyear1", "pop.h.nday1", "pop.h", "cice.h1", "cice.h"]: 154 | has_hist = True 155 | # There is no cice.h1 time series for 0001 so skip check 156 | if stream == "cice.h1" and year == 1: 157 | continue 158 | # Run test 159 | print(f"... 
checking stream {stream} ...") 160 | comp_test = compare_ts_and_hist(casename, output_roots, stream, year) 161 | # Check ends when there are no history files for comparison 162 | if comp_test == "no time series": 163 | has_ts = False 164 | break 165 | 166 | # Skip years when there are no history files 167 | # (Assume those years were already checked prior to deleting history files) 168 | if comp_test == "no history": 169 | print( 170 | f"Skipping stream {stream} for year {year:04} because there are no history files" 171 | ) 172 | has_hist = False 173 | continue 174 | 175 | found_all = found_all and (comp_test == "same") 176 | 177 | if not has_ts: 178 | print(f"Could not find time series for year {year:04}") 179 | break 180 | if has_hist and found_all: 181 | print(f"All variables available in time series for year {year:04}") 182 | else: 183 | print(f"Could not find time series for all variables in year {year:04}") 184 | print("----") 185 | 186 | 187 | ################################################################################ 188 | 189 | 190 | def dict_copy_vals(src, dst, keys, abort_on_mismatch=True): 191 | for key in keys if type(keys) == list else [keys]: 192 | if key in src: 193 | if key in dst and abort_on_mismatch: 194 | if dst[key] != src[key]: 195 | raise ValueError( 196 | f"{key} exists in dst and src and dst values mismatch" 197 | ) 198 | else: 199 | dst[key] = src[key] 200 | 201 | 202 | ################################################################################ 203 | 204 | 205 | def print_key_metadata(ds, msg=None): 206 | print(64 * "*") 207 | if msg is not None: 208 | print(msg) 209 | print(64 * "*") 210 | for attr_name in ["chunks", "attrs", "encoding"]: 211 | print("ds." + attr_name) 212 | print(getattr(ds, attr_name)) 213 | print(32 * "*") 214 | for attr_name in ["chunks", "attrs", "encoding"]: 215 | print("ds['time']." + attr_name) 216 | print(getattr(ds["time"], attr_name)) 217 | print(32 * "*") 218 | 219 | 220 | ################################################################################ 221 | 222 | 223 | def generate_plot_catalog( 224 | root_dir, image_dir_name="images", extension=".json", use_full_path=True 225 | ): 226 | """ 227 | Generate a single dataframe from plot attributes saved in json files. 228 | Parameters 229 | ---------- 230 | root_dir : str, pathlib.Path 231 | The root directory 232 | extension : str, default `.json.` 233 | file extension to look for. 
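image_dir_name : str, default `images`
    name of the subdirectory of root_dir that holds the plot images and their json metadata
use_full_path : bool, default True
    if True, each catalog entry's `filepath` is converted to an absolute path under root_dir; otherwise the relative path stored in the json metadata is kept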
234 | 235 | Returns 236 | ------- 237 | df : pd.DataFrame 238 | """ 239 | root_dir = pathlib.Path(root_dir) 240 | image_dir = root_dir / image_dir_name 241 | image_dir.exists() 242 | files = sorted(image_dir.rglob(f"**/*{extension}")) 243 | data = [] 244 | if files: 245 | for file in files: 246 | metadata = json.load(file.open()) 247 | if use_full_path: 248 | metadata["filepath"] = ( 249 | (root_dir / metadata["filepath"]).absolute().as_posix() 250 | ) 251 | data.append(metadata) 252 | return pd.DataFrame(data) 253 | else: 254 | print(f"Found 0 files with extension={extension} in {image_dir}.") 255 | return pd.DataFrame() 256 | -------------------------------------------------------------------------------- /notebooks/utils/PlotTypeClass.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import pathlib 4 | 5 | 6 | class _PlotTypeBaseClass(object): 7 | def __init__(self, *args, **kwargs): 8 | raise NotImplementedError("This must be implemented in child class") 9 | 10 | def get_filepaths(self, *args, **kwargs): 11 | raise NotImplementedError("This must be implemented in child class") 12 | 13 | def get_isel_str(self, da, isel_dict): 14 | """ 15 | If diag metadata passes isel_dict option, we need that reflected in file name. 16 | 17 | This subroutine produces an additional string of the form var1_val1.var2_val2..., 18 | where isel_dict is equivalent to da.sel(var1=val1,var2=val2). Note the switch 19 | from .isel to .sel -- val1 should be da.var1[isel_dict[var1]] (the true value, 20 | rather than the index) 21 | """ 22 | # Return empty string if isel_dict is empty dictionary 23 | isel_list = [] 24 | for varname in isel_dict: 25 | value = da[varname].data 26 | try: 27 | # Use two digits after decimal for floats 28 | isel_list.append(f"{varname}--{value:.2f}") 29 | except: 30 | # Otherwise just include the variable value (e.g. strings) 31 | isel_list.append(f"{varname}--{value}") 32 | isel_str = "__".join(isel_list) 33 | if len(isel_str) > 0: 34 | isel_str = "." + isel_str 35 | return isel_str 36 | 37 | def savefig(self, fig, root_dir="images", **kwargs): 38 | """ 39 | Saves fig as a PNG, with the file name determined by the other parameters. 
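The image lands in root_dir/<casename>/<plot_type>/, and any keyword arguments are passed straight through to fig.savefig().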
40 | 41 | Also writes metadata about image file to a JSON file 42 | """ 43 | 44 | # Always use tight_layout 45 | fig.tight_layout() 46 | 47 | # Remove trailing slash from root_dir 48 | if root_dir[-1] == "/": 49 | root_dir = root_dir[:-1] 50 | 51 | # Set up dictionary for metadata 52 | metadata = self.metadata 53 | filepath, jsonpath = self.get_filepaths() 54 | metadata["filepath"] = os.path.join( 55 | self.metadata["plot_type"], f"{filepath}.png" 56 | ) 57 | filepath = os.path.join( 58 | root_dir, self.metadata["casename"], metadata["filepath"] 59 | ) 60 | jsonpath = os.path.join( 61 | root_dir, 62 | self.metadata["casename"], 63 | self.metadata["plot_type"], 64 | f"{jsonpath}.json", 65 | ) 66 | 67 | for path in [filepath, jsonpath]: 68 | parent_dir = pathlib.Path(path).parent 69 | parent_dir.mkdir(parents=True, exist_ok=True) 70 | 71 | fig.savefig(filepath, **kwargs) 72 | with open(jsonpath, "w") as fp: 73 | json.dump(metadata, fp) 74 | 75 | 76 | ################################################################################ 77 | 78 | 79 | class SummaryMapClass(_PlotTypeBaseClass): 80 | def __init__(self, da, casename, datestamp, apply_log10, isel_dict): 81 | self.metadata = dict() 82 | self.metadata["plot_type"] = "summary_map" 83 | self.metadata["varname"] = da.name 84 | self.metadata["casename"] = casename 85 | self.metadata["date"] = datestamp 86 | self.metadata["apply_log10"] = apply_log10 87 | self.metadata["sel_dict"] = dict() 88 | for varname in isel_dict: 89 | value = da[varname].data 90 | try: 91 | # Use two digits after decimal for floats 92 | str_val = f"{value:.2f}" 93 | except: 94 | # Otherwise just include the variable value (e.g. strings) 95 | str_val = f"{value}" 96 | self.metadata["sel_dict"][varname] = str_val 97 | self.isel_str = self.get_isel_str(da, isel_dict) 98 | 99 | def get_filepaths(self): 100 | log_str = "" if not self.metadata["apply_log10"] else ".log10" 101 | file_prefix = f"{self.metadata['varname']}.{self.metadata['date']}{self.isel_str}{log_str}" 102 | filepath = file_prefix 103 | jsonpath = os.path.join("metadata", file_prefix) 104 | 105 | return filepath, jsonpath 106 | 107 | 108 | ################################################################################ 109 | 110 | 111 | class SummaryTSClass(_PlotTypeBaseClass): 112 | def __init__(self, da, casename, start_date, end_date, isel_dict): 113 | self.metadata = dict() 114 | self.metadata["plot_type"] = "time_series" 115 | self.metadata["varname"] = da.name 116 | self.metadata["casename"] = casename 117 | self.metadata["time_period"] = f"{start_date}_{end_date}" 118 | self.metadata["sel_dict"] = dict() 119 | for varname in isel_dict: 120 | value = da[varname].data 121 | try: 122 | # Use two digits after decimal for floats 123 | str_val = f"{value:.2f}" 124 | except: 125 | # Otherwise just include the variable value (e.g. 
strings) 126 | str_val = f"{value}" 127 | self.metadata["sel_dict"][varname] = str_val 128 | self.isel_str = self.get_isel_str(da, isel_dict) 129 | 130 | def get_filepaths(self): 131 | file_prefix = ( 132 | f"{self.metadata['varname']}.{self.metadata['time_period']}{self.isel_str}" 133 | ) 134 | filepath = file_prefix 135 | jsonpath = os.path.join("metadata", file_prefix) 136 | 137 | return filepath, jsonpath 138 | 139 | 140 | ################################################################################ 141 | 142 | 143 | class SummaryHistClass(_PlotTypeBaseClass): 144 | def __init__(self, da, casename, apply_log10, start_date, end_date, isel_dict): 145 | self.metadata = dict() 146 | self.metadata["plot_type"] = "histogram" 147 | self.metadata["varname"] = da.name 148 | self.metadata["casename"] = casename 149 | self.metadata["apply_log10"] = apply_log10 150 | self.metadata["time_period"] = f"{start_date}_{end_date}" 151 | self.metadata["sel_dict"] = dict() 152 | for varname in isel_dict: 153 | value = da[varname].data 154 | try: 155 | # Use two digits after decimal for floats 156 | str_val = f"{value:.2f}" 157 | except: 158 | # Otherwise just include the variable value (e.g. strings) 159 | str_val = f"{value}" 160 | self.metadata["sel_dict"][varname] = str_val 161 | self.isel_str = self.get_isel_str(da, isel_dict) 162 | 163 | def get_filepaths(self): 164 | log_str = "" if not self.metadata["apply_log10"] else ".log10" 165 | file_prefix = f"{self.metadata['varname']}.{self.metadata['time_period']}{self.isel_str}{log_str}" 166 | filepath = file_prefix 167 | jsonpath = os.path.join("metadata", file_prefix) 168 | 169 | return filepath, jsonpath 170 | 171 | 172 | ################################################################################ 173 | 174 | 175 | class TrendMapClass(_PlotTypeBaseClass): 176 | def __init__(self, da, casename, start_date, end_date, isel_dict): 177 | self.metadata = dict() 178 | self.metadata["plot_type"] = "trend_map" 179 | self.metadata["varname"] = da.name 180 | self.metadata["casename"] = casename 181 | self.metadata["time_period"] = f"{start_date}_{end_date}" 182 | self.metadata["sel_dict"] = dict() 183 | for varname in isel_dict: 184 | value = da[varname].data 185 | try: 186 | # Use two digits after decimal for floats 187 | str_val = f"{value:.2f}" 188 | except: 189 | # Otherwise just include the variable value (e.g. strings) 190 | str_val = f"{value}" 191 | self.metadata["sel_dict"][varname] = str_val 192 | self.isel_str = self.get_isel_str(da, isel_dict) 193 | 194 | def get_filepaths(self): 195 | file_prefix = ( 196 | f"{self.metadata['varname']}.{self.metadata['time_period']}{self.isel_str}" 197 | ) 198 | filepath = os.path.join(file_prefix) 199 | jsonpath = os.path.join("metadata", file_prefix) 200 | 201 | return filepath, jsonpath 202 | 203 | 204 | ################################################################################ 205 | 206 | 207 | class TrendHistClass(_PlotTypeBaseClass): 208 | def __init__(self, da, casename, start_date, end_date, isel_dict): 209 | self.metadata = dict() 210 | self.metadata["plot_type"] = "trend_hist" 211 | self.metadata["varname"] = da.name 212 | self.metadata["casename"] = casename 213 | self.metadata["time_period"] = f"{start_date}_{end_date}" 214 | self.metadata["sel_dict"] = dict() 215 | for varname in isel_dict: 216 | value = da[varname].data 217 | try: 218 | # Use two digits after decimal for floats 219 | str_val = f"{value:.2f}" 220 | except: 221 | # Otherwise just include the variable value (e.g. 
strings) 222 | str_val = f"{value}" 223 | self.metadata["sel_dict"][varname] = str_val 224 | self.isel_str = self.get_isel_str(da, isel_dict) 225 | 226 | def get_filepaths(self): 227 | file_prefix = ( 228 | f"{self.metadata['varname']}.{self.metadata['time_period']}{self.isel_str}" 229 | ) 230 | filepath = os.path.join(file_prefix) 231 | jsonpath = os.path.join("metadata", file_prefix) 232 | 233 | return filepath, jsonpath 234 | -------------------------------------------------------------------------------- /notebooks/gen_csv.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from utils import generate_plot_catalog\n", 10 | "import pandas as pd" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": {}, 17 | "outputs": [ 18 | { 19 | "name": "stdout", 20 | "output_type": "stream", 21 | "text": [ 22 | "Generating csv for histogram plots in g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\n", 23 | "Generating csv for summary_map plots in g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\n", 24 | "Generating csv for time_series plots in g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\n", 25 | "Generating csv for trend_hist plots in g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\n", 26 | "Found 0 files with extension=.json in images/g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001/trend_hist.\n", 27 | "Generating csv for trend_map plots in g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\n", 28 | "Found 0 files with extension=.json in images/g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001/trend_map.\n", 29 | "Generating csv for histogram plots in g.e22.G1850ECO_JRA_HR.TL319_t13.003\n", 30 | "Generating csv for summary_map plots in g.e22.G1850ECO_JRA_HR.TL319_t13.003\n", 31 | "Generating csv for time_series plots in g.e22.G1850ECO_JRA_HR.TL319_t13.003\n", 32 | "Generating csv for trend_hist plots in g.e22.G1850ECO_JRA_HR.TL319_t13.003\n", 33 | "Generating csv for trend_map plots in g.e22.G1850ECO_JRA_HR.TL319_t13.003\n", 34 | "Generating csv for histogram plots in g.e22.G1850ECO_JRA_HR.TL319_t13.004\n", 35 | "Generating csv for summary_map plots in g.e22.G1850ECO_JRA_HR.TL319_t13.004\n", 36 | "Generating csv for time_series plots in g.e22.G1850ECO_JRA_HR.TL319_t13.004\n", 37 | "Generating csv for trend_hist plots in g.e22.G1850ECO_JRA_HR.TL319_t13.004\n", 38 | "Generating csv for trend_map plots in g.e22.G1850ECO_JRA_HR.TL319_t13.004\n" 39 | ] 40 | } 41 | ], 42 | "source": [ 43 | "df = dict()\n", 44 | "for casename in [\n", 45 | " \"g.e22b05.G1850ECOIAF_JRA.TL319_g17.cocco.001\",\n", 46 | " \"g.e22.G1850ECO_JRA_HR.TL319_t13.003\",\n", 47 | " \"g.e22.G1850ECO_JRA_HR.TL319_t13.004\",\n", 48 | "]:\n", 49 | " df[casename] = pd.DataFrame({})\n", 50 | " for plottype in [\n", 51 | " \"histogram\",\n", 52 | " \"summary_map\",\n", 53 | " \"time_series\",\n", 54 | " \"trend_hist\",\n", 55 | " \"trend_map\",\n", 56 | " ]:\n", 57 | " print(f\"Generating csv for {plottype} plots in {casename}\")\n", 58 | " df[casename] = pd.concat(\n", 59 | " [\n", 60 | " df[casename],\n", 61 | " generate_plot_catalog(\n", 62 | " \"./\",\n", 63 | " image_dir_name=f\"images/{casename}/{plottype}\",\n", 64 | " use_full_path=False,\n", 65 | " ),\n", 66 | " ]\n", 67 | " )\n", 68 | " df[casename].to_csv(\n", 69 | " f\"images/{casename}/png_catalog.csv\",\n", 70 | " compression=None,\n", 71 | " index=False,\n", 72 | " )" 73 | ] 74 | }, 75 | { 76 
| "cell_type": "code", 77 | "execution_count": 3, 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "data": { 82 | "text/html": [ 83 | "
|     | plot_type | varname | casename | apply_log10 | time_period | sel_dict | filepath | date |
|-----|-----------|---------|----------|-------------|-------------|----------|----------|------|
| 0   | histogram | CaCO3_FLUX_100m | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | False | 0001-01-01_0001-12-31 | {} | histogram/CaCO3_FLUX_100m.0001-01-01_0001-12-3... | NaN |
| 1   | histogram | CaCO3_FLUX_100m | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | True  | 0001-01-01_0001-12-31 | {} | histogram/CaCO3_FLUX_100m.0001-01-01_0001-12-3... | NaN |
| 2   | histogram | CaCO3_FLUX_100m | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | False | 0002-01-01_0002-12-31 | {} | histogram/CaCO3_FLUX_100m.0002-01-01_0002-12-3... | NaN |
| 3   | histogram | CaCO3_FLUX_100m | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | True  | 0002-01-01_0002-12-31 | {} | histogram/CaCO3_FLUX_100m.0002-01-01_0002-12-3... | NaN |
| 4   | histogram | CaCO3_FLUX_100m | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | False | 0003-01-01_0003-12-31 | {} | histogram/CaCO3_FLUX_100m.0003-01-01_0003-12-3... | NaN |
| ... | ...       | ...     | ...      | ...         | ...         | ...      | ...      | ...  |
| 25  | trend_map | SiO3 | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | NaN | 0002-01-01_0004-12-31 | {'basins': 'Atlantic'} | trend_map/SiO3.0002-01-01_0004-12-31.basins--A... | NaN |
| 26  | trend_map | SiO3 | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | NaN | 0002-01-01_0004-12-31 | {'basins': 'Global'} | trend_map/SiO3.0002-01-01_0004-12-31.basins--G... | NaN |
| 27  | trend_map | SiO3 | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | NaN | 0002-01-01_0004-12-31 | {'basins': 'Indian'} | trend_map/SiO3.0002-01-01_0004-12-31.basins--I... | NaN |
| 28  | trend_map | SiO3 | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | NaN | 0002-01-01_0004-12-31 | {'basins': 'Pacific'} | trend_map/SiO3.0002-01-01_0004-12-31.basins--P... | NaN |
| 29  | trend_map | SiO3 | g.e22.G1850ECO_JRA_HR.TL319_t13.004 | NaN | 0002-01-01_0004-12-31 | {'z_t': '35109.35'} | trend_map/SiO3.0002-01-01_0004-12-31.z_t--3510... | NaN |

1244 rows × 8 columns
\n", 236 | "