.xxx} will do the same as
28 | # keyword 1 above, but specifies which sub-section the
29 | # variable is coming from, which is necessary for variables
30 | # that are repeated in different subsections. For example:
31 | #
32 | # diag_basic_info:
33 | # cam_climo_loc: /some/where/${diag_cam_climo.start_year}
34 | #
35 | # diag_cam_climo:
36 | # start_year: 1850
37 | #
38 | # will set "cam_climo_loc" in the diagnostics package to:
39 | # /some/where/1850
40 | #
41 | #Please note that for both 1 and 2 the keywords must be lowercase,
42 | #as uppercase keywords are being reserved for possible
43 | #future developments.
44 | #--------------------
45 | #
46 | ##==============================
47 |
48 | test_var: yay!
49 |
50 | another_var: 5
51 |
52 | good_dict:
53 |
54 | good_var: It says ${test_var} and ${another_var}.
55 |
56 | good_dict_two:
57 |
58 | good_var: ${good_dict.good_var}
59 |
60 | bad_dict:
61 |
62 | bad_var: ${good_var}
63 |
64 | bad_dict_two:
65 |
66 | bad_var_two: ${no_var}
67 |
68 | #END OF FILE
69 |
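For illustration, here is a minimal Python sketch (not part of the ADF itself) of how the two substitution keywords described in the comments above could be resolved once the file is loaded; the `resolve` helper and its regex are hypothetical:

```
import re
import yaml

_VAR = re.compile(r"\$\{([^}]+)\}")

def resolve(text, config):
    """Expand ${var} (top-level) and ${section.var} references in a string."""
    def _lookup(match):
        key = match.group(1)
        if "." in key:                      # keyword 2: ${section.var}
            section, var = key.split(".", 1)
            return str(config[section][var])
        return str(config[key])             # keyword 1: top-level ${var}
    return _VAR.sub(_lookup, text)

config = yaml.safe_load("""
test_var: yay!
diag_cam_climo:
  start_year: 1850
diag_basic_info:
  cam_climo_loc: /some/where/${diag_cam_climo.start_year}
""")

print(resolve(config["diag_basic_info"]["cam_climo_loc"], config))  # /some/where/1850
```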
--------------------------------------------------------------------------------
/lib/externals/CVDP/ncl_scripts/runTasks.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import sys
3 | import time
4 | import os
5 |
6 | #--------------------------- BEGIN USER MODIFICATIONS ---------------------------
7 | EXEC_STR = "ncl -Q"
8 | POLL_INTERVAL = 15. # seconds
9 | MAX_CONCURRENT = int(os.environ.get('MAX_TASKS', 4)) # default to 4 if MAX_TASKS is unset; previous settings: = 4 or = sys.maxint
10 | #--------------------------- END USER MODIFICATIONS -----------------------------
11 |
12 | def launchTask(script):
13 |     # print("Launching: ", script)
14 | task = subprocess.Popen(EXEC_STR + " " + script, shell=True, executable="/bin/bash")
15 | return task
16 |
17 | # ------------------------- main -----------------------------------------------
18 |
19 | # get command-line args, strip out 1st element, which is the name of this script...
20 | scripts = sys.argv[1:]
21 | #print(scripts) # debugging -- remove or comment out
22 |
23 | # fire off up-to MAX_CONCURRENT subprocesses...
24 | tasks = list()
25 | for i,script in enumerate(scripts):
26 | if i >= MAX_CONCURRENT:
27 | break
28 | tasks.append( launchTask(script) )
29 |
30 | #print(scripts)
31 | scripts = scripts[len(tasks):] # remove those scripts we've just launched...
32 | #print(scripts)
33 |
34 | #for task in tasks: # debugging -- remove or comment out
35 | # print(task.pid)
36 |
37 | while len(tasks) > 0:
38 | finishedList = []
39 | for task in tasks:
40 | retCode = task.poll()
41 |         if retCode is not None:
42 |             # print("Task status ", task.pid, ": ", task.poll())
43 |             finishedList.append(task)
44 |
45 |             # more scripts to be run? Replace the finished task with a new launch,
46 |             # so no more than MAX_CONCURRENT tasks run at once.
47 |             if len(scripts) > 0:
48 |                 tasks.append( launchTask(scripts[0]) )
49 |                 del scripts[0]
49 |
50 | for task in finishedList:
51 | tasks.remove(task)
52 |
53 | time.sleep(POLL_INTERVAL)
54 |     # print(".") # Feedback to show the script is doing something; not necessary
55 |
56 | print("runTasks.py: Done with CVDP calculation scripts")
57 |
58 |
59 |
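For comparison, a minimal sketch of the same launch-and-throttle pattern using Python 3's standard `concurrent.futures`; the `MAX_TASKS` default of 4 is an assumption carried over from the "previous settings" comment above:

```
# Sketch only: the same throttled "ncl -Q <script>" pattern via a thread pool.
import os
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor

def run_script(script):
    # Each worker thread blocks until its NCL subprocess finishes.
    return subprocess.run(["ncl", "-Q", script], check=False).returncode

if __name__ == "__main__":
    max_tasks = int(os.environ.get("MAX_TASKS", 4))  # assumed default
    with ThreadPoolExecutor(max_workers=max_tasks) as pool:
        return_codes = list(pool.map(run_script, sys.argv[1:]))
    print("Done with CVDP calculation scripts")
```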
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Ignore editor temporaries and backups
3 | *~
4 | .#*
5 | \#*#
6 | *.swp
7 |
8 | # Ignore non-source python files:
9 | __pycache__
10 | *.py[cod]
11 | *$py.class
12 |
13 | # Distribution / packaging
14 | .Python
build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | *.manifest
29 | *.spec
30 |
31 | # Log files
32 | pip-log.txt
33 | pip-delete-this-directory.txt
34 | *.log
35 |
36 | # Unit test / coverage reports
37 | htmlcov/
38 | .tox/
39 | .coverage
40 | .coverage.*
41 | .cache
42 | .pytest_cache/
43 | nosetests.xml
44 | coverage.xml
45 | *.cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # PyBuilder
53 | target/
54 |
55 | # Jupyter Notebook
56 | .ipynb_checkpoints
57 |
58 | # IPython
59 | profile_default/
60 | ipython_config.py
61 |
62 | # pyenv
63 | .python-version
64 |
65 | # pyflow
66 | __pypackages__/
67 |
68 | # Environment
69 | .env
70 | .venv
71 | venv/
72 | ENV/
73 |
74 | # If you are using PyCharm #
75 | .idea/**/workspace.xml
76 | .idea/**/tasks.xml
77 | .idea/dictionaries
78 | .idea/**/dataSources/
79 | .idea/**/dataSources.ids
80 | .idea/**/dataSources.xml
81 | .idea/**/dataSources.local.xml
82 | .idea/**/sqlDataSources.xml
83 | .idea/**/dynamic.xml
84 | .idea/**/uiDesigner.xml
85 | .idea/**/gradle.xml
86 | .idea/**/libraries
87 | *.iws
/out/
88 |
89 | # Sublime Text
90 | *.tmlanguage.cache
91 | *.tmPreferences.cache
92 | *.stTheme.cache
93 | *.sublime-workspace
94 | *.sublime-project
95 |
96 | # sftp configuration file
97 | sftp-config.json
98 |
99 | # Package control specific files
100 | Package Control.last-run
101 | Package Control.ca-list
102 | Package Control.ca-bundle
103 | Package Control.system-ca-bundle
104 | GitHub.sublime-settings
105 |
106 | # Visual Studio Code #
107 | .vscode/*
108 | !.vscode/settings.json
109 | !.vscode/tasks.json
110 | !.vscode/launch.json
111 | !.vscode/extensions.json
112 | .history
113 |
114 |
115 |
116 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yaml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: File a bug report
3 | title: "Put bug title here!"
4 | labels: ["bug"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thanks for taking the time to fill out this bug report!
10 | - type: dropdown
11 | id: adf-run-type
12 | attributes:
13 | label: ADF run type
14 |       description: Model vs. Model or Model vs. Obs?
15 | multiple: false
16 | options:
17 | - Model vs. Model
18 | - Model vs. Obs
19 | validations:
20 | required: true
21 | - type: textarea
22 | id: what-happened
23 | attributes:
24 | label: What happened?
25 | description: Also tell us, what did you expect to happen?
26 | placeholder: Tell us what you see!
27 | value: "A bug happened!"
28 | validations:
29 | required: true
30 | - type: input
31 | id: adf-hash
32 | attributes:
33 | label: ADF Hash you are using
34 | description: Type "git rev-parse --short HEAD" and copy the provided set of letters/numbers here
35 | placeholder: ex. 1a2b3c
36 | validations:
37 | required: true
38 | - type: dropdown
39 | id: machine
40 | attributes:
41 | label: What machine were you running the ADF on?
42 | multiple: true
43 | options:
44 | - CISL machine
45 | - CGD machine
46 | - Personal Computer
47 | - Other (please explain below)
48 | validations:
49 | required: true
50 | - type: dropdown
51 | id: environment
52 | attributes:
53 | label: What python environment were you using?
54 | multiple: true
55 | options:
56 | - NPL (CISL machines only)
57 | - ADF-provided Conda env
58 | - ADF-provided developmental Conda env
59 |         - Personal Conda env
60 | - Other (please explain below)
61 | validations:
62 | required: true
63 | - type: textarea
64 | id: extra-info
65 | attributes:
66 | label: Extra info
67 | description: Please provide any additional information here that you think might be relevant
68 | placeholder: ex. I am running ADF on the Cloud. A very dark and ominous cloud.
69 | validations:
70 | required: false
71 |
--------------------------------------------------------------------------------
/lib/website_templates/template_run_info.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ADF Run Info
5 |
6 |
7 |
8 |
40 |
41 |
42 | ADF Run Information
43 |
44 |
45 |
46 |
47 |
48 |
49 | {{ run_info|safe }}
50 |
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/lib/website_templates/template_table.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ADF Mean Tables
5 |
6 |
7 |
8 |
40 |
41 |
42 | AMWG Tables
43 |
44 |
45 |
46 |
47 | {% for case_name, html_file in amwg_tables.items() %}
48 |
49 | {{ case_name }}
50 |
51 | {% endfor %}
52 |
53 |
54 |
55 | {{ table_name }}
56 | {{ table_html }}
57 |
58 |
59 |
60 |
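A hedged sketch of how a template like this might be rendered from Python with Jinja2; the sample values below are hypothetical, and the ADF's actual website-generation code lives elsewhere in the repository:

```
# Sketch: rendering template_table.html with Jinja2 (sample values made up).
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("lib/website_templates"))
template = env.get_template("template_table.html")
html = template.render(
    amwg_tables={"case_one": "amwg_table_case_one.html"},  # hypothetical case
    table_name="case_one",
    table_html="<table>...</table>",
)
```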
--------------------------------------------------------------------------------
/lib/website_templates/template_mean_tables.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ADF Mean Tables
5 |
6 |
7 |
8 |
40 |
41 |
42 | AMWG Tables
43 |
44 |
45 |
46 |
47 | {% for case_name, html_file in amwg_tables.items() %}
48 |
49 | {{ case_name }}
50 |
51 | {% endfor %}
52 |
53 |
54 |
55 | {{ disp_table_name }}
56 | {{ disp_table_html }}
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/lib/adf_base.py:
--------------------------------------------------------------------------------
1 | """
2 | Base class for the Atmospheric
3 | Diagnostics Framework (ADF). All
4 | other ADF classes inherit from this
5 | class.
6 |
7 | Currently this class only does two things:
8 |
9 | 1. Creates a debug logger, if requested.
10 |
11 | 2. Defines an ADF-specific function to end
12 | the diagnostics program, if need be.
13 | """
14 |
15 | #++++++++++++++++++++++++++++++
16 | #Import standard python modules
17 | #++++++++++++++++++++++++++++++
18 |
19 | import logging
20 | from datetime import datetime
21 |
22 | #+++++++++++++++++++++++++
23 | # ADF Error-handling class
24 | #+++++++++++++++++++++++++
25 |
26 | class AdfError(RuntimeError):
27 | """Class used to handle ADF value errors
28 | (e.g., log user errors without backtrace)"""
29 |
30 | #+++++++++++++++++
31 | #Define base class
32 | #+++++++++++++++++
33 |
34 | class AdfBase:
35 |
36 | """
37 | Base class for the ADF
38 | """
39 |
40 | def __init__(self, debug = False):
41 |
42 | """
43 |     Initialize CAM diagnostics object.
44 | """
45 |
46 | # Check that debug is in fact a boolean,
47 | # in order to avoid accidental boolean evaluation:
48 | if not isinstance(debug, bool):
49 | raise TypeError("'debug' must be a boolean type (True or False)")
50 |
51 | # Format the datetime object to a string without microseconds
52 | self.__debug_fname = f"ADF_debug_{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}.log"
53 |
54 | # Create debug log, if requested:
55 | if debug:
56 | logging.basicConfig(filename=self.__debug_fname, level=logging.DEBUG)
57 | self.__debug_log = logging.getLogger("ADF")
58 | else:
59 | self.__debug_log = None
60 |
61 |
62 |
63 | #########
64 |
65 | # Create property needed to return the name of the debug log file (debug_fname) to user:
66 | @property
67 | def debug_fname(self):
68 | """Return the "debug_fname" string to the user."""
69 | return self.__debug_fname
70 |
71 | def debug_log(self, msg: str):
72 |
73 | """
74 | Write message to debug log, if enabled.
75 | """
76 |
77 | #If debug log exists, then write message to log:
78 | if self.__debug_log:
79 | self.__debug_log.debug(msg)
80 |
81 | #########
82 |
83 | def end_diag_fail(self, msg: str):
84 |
85 | """
86 | Prints message to log and screen,
87 | and then exits program with an
88 | ADF-specific error.
89 | """
90 |
91 | #Print message to log, if applicable:
92 | self.debug_log(msg)
93 |
94 | print("\n")
95 | raise AdfError(msg)
96 |
97 | #++++++++++++++++++++
98 | #End Class definition
99 | #++++++++++++++++++++
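A brief usage sketch of the class defined above:

```
# Usage sketch for AdfBase (assumes the ADF "lib" directory is on sys.path).
from adf_base import AdfBase, AdfError

adf = AdfBase(debug=True)              # also creates an ADF_debug_*.log file
adf.debug_log("starting diagnostics")  # message lands in the debug log
print(adf.debug_fname)                 # name of the debug log file

try:
    adf.end_diag_fail("example failure")
except AdfError as err:
    print(f"caught: {err}")
```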
--------------------------------------------------------------------------------
/lib/website_templates/template_index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ADF Diagnostics
5 |
6 |
7 |
8 |
33 |
34 |
35 | Plot Types
36 |
37 |
38 |
39 | {% for avail_type in avail_plot_types %}
40 | {% if avail_type in plot_types.keys() %}
41 |
44 | {% else %}
45 |
46 | {{ avail_type }}
47 |
48 | {% endif %}
49 | {% endfor %}
50 |
51 |
52 |
53 | External Diagnostic Packages
54 |
55 |
56 |
57 | {% for avail_type in avail_external_packages %}
58 | {% if avail_type in external_package_links.keys() %}
59 |
62 | {% else %}
63 |
64 | {{ avail_type }}
65 |
66 | {% endif %}
67 | {% endfor %}
68 |
69 |
70 |
71 |
72 |
--------------------------------------------------------------------------------
/lib/externals/CVDP/namelist_obs:
--------------------------------------------------------------------------------
1 | TS | ERSST v5 | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/ersstv5.185401-201912.nc | 1920 | 2018
2 | PSL | ERA20C_ERAI | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/era20c_erai.mon.mean.msl.190001-201901.nc | 1920 | 2018
3 | TREFHT | BEST | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/best.tas.185001-201902.nc | 1920 | 2018
4 | PRECT | GPCC | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/gpcc.pr.10.comb_v2018v6mon.189101-201903.nc | 1920 | 2018
5 | aice_nh | Walsh and Chapman | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/walsh_chapman.NH.seaice.187001-201112.nc | 1953 | 2011
6 | aice_sh | NASA Bootstrap SH | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/seaice_conc_monthly_sh_NASA_Bootstrap.nsidc.v03r01.197811-201702.nc | 1979 | 2016
7 | MOC | CESM1 Forced Ocean Simulation | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/g.e11_LENS.GECOIAF.T62_g16.009.pop.h.MOC.194801-201512.nc | 1948 | 2015
8 |
9 | TS | HadISST | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/hadisst.187001-201912.nc | 1920 | 2018
10 | PSL | CERA20C_ERAI | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/cera20c_erai.mon.mean.msl.190101-201901.nc | 1920 | 2018
11 | TREFHT | GISTEMP | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/gistemp.tas.188001-201912.nc | 1920 | 2018
12 | PRECT | GPCC | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/gpcc.pr.10.comb_v2018v6mon.189101-201903.nc | 1920 | 2018
13 | aice_nh | Walsh and Chapman | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/walsh_chapman.NH.seaice.187001-201112.nc | 1953 | 2011
14 | aice_sh | NASA Bootstrap SH | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/seaice_conc_monthly_sh_NASA_Bootstrap.nsidc.v03r01.197811-201702.nc | 1979 | 2016
15 | MOC | CESM1 Forced Ocean Simulation | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/g.e11_LENS.GECOIAF.T62_g16.009.pop.h.MOC.194801-201512.nc | 1948 | 2015
16 |
17 | TS | ERSST v5 | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/ersstv5.185401-201912.nc | 1979 | 2018
18 | PSL | ERAI | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/erai.mon.mean.msl.197901-201901.nc | 1979 | 2018
19 | TREFHT | BEST | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/best.tas.185001-201902.nc | 1979 | 2018
20 | PRECT | GPCP | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/gpcp.mon.mean.197901-201903.nc | 1979 | 2018
21 | aice_nh | NASA CDR NH | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/seaice_conc_monthly_nh_NOAA_NSIDC_CDR.v03r01.197811-201702.nc | 1979 | 2016
22 | aice_sh | NASA CDR SH | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/seaice_conc_monthly_sh_NOAA_NSIDC_CDR.v03r01.197811-201702.nc | 1979 | 2016
23 | MOC | CESM1 Forced Ocean Simulation | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/g.e11_LENS.GECOIAF.T62_g16.009.pop.h.MOC.194801-201512.nc | 1979 | 2015
24 |
25 | TS | HadISST | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/hadisst.187001-201912.nc | 1980 | 2017
26 | PSL | MERRA2 | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/merra2.mon.SLP.198001-201803.nc | 1980 | 2017
27 | TREFHT | GISTEMP | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/gistemp.tas.188001-201912.nc | 1980 | 2017
28 | PRECT | GPCP | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/gpcp.mon.mean.197901-201903.nc | 1980 | 2017
29 | aice_nh | NASA Bootstrap NH | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/seaice_conc_monthly_nh_NASA_Bootstrap.nsidc.v03r01.197811-201702.nc | 1980 | 2016
30 | aice_sh | NASA Bootstrap SH | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/seaice_conc_monthly_sh_NASA_Bootstrap.nsidc.v03r01.197811-201702.nc | 1980 | 2016
31 | MOC | CESM1 Forced Ocean Simulation | /glade/campaign/cgd/cas/asphilli/CVDP-OBS/g.e11_LENS.GECOIAF.T62_g16.009.pop.h.MOC.194801-201512.nc | 1980 | 2015
32 |
33 |
--------------------------------------------------------------------------------
/lib/externals/CVDP/namelist:
--------------------------------------------------------------------------------
1 | CESM2 LENS 1001.001 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1001.001.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
2 | CESM2 LENS 1021.002 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1021.002.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
3 | CESM2 LENS 1041.003 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1041.003.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
4 | CESM2 LENS 1061.004 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1061.004.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
5 | CESM2 LENS 1081.005 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1081.005.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
6 | CESM2 LENS 1101.006 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1101.006.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
7 | CESM2 LENS 1121.007 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1121.007.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
8 | CESM2 LENS 1141.008 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1141.008.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
9 | CESM2 LENS 1161.009 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1161.009.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
10 | CESM2 LENS 1181.010 | /glade/campaign/cgd/cesm/CESM2-LE/{atm,lnd,ocn,ice}/proc/tseries/month_1/{MOC,SNOWDP,TS,TREFHT,PSL,PRECT,aice}/b.e21.B{HIST,SSP370}cmip6.f09_g17.LE2-1181.010.{cam.h0,clm2.h0,pop.h,cice.h}* | 1950 | 2049
11 |
12 | CESM1 LENS #1 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.001.*.nc | 1950 | 2049
13 | CESM1 LENS #2 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.002.*.nc | 1950 | 2049
14 | CESM1 LENS #3 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.003.*.nc | 1950 | 2049
15 | CESM1 LENS #4 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.004.*.nc | 1950 | 2049
16 | CESM1 LENS #5 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.005.*.nc | 1950 | 2049
17 | CESM1 LENS #6 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.006.*.nc | 1950 | 2049
18 | CESM1 LENS #7 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.007.*.nc | 1950 | 2049
19 | CESM1 LENS #8 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.008.*.nc | 1950 | 2049
20 | CESM1 LENS #9 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.009.*.nc | 1950 | 2049
21 | CESM1 LENS #10 | /glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/{atm,ice,lnd,ocn}/proc/tseries/monthly/*/b.e11.B*C5CNBDRD.f09_g16.010.*.nc | 1950 | 2049
22 |
--------------------------------------------------------------------------------
/lib/website_templates/template_mean_diag.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ADF Mean Plots
5 |
6 |
7 |
8 |
40 |
41 |
42 | {{ plottype_title }} Plots
43 |
44 |
45 |
46 | {% for category, var_seas in mydata.items() %}
47 |
48 |
49 | {{ category }}
50 |
51 | {% for var_name, ptype_seas in var_seas.items() %}
52 | {% if non_seasons[category][var_name] == False %}
53 | {% for i,season in enumerate(ptype_seas.keys()) %}
54 | {% if i==0 %}
55 |
58 | {% endif %}
59 | {% endfor %}
60 |
61 | {% else %}
62 | {% for season in ptype_seas.keys() %}
63 | {% if season == list(ptype_seas.keys())[0] %}
64 |
67 | {% endif %}
68 | {% endfor %}
69 |
70 | {% endif %}
71 | {% endfor %}
72 |
73 |
74 |
75 |
76 | {% endfor %}
77 |
78 |
79 |
80 |
--------------------------------------------------------------------------------
/lib/website_templates/template.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | ADF {{var_title}}
5 |
6 |
7 |
8 |
40 |
41 |
42 |
43 |
44 | {{plottype_title}} - {{var_title}}
45 |
46 |
49 |
50 |
51 |
52 |
53 |
54 | {% for category, var_seas in mydata.items() %}
55 | {% for var_name, ptype_seas in var_seas.items() %}
56 | {% if var_name == var_title %}
57 | {% if non_seasons[category][var_name] == False %}
58 | {% for season in seasons %}
59 | {% if season in ptype_seas.keys() %}
60 |
63 | {% else %}
64 |
67 | {% endif %}
68 | {% endfor %}
69 | {% else %}
70 | {% for season in ptype_seas.keys() %}
71 |
74 | {% endfor %}
75 | {% endif %}
76 | {% endif %}
77 | {% endfor %}
78 | {% endfor %}
79 |
80 |
81 |
88 |
89 |
90 |
91 |
--------------------------------------------------------------------------------
/.github/scripts/pylint_threshold_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | """
4 | Module name: pylint_threshold_test.py
5 |
6 | Purpose: To test whether the provided list of python
7 | files pass a pylint check at the specified
8 | score threshold.
9 |
10 | Written by: Jesse Nusbaumer - November, 2020
11 | """
12 |
13 | #+++++++++++++++++++++
14 | #Import needed modules
15 | #+++++++++++++++++++++
16 |
17 | import argparse
18 | import io
19 | import pylint.lint as lint
20 |
21 | from pylint.reporters.text import TextReporter
22 |
23 | #################
24 | #HELPER FUNCTIONS
25 | #################
26 |
27 | #++++++++++++++++++++++++++++++
28 | #Input Argument parser function
29 | #++++++++++++++++++++++++++++++
30 |
31 | def parse_arguments():
32 |
33 | """
34 | Parses command-line input arguments using the argparse
35 | python module and outputs the final argument object.
36 | """
37 |
38 | #Create parser object:
39 |     parser = argparse.ArgumentParser(description='Test whether the provided python files pass a pylint check at the specified score threshold.')
40 |
41 | #Add input arguments to be parsed:
42 | parser.add_argument('--python_files', metavar='',
43 | nargs='+', action='store', type=str,
44 | help="list of python files to test")
45 |
46 | parser.add_argument('--rcfile', metavar='', action='store', type=str,
47 | help="location of pylintrc file (full path)")
48 |
49 | parser.add_argument('--pylint_level', metavar='', action='store', type=float,
50 | required=False, help="pylint score that file(s) must exceed")
51 |
52 | #Parse Argument inputs
53 | args = parser.parse_args()
54 | return args
55 |
56 | #################
57 | #Main test script
58 | #################
59 |
60 | def pylint_check(pyfile_list, rcfile, threshold=10.0):
61 |
62 | """
63 | Checks if the pylint scores of the provided
64 | python files are greater than a specified
65 | threshold.
66 | """
67 |
68 |     #Create empty list to store pylint output:
69 | lint_msgs = list()
70 |
71 |     #Check if pyfile_list is empty. If so then exit
72 |     #script, as there are no python files to test:
73 | if not pyfile_list:
74 | return lint_msgs
75 |
76 | #Create rcfile option string:
77 | rcstr = '--rcfile={}'.format(rcfile)
78 |
79 | #If files exist, then loop through the list:
80 | for pyfile in pyfile_list:
81 |
82 | #Create IO object to receive pylint messages:
83 | pylint_output = io.StringIO()
84 |
85 | #Create pylint reporter object using new IO object:
86 | pylint_report = TextReporter(pylint_output)
87 |
88 | #Run linter:
89 | lint_results = lint.Run([rcstr, '--exit-zero', pyfile],
90 | reporter=pylint_report, exit=False)
91 |
92 | #Extract linter score:
93 | lint_score = lint_results.linter.stats.global_note
94 |
95 | #Save pylint output as string:
96 | lint_msg = pylint_output.getvalue()
97 |
98 | #Close IO object:
99 | pylint_output.close()
100 |
101 | #Add file score and message to list if
102 | #below pylint threshold:
103 | if lint_score < threshold:
104 | lint_msgs.append(lint_msg)
105 |
106 |     #Return pylint messages list:
107 | return lint_msgs
108 |
109 | ####################
110 | #Command-line script
111 | ####################
112 |
113 | def _pylint_check_commandline():
114 |
115 | """
116 | Runs the "pylint_check" test using
117 | command line inputs. This will
118 | print the test results to stdout
119 | (usually the screen).
120 | """
121 |
122 | #Read in command-line arguments:
123 | args = parse_arguments()
124 |
125 | #Add argument values to variables:
126 | python_files = args.python_files
127 | pylintrc = args.rcfile
128 | pylint_level = args.pylint_level
129 |
130 | #run pylint threshold check:
131 | if pylint_level:
132 | msgs = pylint_check(python_files, pylintrc,
133 | threshold=pylint_level)
134 | else:
135 | msgs = pylint_check(python_files, pylintrc)
136 |
137 | #print pylint info to screen:
138 | if msgs:
139 | #If test(s) failed, then print pylint message(s):
140 | for msg in msgs:
141 | print(msg)
142 | else:
143 | print("All files scored above pylint threshold")
144 |
145 | #############################################
146 |
147 | #Run main script using provided command line arguments:
148 | if __name__ == "__main__":
149 | _pylint_check_commandline()
150 |
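A usage sketch for calling `pylint_check` directly from Python; the file path, rcfile location, and threshold below are assumptions:

```
# Sketch: calling pylint_check directly (paths and threshold are hypothetical).
from pylint_threshold_test import pylint_check

failing_msgs = pylint_check(["lib/adf_base.py"], ".pylintrc", threshold=9.5)
for msg in failing_msgs:
    print(msg)  # pylint report for each file scoring below the threshold
if not failing_msgs:
    print("All files scored above pylint threshold")
```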
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ADF diagnostics
2 |
3 | [](https://github.com/NCAR/ADF/actions/workflows/ADF_unit_tests.yaml) [](https://github.com/NCAR/ADF/actions/workflows/ADF_pre-commit.yaml) [![CC BY 4.0][cc-by-shield]][cc-by]
4 |
5 | This repository contains the Atmosphere Model Working Group (AMWG) Diagnostics Framework (ADF) diagnostics python package, which includes numerous averaging,
6 | re-gridding, and plotting scripts, most of which are provided by users of CAM itself.
7 |
8 | Specifically, this package is currently designed to generate standard climatological comparisons between either two
9 | different CAM simulations, or between a CAM simulation and observational and reanalysis datasets. Ideally
10 | this will allow for a quick evaluation of a CAM simulation, without requiring the user to generate numerous
11 | different figures on their own.
12 |
13 | Currently, this package only uses standard CAM monthly time-slice (h0) outputs or single-variable monthly time series files. However, if there is user interest then
14 | additional model input options can be added.
15 |
16 | Finally, if you are interested in general (but non-supported) tools used by AMP scientists and engineers in their work, then please check out the [AMP Toolbox](https://github.com/NCAR/AMP_toolbox).
17 |
18 | ## Required software environment
19 |
20 | These diagnostics currently require Python version 3.6 or higher. They also require the following non-standard python libraries/modules:
21 |
22 | - PyYAML
23 | - Numpy
24 | - Xarray
25 | - Matplotlib
26 | - Cartopy
27 | - GeoCAT
28 |
29 | If one wants to generate the "AMWG" model variable statistics table as well, then these additional python libraries are also needed:
30 |
31 | - Scipy
32 | - Pandas
33 |
34 | On NCAR's CISL machines (cheyenne and casper), these can be loaded by running the following on the command line:
35 | ```
36 | module load conda
37 | conda activate npl
38 | ```
39 | If you are using conda on a non-CISL machine, then you can create and activate the appropriate python environment using the `env/conda_environment.yaml` file like so:
40 |
41 | ```
42 | conda env create -f env/conda_environment.yaml
43 | conda activate adf_v1.0.0
44 | ```
45 |
46 | Along with these python requirements, the `ncrcat` NetCDF Operator (NCO) is also needed. On the CISL machines this can be loaded by simply running:
47 | ```
48 | module load nco
49 | ```
50 | or on the CGD machines by simply running:
51 | ```
52 | module load tool/nco
53 | ```
54 | on the command line.
55 |
56 | Finally, if you also want to run the [Climate Variability Diagnostics Package](https://www.cesm.ucar.edu/working_groups/CVC/cvdp/) (CVDP) as part of the ADF then you'll also need NCL. On the CISL machines this can be done using the command:
57 | ```
58 | module load ncl
59 | ```
60 | or on the CGD machines by using the command:
61 | ```
62 | module load tool/ncl/6.6.2
63 | ```
64 | on the command line.
65 |
66 | ## Running ADF diagnostics
67 |
68 | Detailed instructions for users and developers are available on this repository's [wiki](https://github.com/NCAR/ADF/wiki).
69 |
70 |
71 | To run an example of the ADF diagnostics, simply download this repo, set up your computing environment as described in the [Required software environment](https://github.com/NCAR/CAM_diagnostics/blob/main/README.md#required-software-environment) section above, modify the `config_cam_baseline_example.yaml` file (or create one of your own) to point to the relevant directories, and run:
72 |
73 | `./run_adf_diag config_cam_baseline_example.yaml`
74 |
75 | This should generate a collection of time series files, climatology (climo) files, re-gridded climo files, and example ADF diagnostic figures, all in their respective directories.
76 |
77 | ### ADF Tutorial/Demo
78 |
79 | A Jupyter Book detailing the ADF, including ADF basics, guided examples, quick runs, and references:
80 | - https://justin-richling.github.io/ADF-Tutorial/README.html
81 |
82 | ## Troubleshooting
83 |
84 | Any problems or issues with this software should be posted on the ADF discussions page located online [here](https://github.com/NCAR/ADF/discussions).
85 |
86 | Please note that registration may be required before a message can
87 | be posted. However, feel free to search the forums for similar issues
88 | (and possible solutions) without needing to register or sign in.
89 |
90 | Good luck, and have a great day!
91 |
92 | ##
93 |
94 | This work is licensed under a
95 | [Creative Commons Attribution 4.0 International License][cc-by].
96 |
97 | [![CC BY 4.0][cc-by-image]][cc-by]
98 |
99 | [cc-by]: http://creativecommons.org/licenses/by/4.0/
100 | [cc-by-image]: https://i.creativecommons.org/l/by/4.0/88x31.png
101 | [cc-by-shield]: https://img.shields.io/badge/License-CC%20BY%204.0-lightgrey.svg
102 |
--------------------------------------------------------------------------------
/lib/test/unit_tests/test_adf_base.py:
--------------------------------------------------------------------------------
1 | """
2 | Collection of python unit tests
3 | for the "AdfBase" class.
4 | """
5 |
6 | #+++++++++++++++++++++++
7 | #Import required modules
8 | #+++++++++++++++++++++++
9 |
10 | import unittest
11 | import sys
12 | import os
13 | import os.path
14 | import logging
15 | import glob
16 |
17 | #Set relevant path variables:
18 | _CURRDIR = os.path.abspath(os.path.dirname(__file__))
19 | _ADF_LIB_DIR = os.path.join(_CURRDIR, os.pardir, os.pardir)
20 |
21 | #Add ADF "lib" directory to python path:
22 | sys.path.append(_ADF_LIB_DIR)
23 |
24 | #Import AdfBase class
25 | from adf_base import AdfBase
26 | from adf_base import AdfError
27 |
28 | #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
29 | #Main AdfBase testing routine, used when script is run directly
30 | #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
31 |
32 | class AdfBaseTestRoutine(unittest.TestCase):
33 |
34 | """
35 | Runs all of the unit tests
36 | for the AdfBase class. Ideally
37 | this set of tests will provide
38 | complete code coverage for AdfBase.
39 | """
40 |
41 |     #Clean up after each unit test:
42 | def tearDown(self):
43 |
44 | """
45 | Remove log files (if they exist).
46 | """
47 |
48 | debug_list = glob.glob("ADF_debug*.log")
49 |
50 | for dfile in debug_list:
51 | #Remove log file if it exists:
52 | if os.path.exists(dfile):
53 | os.remove(dfile)
54 |
55 |
56 | #Close all log streams:
57 | logging.shutdown()
58 |
59 | def test_AdfBase_create(self):
60 |
61 | """
62 | Check that the Adfbase class can
63 | be initialized properly.
64 | """
65 |
66 | #Create AdfBase object:
67 | adf_test = AdfBase()
68 |
69 | #Assert that new object is of the "AdfBase" class:
70 | self.assertIsInstance(adf_test, AdfBase)
71 |
72 | def test_AdfBase_debug_create(self):
73 |
74 | """
75 | Check that the Adfbase class can
76 | be initialized properly when the
77 | debug flag is set, and that a
78 | debug log file is created.
79 | """
80 |
81 | #Create AdfBase object with debug setting:
82 | adf_test = AdfBase(debug=True)
83 |
84 | #Grab debug log name
85 | debug_fname = adf_test.debug_fname
86 |
87 | #Assert that new object is of the "AdfBase" class:
88 | self.assertIsInstance(adf_test, AdfBase)
89 |
90 | #Assert that ADF debug log file exists in local directory:
91 | self.assertTrue(os.path.exists(debug_fname))
92 |
93 | def test_AdfBase_bad_debug(self):
94 |
95 | """
96 | Check that the Adfbase class
97 | throws the proper error if a bad
98 | value is passed-in via the "debug" variable.
99 | """
100 |
101 | #Set error message:
102 | ermsg = "'debug' must be a boolean type (True or False)"
103 |
104 | #Expect a Type error:
105 | with self.assertRaises(TypeError) as typerr:
106 |
107 | #Create AdfBase object with bad debug setting:
108 | adf_test = AdfBase(debug=5)
109 |
110 | #Check that error message matches what's expected:
111 | self.assertEqual(ermsg, str(typerr.exception))
112 |
113 |
114 | def test_AdfBase_debug_nothing(self):
115 |
116 | """
117 | Check that using the "debug_log" method
118 | without debugging enabled does nothing.
119 | """
120 |
121 | #Create AdfBase object with no debug setting:
122 | adf_test = AdfBase()
123 |
124 | #Call "debug_log" method:
125 | adf_test.debug_log("test")
126 |
127 | #Grab debug log name
128 | debug_fname = adf_test.debug_fname
129 |
130 | #Check that no log file exists:
131 | self.assertFalse(os.path.exists(debug_fname))
132 |
133 | def test_AdfBase_debug_write(self):
134 |
135 | """
136 | Check that using the "debug_log" method
137 | with debugging enabled properly writes
138 | a message to the debug log file.
139 | """
140 |
141 | #Create AdfBase object with debug setting:
142 | adf_test = AdfBase(debug=True)
143 |
144 | #Grab debug log name
145 | debug_fname = adf_test.debug_fname
146 |
147 | #Call "debug_log" method:
148 | adf_test.debug_log("test")
149 |
150 | #Check that debug log exists:
151 | self.assertTrue(os.path.exists(debug_fname))
152 |
153 | #If debug log exists, then open file:
154 | if os.path.exists(debug_fname):
155 |
156 | #Open log file:
157 | with open(debug_fname) as logfil:
158 |
159 | #Extract file contents:
160 | log_text = logfil.read()
161 |
162 | #Check that log text matches what was written:
163 | self.assertEqual("DEBUG:ADF:test\n",log_text)
164 |
165 | def test_AdfBase_script_end_fail(self):
166 |
167 | """
168 | Check that using "end_diag_fail" raises
169 | the correct exception and error message
170 | """
171 |
172 | #Create AdfBase object:
173 | adf_test = AdfBase()
174 |
175 |         #Expect an ADF error:
176 | with self.assertRaises(AdfError) as adferr:
177 |
178 | #Call "end_diag_fail" method:
179 | adf_test.end_diag_fail("test")
180 |
181 | #Check that error message matches what's expected:
182 | self.assertEqual("test", str(adferr.exception))
183 |
184 | #++++++++++++++++++++++++++++++++++++++++++++++++
185 | #Run unit tests if this script is called directly
186 | #++++++++++++++++++++++++++++++++++++++++++++++++
187 |
188 | if __name__ == "__main__":
189 | unittest.main()
190 |
191 |
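These tests can be run directly (`python test_adf_base.py`) or, as a sketch, through unittest discovery, assuming it is invoked from the repository root:

```
# Sketch: run the AdfBase unit tests via unittest discovery.
import unittest

suite = unittest.defaultTestLoader.discover("lib/test/unit_tests")
unittest.TextTestRunner(verbosity=2).run(suite)
```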
--------------------------------------------------------------------------------
/lib/externals/CVDP/ncl_scripts/ncfiles.append.ncl:
--------------------------------------------------------------------------------
1 | ; Concatenate all .nc files from same model/observational dataset
2 | ; into a single .nc file.
3 |
4 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl"
5 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl"
6 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl"
7 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/shea_util.ncl"
8 | ;load "$CVDP_SCRIPTS/functions.ncl"
9 |
10 | begin
11 | print("Starting: ncfiles.append.ncl")
12 |
13 | OUTDIR = getenv("OUTDIR")
14 | o = getenv("OBS")
15 | ;
16 | if (o.eq."True") then
17 | obsflag = True
18 | else
19 | obsflag = False
20 | end if
21 |
22 | nsim = numAsciiRow("namelist")
23 | na = asciiread("namelist",(/nsim/),"string")
24 |
25 | blankrow = ind(na.eq."")
26 | if (.not.any(ismissing(blankrow))) then
27 | goodrows = ind(na.ne."")
28 | na2 = na(goodrows)
29 | delete(na)
30 | na = na2
31 | delete(na2)
32 | nsim = dimsizes(na)
33 | end if
34 |
35 | nentry = numAsciiCol("namelist")
36 | names = new(nsim,"string")
37 | syear = new(nsim,"integer",-999)
38 | eyear = new(nsim,"integer",-999)
39 |
40 | delim = "|"
41 | do gg = 0,nsim-1
42 | names(gg) = str_sub_str(str_sub_str(str_sub_str(str_sub_str(str_sub_str(str_strip(str_get_field(na(gg),1,delim))," ","_"),"/","_"),"'","_"),"(","_"),")","_")
43 | syear(gg) = stringtointeger(str_strip(str_get_field(na(gg),3,delim)))
44 | eyear(gg) = stringtointeger(str_strip(str_get_field(na(gg),4,delim)))
45 | end do
46 |
47 | do gg = 0,nsim-1
48 | fils = systemfunc("ls "+OUTDIR+names(gg)+".*.nc 2> /dev/null")
49 | if (.not.ismissing(fils(0))) then
50 | dimf = dimsizes(fils)
51 | ofile = OUTDIR+names(gg)+".cvdp_data."+syear(gg)+"-"+eyear(gg)+".nc"
52 | if (dimf.eq.1) then
53 | system("mv "+fils(0)+" "+ofile)
54 | else
55 | if (isfilepresent(ofile)) then ; if file master is present append individual data files to file master.
56 | do hh = 0,dimf-1
57 | if (fils(hh).ne.ofile) then
58 | system("ncks -A -h "+fils(hh)+" "+ofile)
59 | end if
60 | end do
61 | else ; if file master is not present, append individual data files to last file in list,
62 | do hh = 0,dimf-2 ; and when done move the last file to be the master file
63 | system("ncks -A -h "+fils(hh)+" "+fils(dimf-1))
64 | end do
65 | system("mv "+fils(dimf-1)+" "+ofile)
66 | end if
67 | if (dimsizes(fils(:dimf-2)).ge.2) then
68 | system("rm "+str_sub_str(str_join(fils(:dimf-2)," "),ofile,"")) ; remove each script's file, but do not remove the master file (if present)
69 | end if
70 | end if
71 | system("ncks -O "+ofile+" "+ofile) ; done to alphabetize output variable
72 | delete([/dimf,ofile/])
73 | else
74 |    ; print("NetCDF files not found for "+names(gg)+", skipping appending")
75 | end if
76 | delete(fils)
77 | end do
78 | delete([/nsim,na,blankrow,nentry,names,syear,eyear/])
79 | ;------------------------------------------------
80 | if (obsflag) then
81 | maxnumobs = asciiread("obs_maxnum",(/1/),"integer")
82 |
83 | namelist_files = (/"psl","prect","trefht","ts","snowdp","moc","aice_nh","aice_sh"/)
84 | delim = "|"
85 | cntr = 0
86 | namesB = new(maxnumobs*dimsizes(namelist_files),string)
87 | do gg = 0,dimsizes(namelist_files)-1 ; grab all observational dataset names from namelist_$var files
88 | na = asciiread("namelist_byvar/namelist_"+namelist_files(gg),(/maxnumobs/),"string")
89 | namesB(cntr:cntr+maxnumobs-1) = str_sub_str(str_sub_str(str_sub_str(str_sub_str(str_sub_str(str_strip(str_get_field(na,1,delim))," ","_"),"/","_"),"'","_"),"(","_"),")","_")
90 | cntr = cntr+maxnumobs
91 | delete(na)
92 | end do
93 |
94 | namesB = where(namesB.eq."",namesB@_FillValue,namesB) ; for blank names set them to _FillValue
95 | if (any(namesB.eq."missing")) then
96 | namesB(str_match_ind(namesB,"missing")) = namesB@_FillValue ; check for any names containing "missing", set to _FillValue
97 | end if
98 | delete([/delim,cntr,namelist_files/])
99 |
100 | do gg = 0,dimsizes(namesB)-1
101 | if (.not.ismissing(namesB(gg))) then
102 | fils = systemfunc("ls "+OUTDIR+namesB(gg)+".cvdp_data.*.nc 2> /dev/null")
103 | if (.not.ismissing(fils(0))) then
104 | dimf = dimsizes(fils)
105 | fil0 = tochar(fils(0))
106 | suffix = tostring(fil0(dimsizes(fil0)-12:dimsizes(fil0)-1))
107 | delete(fil0)
108 | ofi = OUTDIR+namesB(gg)+".cvdp_data."+suffix
109 | if (dimf.ge.2) then
110 | if (isfilepresent(ofi)) then ; if file master is present append individual data files to file master.
111 | do hh = 0,dimf-1
112 | if (fils(hh).ne.ofi) then
113 | system("ncks -A -h "+fils(hh)+" "+ofi)
114 | end if
115 | end do
116 | else ; if file master is not present, append individual data files to last file in list,
117 | do hh = 0,dimf-2 ; and when done move the last file to be the master file
118 | system("ncks -A -h "+fils(hh)+" "+fils(dimf-1))
119 | end do
120 | system("mv "+fils(dimf-1)+" "+ofi)
121 | end if
122 |
123 | if (dimsizes(fils(:dimf-2)).ge.2) then
124 | system("rm "+str_sub_str(str_join(fils(:dimf-2)," "),ofi,"")) ; remove each script's file, but do not remove the master file (if present)
125 | end if
126 | else
127 | if (fils(0).ne.ofi) then
128 | system("mv "+fils(0)+" "+ofi)
129 | end if
130 | end if
131 | system("ncks -O "+ofi+" "+ofi) ; done to alphabetize output variable
132 | delete([/dimf,ofi/])
133 | else
134 | ; print("NetCDF files not found for "+namesB(gg)+", skipping appending")
135 | end if
136 | delete(fils)
137 | end if
138 | end do
139 | delete([/namesB/])
140 | end if
141 | print("Finished: ncfiles.append.ncl")
142 | end
143 |
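For reference, a hypothetical Python equivalent of the namelist parsing done above, reading fields 1, 3, and 4 (name, start year, end year) of each pipe-delimited row:

```
# Sketch: parsing the pipe-delimited CVDP "namelist" rows in Python,
# mirroring the str_get_field/str_strip/str_sub_str calls in the NCL above.
names, syears, eyears = [], [], []
with open("namelist") as namelist:
    for row in namelist:
        fields = [f.strip() for f in row.split("|")]
        if len(fields) < 4:
            continue  # skip blank rows, like the ind(na.ne."") filter above
        name = fields[0]
        for ch in (" ", "/", "'", "(", ")"):  # sanitize names like the NCL does
            name = name.replace(ch, "_")
        names.append(name)
        syears.append(int(fields[2]))
        eyears.append(int(fields[3]))
```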
--------------------------------------------------------------------------------
/lib/website_templates/adf_diag.css:
--------------------------------------------------------------------------------
1 | nav.primary-navigation {
2 | margin: 0 auto;
3 | display: block;
4 | padding: 10px 0 0 0;
5 | text-align: center;
6 | font-size: 16px;
7 | }
8 |
9 | nav.primary-navigation ul{
10 | list-style: none;
11 | margin: 0;
12 | padding: 0;
13 | background: none;
14 | display: inline-block;
15 | align-items: center;
16 | justify-content: space-evenly;
17 | height: 100%;
18 | }
19 |
20 | nav.primary-navigation li {
21 | display: inline-flex;
22 | font-family: arvo, Tahoma, Geneva, Verdana, sans-serif;
23 | font-size: 16pt;
24 | text-align: center;
25 | text-transform: none;
26 | position: relative;
27 | color: black;
28 | min-width:max-content;
29 | }
30 |
31 | nav.primary-navigation li:hover {
32 | cursor: pointer;
33 | }
34 |
35 | nav.primary-navigation ul li ul {
36 | visibility: hidden;
37 | opacity: 0;
38 | position: absolute;
39 | left: 0;
40 | margin-top: 1rem;
41 | transition: all 0.25s ease;
42 | background: white;
43 | display: inline-block;
44 | min-width:max-content;
45 | }
46 |
47 | nav.primary-navigation ul li:hover > ul, nav.primary-navigation ul li ul:hover {
48 | visibility: visible;
49 | opacity: 1;
50 | display: block;
51 | width: auto;
52 | text-align: left;
53 | margin-top: 2rem;
54 | background: white;
55 | }
56 |
57 | nav.primary-navigation ul li ul li {
58 | clear: both;
59 | text-align: left;
60 | margin-bottom:0px;
61 | border-style: none;
62 | background: white;
63 | display: block;
64 | }
65 |
66 | nav.primary-navigation li a {
67 | color: #174ea6;
68 | display: block;
69 | padding: 8px;
70 | }
71 | nav.primary-navigation li a:hover {
72 | color: #3ca0e7;
73 | }
74 |
75 | nav.primary-navigation ul li ul li a:hover {
76 | padding-left: 10px;
77 | border-left: 2px solid #3ca0e7;
78 | transition: all 0.3s ease;
79 | display: block;
80 | }
81 |
82 |
83 | .center {
84 | text-align: center;
85 | }
86 |
87 | a {
88 | text-decoration: none;
89 | }
90 | a-blocked {
91 | text-decoration: none;
92 | color: black;
93 | }
94 | a:visited {
95 | color: #1A658F;
96 | }
97 | a:hover {
98 | color: #00C1D5;
99 | }
100 |
101 | body {
102 | padding-bottom: 50px;
103 | }
104 |
105 | ul li ul li a {
106 | transition: all 0.5s ease;
107 | }
108 |
109 | h1 {
110 | color: rgb(67, 67, 67);
111 | font-family: Tahoma, Geneva, Verdana, sans-serif;
112 | }
113 |
114 | h2 {
115 | color: gray;
116 | font-family: Tahoma, Geneva, Verdana, sans-serif;
117 | }
118 |
119 | h3 {
120 | color: black;
121 | font-family: Tahoma, Geneva, Verdana, sans-serif;
122 | text-decoration: underline;
123 | }
124 |
125 | img {
126 | max-width: 75%;
127 | height: auto;
128 | }
129 |
130 | table.big-table{
131 | border-collapse: collapse;
132 | border-radius: 10px;
133 | background-color: #012169;
134 | }
135 |
136 | table.big-table ul,ol,dl,p,th {
137 | font-size: 1.0rem;
138 | font-family: Tahoma, Geneva, Verdana, sans-serif;
139 | color: #D9D9D6;
140 | text-align: left;
141 | }
142 | table.big-table td {
143 | font-family: Tahoma, Geneva, Verdana, sans-serif;
144 | color: #D9D9D6;
145 | font-weight: bold;
146 | padding: 10px;
147 | text-transform: uppercase;
148 | letter-spacing: 1px;
149 | border-top: 1px solid #fff;
150 | border-bottom: 1px solid #ccc;
151 | }
152 |
153 | table.big-table td a {
154 | color: #D9D9D6;
155 | }
156 |
157 | table.big-table td a:hover {
158 | color: #00C1D5;
159 | }
160 |
161 |
162 | table.dataframe {
163 | font-size: 1.0rem;
164 | font-family: Tahoma, Geneva, Verdana, sans-serif;
165 | text-align: left;
166 | border-collapse: collapse;
167 | width: 100%;
168 | color: #53565A;
169 | border-radius: 10px;
170 | overflow: hidden;
171 | box-shadow: 0 0 20px rgba(0, 0, 0, 0.1);
172 | margin: auto;
173 | margin-top: 50px;
174 | margin-bottom: 50px;
175 | }
176 | table.dataframe tr:nth-child(even) td {
177 | font-family: Tahoma, Geneva, Verdana, sans-serif;
178 | padding-left: 10px;
179 | background-color: #DBE2E9;
180 | border: none;
181 | }
182 | table.dataframe tr:hover td {
183 | background-color: #BBBCBC;
184 | color: #53565A;
185 | border: none;
186 | }
187 | table.dataframe td {
188 | background-color: #fff;
189 | padding: 10px;
190 | border-bottom: 1px solid #ccc;
191 | font-weight: bold;
192 | border: none;
193 | }
194 | table.dataframe thead th{
195 | background-color: #C3D7EE;
196 | color: #1A658F;
197 | font-weight: bold;
198 | padding: 10px;
199 | text-transform: uppercase;
200 | letter-spacing: 1px;
201 | border: none;
202 | }
203 |
204 |
205 | .fixedDiv {
206 | position: fixed;
207 | top: 300px;
208 | right: 150px;
209 | }
210 |
211 | #footer {
212 | width: 100%;
213 | height: auto;
214 | text-align: center;
215 | -webkit-flex: 0 0 64px;
216 | flex: 0 0 64px;
217 | }
218 |
219 | .header {
220 | width: auto;
221 | height: 100%;
222 | }
223 |
224 | .grid-item {
225 | background-color: rgba(255, 255, 255, 0.6);
226 | font-family: Tahoma, Geneva, Verdana, sans-serif;
227 | border-collapse: collapse;
228 | border-radius: 15px; padding: 10px;
229 | font-size: 16px;
230 | text-align: center;
231 | }
232 |
233 | .grid-item-blocked {
234 | background-color: rgba(192, 192, 192, 0.6);
235 | font-family: Tahoma, Geneva, Verdana, sans-serif;
236 | border-collapse: collapse;
237 | border-radius: 15px; padding: 10px;
238 | font-size: 16px;
239 | text-align: center;
240 | }
241 |
242 | .grid-item-diag {
243 | background-color: rgba(255, 255, 255, 0.6);
244 | border: 1px solid rgba(0, 0, 0, 0.8);
245 | padding: 2px 4px;
246 | font-size: 12px;
247 | text-align: center;
248 | }
249 |
250 | .grid-container-ptype {
251 | display: grid;
252 | column-gap: 50px;
253 | row-gap: 50px;
254 | width: 1000px;
255 | grid-template-columns: repeat(3, auto);
256 | background-color: #e4eef0;
257 | padding: 65px 100px;
258 | }
259 |
260 | .grid-container {
261 | display: grid;
262 | column-gap: 50px;
263 | row-gap: 50px;
264 | grid-template-columns: repeat(3, auto);
265 | background-color: #e4eef0;
266 | padding: 85px;
267 | }
268 |
269 | .grid-container-template {
270 | display: grid;
271 | column-gap: 50px;
272 | row-gap: 50px;
273 | grid-auto-flow: column;
274 | background-color: #e4eef0;
275 | padding: 50px;
276 | }
277 |
278 | .dropdown {
279 | position: relative;
280 | display: inline-block;
281 | }
282 |
283 | .dropdown-vars {
284 | position: relative;
285 | display: inline-block;
286 | z-index: 4;
287 | }
288 |
289 | .dropdown-vars a {
290 | color: black;
291 | padding: 12px 12px;
292 | text-decoration: none;
293 | display: block;
294 | white-space: nowrap;
295 | }
296 |
297 | .block {
298 | display: block;
299 | width: 100%;
300 | border: none;
301 | font-family: Tahoma, Geneva, Verdana, sans-serif;
302 | color: rgb(67, 67, 67);
303 | background-color: rgba(206, 206, 206, 0.776);
304 |
305 | font-size: 16px;
306 | cursor: pointer;
307 | text-align: center;
308 | }
309 |
310 | .float-container {
311 | border: 3px solid #fff;
312 | padding: 20px;
313 | }
314 |
315 | .float-child {
316 | width: 50%;
317 | float: left;
318 | padding: 20px;
319 | }
--------------------------------------------------------------------------------
/run_adf_diag:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """
4 | script: run_adf_diag
5 |
6 | goal: To allow a user to easily run the CAM diagnostics package from
7 | the command line.
8 |
9 | Inputs: The ADF diagnostics config file (in YAML format). If the file is
10 | located in a different directory then the system path must also be
11 | included.
12 |
13 | Any problems or issues with this script should be posted on the
14 | ADF Github Discussions page located online here:
15 |
16 | https://github.com/NCAR/ADF/discussions
17 |
18 | Please note that registration may be required before a message can
19 | be posted. However, feel free to search the forums for similar issues
20 | (and possible solutions) without needing to register or sign in.
21 |
22 | Good luck, and may all your plots be helpful!
23 | """
24 |
25 | #++++++++++++++++++++++++++++++
26 | #Import standard python modules
27 | #++++++++++++++++++++++++++++++
28 |
29 | import os.path
30 | import sys
31 | import argparse
32 |
33 | #+++++++++++++++++++++++++++++
34 | #Import ADF diagnostics module
35 | #+++++++++++++++++++++++++++++
36 |
37 | #Determine local directory path:
38 | _LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
39 |
40 | #Add lib directory to path:
41 | _DIAG_LIB_PATH = os.path.join(_LOCAL_PATH,"lib")
42 |
43 | #Check that path actually exists:
44 | if os.path.isdir(_DIAG_LIB_PATH):
45 | #If true, then add to Python path:
46 | sys.path.append(_DIAG_LIB_PATH)
47 | else:
48 | #If not, then raise error:
49 | raise FileNotFoundError("'./lib' directory not found. Has 'run_adf_diag' been moved?")
50 |
51 | #Import ADF diagnostics object:
52 | from adf_diag import AdfDiag
53 |
54 | #Import ADF diagnostics error class:
55 | from adf_base import AdfError
56 |
57 | #################
58 | #Helper functions
59 | #################
60 |
61 | #++++++++++++++++++++++++++++++
62 | #Input argument parser function
63 | #++++++++++++++++++++++++++++++
64 |
65 | def parse_arguments():
66 |
67 | """
68 | Parses command-line input arguments using the argparse
69 | python module and outputs the final argument object.
70 | """
71 |
72 | #Create parser object:
73 | parser = argparse.ArgumentParser(description='Command-line wrapper to run the ADF diagnostics package.')
74 |
75 | #Add input arguments to be parsed:
76 | #--------------------------------
77 |
78 | #Config file:
79 | parser.add_argument('configure_file', nargs='?', action='store', type=str,
80 | help="YAML file used to configure CAM diagnostics.")
81 |
82 | #Flag method to point to config file. Will be removed at some point in the future.
83 | parser.add_argument('--config-file', '--config_file', metavar='', action='store', type=str,
84 | help="YAML file used to configure CAM diagnostics (deprecated).")
85 |
86 | #Debug setting:
87 | parser.add_argument('--debug', action='store_true', help="Turn on debug output.")
88 |
89 | #--------------------------------
90 |
91 | #Parse Argument inputs
92 | args = parser.parse_args()
93 |
94 | #If no config file argument is present, then throw an error:
95 | if (not args.configure_file) and (not args.config_file):
96 |         emsg = "No config file found, please run 'run_adf_diag <config file>'.\n"
97 |         emsg += "Where <config file> is the name of the yaml file used to configure "
98 |         emsg += "the diagnostics package."
99 | parser.error(emsg)
100 |
101 | return args
102 |
103 | ############################
104 | #Main CAM diagnostics script
105 | ############################
106 |
107 | #Run code below if command is called
108 | #directly from the command line:
109 | if __name__ == "__main__":
110 |
111 | #+++++++++++++++++++++
112 | #Begin ADF diag script
113 | #+++++++++++++++++++++
114 | print('ADF diagnostics is starting...')
115 |
116 | #+++++++++++++++++++++++++++++++++++++++++++
117 | #Check that python is version 3.6 or greater
118 | #+++++++++++++++++++++++++++++++++++++++++++
119 |
120 | if sys.version_info[0] < 3:
121 | raise AdfError("Script only works with Python 3. Please switch python versions.")
122 |
123 | if sys.version_info[1] < 6:
124 | raise AdfError("Script only works with Python version 3.6 or greater. Please update python.")
125 |
126 | #++++++++++++++++++++++++++++
127 | #Parse command-line arguments
128 | #++++++++++++++++++++++++++++
129 |
130 | args = parse_arguments()
131 |
132 | #Extract YAML config file name/path:
133 | if args.configure_file:
134 | config_yaml = args.configure_file
135 | else:
136 | config_yaml = args.config_file
137 |
138 | #Extract debug flag:
139 | config_debug = args.debug
140 |
141 | #+++++++++++++++++++++++++++++++++
142 | #Call main ADF diagnostics methods
143 | #+++++++++++++++++++++++++++++++++
144 |
145 |     #Initialize CAM diagnostics object:
146 | diag = AdfDiag(config_yaml, debug=config_debug)
147 |
148 | #Create model time series.
149 | #Please note that this is an internal ADF function:
150 | diag.create_time_series()
151 |
152 | #Create model baseline time series (if needed):
153 | if not diag.compare_obs:
154 | diag.create_time_series(baseline=True)
155 |
156 | #Call the CVDP:
157 | if diag.get_cvdp_info('cvdp_run'):
158 | diag.setup_run_cvdp()
159 |
160 | #Call the MDTF:
161 | if diag.get_mdtf_info('mdtf_run'):
162 | mdtf_proc = diag.setup_run_mdtf() #returns mdtf_proc for subprocess control
163 | else:
164 | mdtf_proc = None
165 |
166 | #Create model climatology (climo) files.
167 | #This call uses the "time_averaging_scripts" specified
168 | #in the config file:
169 | diag.create_climo()
170 |
171 | #If a user is doing a model vs obs comparison, but
172 | #no observations were found, then stop here:
173 | if diag.compare_obs and not diag.var_obs_dict:
174 | print('\nADF diagnostics has completed successfully.')
175 | sys.exit(0)
176 |
177 | #Regrid model climatology files to match either
178 | #observations or CAM baseline climatologies.
179 | #This call uses the "regridding_scripts" specified
180 | #in the config file:
181 | diag.regrid_climo()
182 |
183 | #Perform analyses on the simulation(s).
184 | #This call uses the "analysis_scripts" specified in the
185 | #config file:
186 | diag.perform_analyses()
187 |
188 | #Create plots.
189 | #This call uses the "plotting_scripts" specified
190 | #in the config file:
191 | diag.create_plots()
192 |
193 | #Create website.
194 | #Please note that this is an internal ADF function:
195 | if diag.create_html:
196 | diag.create_website()
197 |
198 | # Check if sub-processes are still running (CVDP, MDTF)
199 | if mdtf_proc:
200 | mdtf_status = mdtf_proc.wait(timeout=None)
201 | if (mdtf_status != 0):
202 | print(f"ERROR: MDTF finished with code {mdtf_status}")
203 | else:
204 | print("MDTF finished successfully")
205 |
206 | #+++++++++++++++
207 | #End diag script
208 | #+++++++++++++++
209 | print('\nADF diagnostics has completed successfully.')
210 | sys.exit(0)
211 |
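The same pipeline can be sketched programmatically; this simply restates the method calls made above, omitting the CVDP/MDTF branches and the missing-observations early exit:

```
# Sketch: the core run_adf_diag pipeline, called directly from Python.
# Assumes the ADF "lib" directory is on sys.path and the YAML config exists.
from adf_diag import AdfDiag

diag = AdfDiag("config_cam_baseline_example.yaml", debug=False)
diag.create_time_series()
if not diag.compare_obs:
    diag.create_time_series(baseline=True)  # baseline case time series
diag.create_climo()
diag.regrid_climo()
diag.perform_analyses()
diag.create_plots()
if diag.create_html:
    diag.create_website()
```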
--------------------------------------------------------------------------------
/lib/adf_obs.py:
--------------------------------------------------------------------------------
1 | """
2 | Observations (obs) class for the Atmospheric
3 | Diagnostics Framework (ADF).
4 | This class inherits from the AdfInfo class.
5 |
6 | Currently this class does four things:
7 |
8 | 1. Initializes an instance of AdfInfo.
9 |
10 | 2. Sets the "variable_defaults" ADF variable.
11 |
12 | 3. Checks whether any requested variable is supposed
13 |    to have a land or ocean mask, and if so then
14 |    adds the ocean fraction (OCNFRAC) to the
15 |    variable list.
16 |
17 | 4. If this is a model vs obs run, then creates a
18 | dictionary of what observational dataset
19 | is associated with each requested variable,
20 | along with any relevant observational meta-data.
21 |
22 | This class also provides methods for extracting
23 | the observational data and meta-data for use
24 | in various scripts.
25 | """
26 |
27 | #++++++++++++++++++++++++++++++
28 | #Import standard python modules
29 | #++++++++++++++++++++++++++++++
30 |
31 | import copy
32 |
33 | from pathlib import Path
34 |
35 | #+++++++++++++++++++++++++++++++++++++++++++++++++
36 | #import non-standard python modules, including ADF
37 | #+++++++++++++++++++++++++++++++++++++++++++++++++
38 |
39 | import yaml
40 |
41 | #ADF modules:
42 | from adf_info import AdfInfo
43 |
44 | #+++++++++++++++++++
45 | #Define Obs class
46 | #+++++++++++++++++++
47 |
48 | class AdfObs(AdfInfo):
49 |
50 | """
51 | Observations class, which initializes
52 | an AdfInfo object and provides
53 | additional variables and methods
54 | needed for managing observational data.
55 | """
56 |
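 |     # Minimal usage sketch (hypothetical config path; in practice this
 |     # class is instantiated indirectly when an AdfDiag object is built):
 |     #   obs = AdfObs("config_example.yaml", debug=True)
 |     #   print(obs.var_obs_dict)
 |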
57 | def __init__(self, config_file, debug=False):
58 |
59 | """
60 |         Initialize ADF Obs object.
61 | """
62 |
63 | #Initialize Config attributes:
64 | super().__init__(config_file, debug=debug)
65 |
66 | #Determine local directory:
67 | _adf_lib_dir = Path(__file__).parent
68 |
69 | # Check whether user wants to use defaults:
70 | #-----------------------------------------
71 | #Determine whether to use adf defaults or custom:
72 | _defaults_file = self.get_basic_info('defaults_file')
73 | if _defaults_file is None:
74 | _defaults_file = _adf_lib_dir/'adf_variable_defaults.yaml'
75 | else:
76 | print(f"\n\t Not using ADF default variables yaml file, instead using {_defaults_file}\n")
77 | #End if
78 |
79 | #Open YAML file:
80 | with open(_defaults_file, encoding='UTF-8') as dfil:
81 | self.__variable_defaults = yaml.load(dfil, Loader=yaml.SafeLoader)
82 |
83 | _variable_defaults = self.__variable_defaults
84 | #-----------------------------------------
85 |
86 | #Check if land or ocean mask is requested, and if so then add OCNFRAC
87 | #to the variable list. Note that this setting, and the defaults_file
88 | #code above, should probably be moved to AdfInfo, or somewhere else
89 | #farther down in the ADF inheritance chain:
90 | #----------------------------------------
91 | if self.__variable_defaults:
92 | #Variable defaults exist, so check if any want a land or ocean mask:
93 | for var in self.diag_var_list:
94 | #Check if any variable wants a land or ocean mask:
95 | if var in self.__variable_defaults:
96 | if 'mask' in self.__variable_defaults[var]:
97 | #Variable needs a mask, so add "OCNFRAC" to
98 | #the variable list:
99 | self.add_diag_var('OCNFRAC')
100 | break
101 | #End if
102 | #End if
103 | #End for
104 | #End if
105 | #-----------------------------------------
106 |
107 | #Initialize observations dictionary:
108 | self.__var_obs_dict = {}
109 |
110 | #If this is not a model vs obs run, then stop here:
111 | if not self.compare_obs:
112 | return
113 | #End if
114 |
115 | #Extract the "obs_data_loc" default observational data location:
116 | obs_data_loc = self.get_basic_info("obs_data_loc")
117 |
118 | #Loop over variable list:
119 | for var in self.diag_var_list:
120 |
121 | #Check if variable is in defaults dictionary:
122 | if var in _variable_defaults:
123 | #Extract variable sub-dictionary:
124 | default_var_dict = _variable_defaults[var]
125 |
126 | #Check if an observations file is specified:
127 | if "obs_file" in default_var_dict:
128 | #Set found variable:
129 | found = False
130 |
131 | #Extract path/filename:
132 | obs_file_path = Path(default_var_dict["obs_file"])
133 |
134 | #Check if file exists:
135 | if not obs_file_path.is_file():
136 | #If not, then check if it is in "obs_data_loc"
137 | if obs_data_loc:
138 | obs_file_path = Path(obs_data_loc)/obs_file_path
139 |
140 | if obs_file_path.is_file():
141 | found = True
142 |
143 | else:
144 | #File was found:
145 | found = True
146 | #End if
147 |
148 | #If found, then set observations dataset and variable names:
149 | if found:
150 | #Check if observations dataset name is specified:
151 | if "obs_name" in default_var_dict:
152 | obs_name = default_var_dict["obs_name"]
153 | else:
154 | #If not, then just use obs file name:
155 | obs_name = obs_file_path.name
156 |
157 | #Check if observations variable name is specified:
158 | if "obs_var_name" in default_var_dict:
159 | #If so, then set obs_var_name variable:
160 | obs_var_name = default_var_dict["obs_var_name"]
161 | else:
162 |                         #Assume observation variable name is the same as the model variable:
163 | obs_var_name = var
164 | #End if
165 |
166 | #Add variable to observations dictionary:
167 | self.__var_obs_dict[var] = \
168 | {"obs_file" : obs_file_path,
169 | "obs_name" : obs_name,
170 | "obs_var" : obs_var_name}
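 |                         #Example resulting entry (hypothetical values):
 |                         #  {"obs_file": Path("/obs/loc/GPCP.nc"),
 |                         #   "obs_name": "GPCP", "obs_var": "precip"}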
171 |
172 | else:
173 | #If not found, then print to log and skip variable:
174 | msg = f'''Unable to find obs file '{default_var_dict["obs_file"]}' '''
175 | msg += f"for variable '{var}'."
176 | self.debug_log(msg)
177 | continue
178 | #End if
179 |
180 | else:
181 | #No observation file was specified, so print
182 | #to log and skip variable:
183 | self.debug_log(f"No observations file was listed for variable '{var}'.")
184 | continue
185 | else:
186 | #Variable not in defaults file, so print to log and skip variable:
187 | msg = f"Variable '{var}' not found in variable defaults file: `{_defaults_file}`"
188 | self.debug_log(msg)
189 | #End if
190 | #End for (var)
191 |
192 | #If variable dictionary is still empty, then print warning to screen:
193 | if not self.__var_obs_dict:
194 | wmsg = "!!!!WARNING!!!!\n"
195 | wmsg += "No observations found for any variables, but this is a model vs obs run!\n"
196 | wmsg += "ADF will still calculate time series and climatologies if requested,"
197 | wmsg += " but will stop there.\n"
198 | wmsg += "If this result is unexpected, then run with '--debug'"
199 | wmsg += " and check the log for messages.\n"
200 | wmsg += "!!!!!!!!!!!!!!!\n"
201 | print(wmsg)
202 | #End if
203 |
204 | #########
205 |
206 | # Create property needed to return "variable_defaults" variable to user:
207 | @property
208 | def variable_defaults(self):
209 | """Return a copy of the '__variable_defaults' dictionary to the user if requested."""
210 | #Note that a copy is needed in order to avoid having a script mistakenly
211 | #modify this variable, as it is mutable and thus passed by reference:
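 |         #(Note: copy.copy is shallow, so nested dictionaries are still
 |         # shared with the original; copy.deepcopy would give full isolation.)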
212 | return copy.copy(self.__variable_defaults)
213 |
214 | # Create property needed to return "var_obs_dict" dictionary to user:
215 | @property
216 | def var_obs_dict(self):
217 |         """Return a copy of the "var_obs_dict" dictionary to the user if requested."""
218 | #Note that a copy is needed in order to avoid having a script mistakenly
219 | #modify this variable, as it is mutable and thus passed by reference:
220 | return copy.copy(self.__var_obs_dict)
221 |
222 | #++++++++++++++++++++
223 | #End Class definition
224 | #++++++++++++++++++++
225 |
--------------------------------------------------------------------------------
/.github/scripts/pr_mod_file_tests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | """
4 | Script name: pr_mod_file_tests.py
5 |
6 | Goal: To generate a list of files modified in the associated
7 | Github Pull Request (PR), using the PyGithub interface,
8 | and then to run tests on those files when appropriate.
9 |
10 | Note: This version currently limits the tests to a subset of files,
11 | in order to avoid running pylint on non-core python source files.
12 |
13 | Written by: Jesse Nusbaumer - November, 2020
14 | """
15 |
16 | #+++++++++++++++++++++
17 | #Import needed modules
18 | #+++++++++++++++++++++
19 |
20 | import sys
21 | import os
22 | import subprocess
23 | import shlex
24 | import argparse
25 |
26 | from stat import S_ISREG
27 | from github import Github
28 |
29 | #Local scripts:
30 | from pylint_threshold_test import pylint_check
31 |
32 | #################
33 |
34 | class PrModTestFail(ValueError):
35 | """Class used to handle file test failures
36 | (i.e., raise test failures without backtrace)"""
37 |
38 | #################
39 | #HELPER FUNCTIONS
40 | #################
41 |
42 | def _file_is_python(filename):
43 |
44 | """
45 | Checks whether a given file
46 | is a python script or
47 | python source code.
48 | """
49 |
50 | #Initialize return logical:
51 | is_python = False
52 |
53 | #Extract status of provided file:
54 | file_stat = os.stat(filename)
55 |
56 | #Check if it is a "regular" file:
57 | if S_ISREG(file_stat.st_mode):
58 |
59 | #Next, check if file ends in ".py":
60 | file_ext = os.path.splitext(filename)[1]
61 |
62 | if file_ext.strip() == ".py":
63 | #Assume file is python:
64 | is_python = True
65 | elif file_ext.strip() == ".gif":
66 | #This is an image file, so skip it:
67 | is_python = False
68 | else:
69 | #If no ".py" extension exists, then
70 |             #open the file and look for a shebang
71 | #that contains the word "python".
72 | with open(filename, "r", encoding='UTF-8') as mod_file:
73 | #Loop over lines in file:
74 | for line in mod_file:
75 |
76 | #Ignore blank lines:
77 | if line.strip():
78 |
79 | #Check that first non-blank
80 |                     #line is a shebang:
81 | if line[0:2] == '#!':
82 | #If so, then check that the word
83 | #"python" is also present:
84 | if line.find("python") != -1:
85 | #If the word exists, then assume
86 | #it is a python file:
87 | is_python = True
88 |
89 | #Exit loop, as only the first non-blank
90 | #line should be examined:
91 | break
92 |
93 | #Return file type result:
94 | return is_python
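 |
 | #Example (hypothetical filenames): _file_is_python("lib/adf_diag.py")
 | #returns True, while _file_is_python("docs/notes.txt") returns False
 | #unless that file starts with a python shebang line.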
95 |
96 | #++++++++++++++++++++++++++++++
97 | #Input Argument parser function
98 | #++++++++++++++++++++++++++++++
99 |
100 | def parse_arguments():
101 |
102 | """
103 | Parses command-line input arguments using the argparse
104 | python module and outputs the final argument object.
105 | """
106 |
107 | #Create parser object:
108 | parser = argparse.ArgumentParser(description='Generate list of all files modified by pull request.')
109 |
110 | #Add input arguments to be parsed:
111 | parser.add_argument('--access_token', metavar='', action='store', type=str,
112 | help="access token used to access GitHub API")
113 |
114 | parser.add_argument('--pr_num', metavar='', action='store', type=int,
115 | help="pull request number")
116 |
117 | parser.add_argument('--rcfile', metavar='', action='store', type=str,
118 | help="location of pylintrc file (full path)")
119 |
120 | parser.add_argument('--pylint_level', metavar='', action='store', type=float,
121 | required=False, help="pylint score that file(s) must exceed")
122 |
123 | #Parse Argument inputs
124 | args = parser.parse_args()
125 | return args
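 |
 | #Example invocation (hypothetical values):
 | #  ./pr_mod_file_tests.py --access_token $GITHUB_TOKEN --pr_num 123 \
 | #      --rcfile $PWD/.pylintrc --pylint_level 9.5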
126 |
127 | #############
128 | #MAIN PROGRAM
129 | #############
130 |
131 | def _main_prog():
132 |
133 | # pylint: disable=too-many-locals
134 | # pylint: disable=too-many-branches
135 | # pylint: disable=too-many-statements
136 |
137 | #++++++++++++
138 | #Begin script
139 | #++++++++++++
140 |
141 | print("Generating list of modified files...")
142 |
143 | #This should eventually be passed in via a command-line
144 | #argument, and include everything inside the "lib" directory -JN:
145 | testable_files = {"lib/adf_base.py",
146 | "lib/adf_config.py",
147 | "lib/adf_info.py",
148 | "lib/adf_obs.py",
149 | "lib/adf_web.py",
150 | "lib/adf_diag.py"}
151 |
152 | #+++++++++++++++++++++++
153 | #Read in input arguments
154 | #+++++++++++++++++++++++
155 |
156 | args = parse_arguments()
157 |
158 | #Add argument values to variables:
159 | token = args.access_token
160 | pr_num = args.pr_num
161 | rcfile = args.rcfile
162 | pylev = args.pylint_level
163 |
164 | #++++++++++++++++++++++++++++++++
165 | #Log-in to github API using token
166 | #++++++++++++++++++++++++++++++++
167 |
168 | ghub = Github(token)
169 |
170 | #++++++++++++++++++++
171 | #Open ESCOMP/CAM repo
172 | #++++++++++++++++++++
173 |
174 | #Official CAM repo:
175 | cam_repo = ghub.get_repo("NCAR/ADF")
176 |
177 | #++++++++++++++++++++++++++++++++++++++++++
178 | #Open Pull Request which triggered workflow
179 | #++++++++++++++++++++++++++++++++++++++++++
180 |
181 | pull_req = cam_repo.get_pull(pr_num)
182 |
183 | #++++++++++++++++++++++++++++++
184 | #Extract list of modified files
185 | #++++++++++++++++++++++++++++++
186 |
187 | #Create empty list to store python files:
188 | pyfiles = list()
189 |
190 | #Extract Github file objects:
191 | file_obj_list = pull_req.get_files()
192 |
193 | for file_obj in file_obj_list:
194 |
195 | #Check if file exists. If not,
196 | #then it was likely deleted in the
197 | #PR itself, so don't check its file type:
198 | if os.path.exists(file_obj.filename):
199 |
200 | #Check if it is a python file:
201 | if _file_is_python(file_obj.filename):
202 | #If so, then add to python list:
203 | pyfiles.append(file_obj.filename)
204 |
205 | #++++++++++++++++++++++++++++++++++++++++++++
206 | #Check if any python files are being modified:
207 | #++++++++++++++++++++++++++++++++++++++++++++
208 | if pyfiles:
209 |
210 |         #Create list of modified python files that are
211 |         # eligible for linting:
212 |         lint_files = []
213 |         for pyfile in pyfiles:
214 |             if pyfile in testable_files:
215 |                 lint_files.append(pyfile)
218 |
219 | #++++++++++++++++++++++++++++++++++++++++++++++
220 | #Check if any python files are in testable list
221 | #++++++++++++++++++++++++++++++++++++++++++++++
222 | if lint_files:
223 |
224 | #Notify users of python files that will be tested:
225 | print("The following modified python files will be tested:")
226 | for lint_file in lint_files:
227 | print(lint_file)
228 |
229 | #+++++++++++++++++++++++++
230 | #Run pylint threshold test
231 | #+++++++++++++++++++++++++
232 |
233 | lint_msgs = pylint_check(lint_files, rcfile,
234 | threshold=pylev)
235 |
236 | #++++++++++++++++++
237 | #Check test results
238 | #++++++++++++++++++
239 |
240 | #If pylint check lists are non-empty, then
241 | #a test has failed, and an exception should
242 |             #be raised with the relevant pylint info:
243 | if lint_msgs:
244 | #Print pylint results for failed tests to screen:
245 | print("+++++++++++PYLINT FAILURE MESSAGES+++++++++++++")
246 | for lmsg in lint_msgs:
247 | print(lmsg)
248 | print("+++++++++++++++++++++++++++++++++++++++++++++++")
249 |
250 | #Raise test failure exception:
251 | fail_msg = "One or more files are below allowed pylint " \
252 | "score of {}.\nPlease see pylint message(s) " \
253 | "above for possible fixes.".format(pylev)
254 | raise PrModTestFail(fail_msg)
255 | else:
256 | #All tests have passed, so exit normally:
257 | print("All pylint tests passed!")
258 | sys.exit(0)
259 |
260 | #If no python files in set of testable_files, then exit script:
261 | else:
262 | print("No ADF classes were modified in PR, so there is nothing to test.")
263 | sys.exit(0)
264 |
265 | #End if (lint_files)
266 |
267 | #If no python files exist in PR, then exit script:
268 | else:
269 | print("No python files present in PR, so there is nothing to test.")
270 | sys.exit(0)
271 |
272 | #End if (pyfiles)
273 |
274 | #############################################
275 |
276 | #Run the main script program:
277 | if __name__ == "__main__":
278 | _main_prog()
279 |
--------------------------------------------------------------------------------
/scripts/plotting/polar_map.py:
--------------------------------------------------------------------------------
1 | """Module to make polar stereographic maps."""
2 | from pathlib import Path
3 | import numpy as np
4 |
5 | # ADF library
6 | import plotting_functions as pf
7 | import adf_utils as utils
8 |
9 | def get_hemisphere(hemi_type):
10 | """Helper function to convert plot type to hemisphere code.
11 |
12 | Parameters
13 | ----------
14 | hemi_type : str
15 | if `NHPolar` set NH, otherwise SH
16 |
17 | Returns
18 | -------
19 | str
20 | NH or SH
21 | """
22 | return "NH" if hemi_type == "NHPolar" else "SH"
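 |
 | # Example: get_hemisphere("NHPolar") -> "NH"; any other value -> "SH"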
23 |
24 | def process_seasonal_data(mdata, odata, season):
25 | """Helper function to calculate seasonal means and differences.
26 | Parameters
27 | ----------
28 | mdata : xarray.DataArray
29 | test case data
30 | odata : xarray.DataArray
31 | reference case data
32 | season : str
33 | season (JJA, DJF, MAM, SON)
34 |
35 | Returns
36 | -------
37 | mseason : xarray.DataArray
38 | oseason : xarray.DataArray
39 | dseason : xarray.DataArray
40 | pseason : xarray.DataArray
41 | Seasonal means for test, reference, difference, and percent difference
42 | """
43 | mseason = utils.seasonal_mean(mdata, season=season, is_climo=True)
44 | oseason = utils.seasonal_mean(odata, season=season, is_climo=True)
45 |
46 | # Calculate differences
47 | dseason = mseason - oseason
48 | dseason.attrs['units'] = mseason.attrs['units']
49 |
50 | # Calculate percent change
51 | pseason = (mseason - oseason) / np.abs(oseason) * 100.0
52 | pseason.attrs['units'] = '%'
53 | pseason = pseason.where(np.isfinite(pseason), np.nan)
54 | pseason = pseason.fillna(0.0)
55 |
56 | return mseason, oseason, dseason, pseason
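 |
 | # Note: in process_seasonal_data the percent difference is undefined
 | # wherever the reference field is zero; those points become +/-inf,
 | # are masked to NaN, and are then filled with 0.0 so the contouring
 | # routines only ever see finite values.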
57 |
58 | def polar_map(adfobj):
59 | """Generate polar maps of model fields with continental overlays."""
60 | #Notify user that script has started:
61 | msg = "\n Generating polar maps..."
62 | print(f"{msg}\n {'-' * (len(msg)-3)}")
63 |
64 | var_list = adfobj.diag_var_list
65 |
66 | #Special ADF variable which contains the output paths for
67 | #all generated plots and tables for each case:
68 | plot_locations = adfobj.plot_location
69 |
70 | #CAM simulation variables (this is always assumed to be a list):
71 | case_names = adfobj.get_cam_info("cam_case_name", required=True)
72 |
73 | #Grab case years
74 | syear_cases = adfobj.climo_yrs["syears"]
75 | eyear_cases = adfobj.climo_yrs["eyears"]
76 |
77 | # if doing comparison to obs, but no observations are found, quit
78 | if adfobj.get_basic_info("compare_obs"):
79 | var_obs_dict = adfobj.var_obs_dict
80 | if not var_obs_dict:
81 | print("\t No observations found to plot against, so no polar maps will be generated.")
82 | return
83 |
84 |
85 | #Grab baseline years (which may be empty strings if using Obs):
86 | syear_baseline = adfobj.climo_yrs["syear_baseline"]
87 | eyear_baseline = adfobj.climo_yrs["eyear_baseline"]
88 |
89 | #Grab all case nickname(s)
90 | test_nicknames = adfobj.case_nicknames["test_nicknames"]
91 | base_nickname = adfobj.case_nicknames["base_nickname"]
92 |
93 | res = adfobj.variable_defaults # will be dict of variable-specific plot preferences
94 | # or an empty dictionary if use_defaults was not specified in YAML.
95 |
96 | #Set plot file type:
97 | # -- this should be set in basic_info_dict, but is not required
98 | # -- So check for it, and default to png
99 | basic_info_dict = adfobj.read_config_var("diag_basic_info")
100 | plot_type = basic_info_dict.get('plot_type', 'png')
101 | print(f"\t NOTE: Plot type is set to {plot_type}")
102 |
103 | # check if existing plots need to be redone
104 | redo_plot = adfobj.get_basic_info('redo_plot')
105 | print(f"\t NOTE: redo_plot is set to {redo_plot}")
106 | #-----------------------------------------
107 |
108 |
109 | #Determine if user wants to plot 3-D variables on
110 | #pressure levels:
111 | pres_levs = adfobj.get_basic_info("plot_press_levels")
112 |
113 | #Set seasonal ranges:
114 | seasons = {"ANN": np.arange(1,13,1),
115 | "DJF": [12, 1, 2],
116 | "JJA": [6, 7, 8],
117 | "MAM": [3, 4, 5],
118 | "SON": [9, 10, 11]
119 | }
120 |
121 | # probably want to do this one variable at a time:
122 | for var in var_list:
123 | print(f"\t - polar maps for {var}")
124 |
125 | if var not in adfobj.data.ref_var_nam:
126 |             dmsg = f"\t WARNING: No reference data found for variable `{var}`, polar map plotting skipped."
127 | adfobj.debug_log(dmsg)
128 | print(dmsg)
129 | continue
130 |
131 | if not adfobj.compare_obs:
132 | base_name = adfobj.data.ref_labels[var]
133 | else:
134 | base_name = adfobj.data.ref_case_label
135 |
136 |
137 | # Get variable-specific settings
138 | vres = res.get(var, {})
139 | web_category = vres.get("category", None)
140 |
141 | # Get all plot info and check existence
142 | plot_info = []
143 | all_plots_exist = True
144 |
145 | for case_idx, case_name in enumerate(case_names):
146 | plot_loc = Path(plot_locations[case_idx])
147 |
148 | tmp_ds = adfobj.data.load_regrid_dataset(case_name, var)
149 | if tmp_ds is None:
150 | continue
151 |
152 | has_lev = "lev" in tmp_ds.dims
153 |
154 | for s in seasons:
155 | for hemi_type in ["NHPolar", "SHPolar"]:
156 | if pres_levs and has_lev: # 3-D variable & pressure levels specified
157 | for pres in pres_levs:
158 | plot_name = plot_loc / f"{var}_{pres}hpa_{s}_{hemi_type}_Mean.{plot_type}"
159 | info = {
160 | 'path': plot_name,
161 | 'var': f"{var}_{pres}hpa",
162 | 'case': case_name,
163 | 'case_idx': case_idx,
164 | 'season': s,
165 | 'type': hemi_type,
166 | 'pressure': pres,
167 | 'exists': plot_name.is_file()
168 | }
169 | plot_info.append(info)
170 | if (redo_plot is False) and info['exists']:
171 | adfobj.add_website_data(info['path'], info['var'],
172 | info['case'], category=web_category,
173 | season=s, plot_type=hemi_type)
174 | else:
175 | all_plots_exist = False
176 | elif (not has_lev): # 2-D variable
177 | plot_name = plot_loc / f"{var}_{s}_{hemi_type}_Mean.{plot_type}"
178 | info = {
179 | 'path': plot_name,
180 | 'var': var,
181 | 'case': case_name,
182 | 'case_idx': case_idx,
183 | 'season': s,
184 | 'type': hemi_type,
185 | 'exists': plot_name.is_file()
186 | }
187 | plot_info.append(info)
188 | if (redo_plot is False) and info['exists']:
189 | adfobj.add_website_data(info['path'], info['var'],
190 | info['case'], category=web_category,
191 | season=s, plot_type=hemi_type)
192 | else:
193 | all_plots_exist = False
194 |
195 | if all_plots_exist:
196 | print(f"\t Skipping {var} - all plots already exist")
197 | continue
198 |
199 | odata = adfobj.data.load_reference_regrid_da(base_name, var)
200 | if odata is None:
201 | print(f"\t WARNING: No reference data found for {var}")
202 | continue
203 |
204 | # Process each case
205 | for plot in plot_info:
206 | if plot['exists'] and not redo_plot:
207 | continue
208 |
209 | case_name = plot['case']
210 | case_idx = plot['case_idx']
211 | plot_loc = Path(plot_locations[case_idx])
212 |
213 | # Ensure plot directory exists
214 | plot_loc.mkdir(parents=True, exist_ok=True)
215 |
216 | # Load and validate model data (units transformation included in load_regrid_da)
217 | mdata = adfobj.data.load_regrid_da(case_name, var)
218 | if mdata is None:
219 | continue
220 |
221 | # Process data based on dimensionality
222 |         has_lev = "lev" in mdata.dims
226 |
227 |         if has_lev and pres_levs and plot.get('pressure'):
228 |             if not all(dim in mdata.dims for dim in ['lat', 'lev']):
229 |                 continue
230 |             #Select the requested pressure level from both datasets:
231 |             mdata = mdata.sel(lev=plot['pressure'])
232 |             use_odata = odata.sel(lev=plot['pressure'])
233 |         else:
234 |             if not utils.lat_lon_validate_dims(mdata):
235 |                 continue
236 |             use_odata = odata
237 |         # Calculate seasonal means and differences
238 | mseason, oseason, dseason, pseason = process_seasonal_data(
239 | mdata,
240 | use_odata,
241 | plot['season']
242 | )
243 |
244 | # Create plot
245 | if plot['path'].exists():
246 | plot['path'].unlink()
247 |
248 | pf.make_polar_plot(
249 | plot['path'], test_nicknames[case_idx], base_nickname,
250 | [syear_cases[case_idx], eyear_cases[case_idx]],
251 | [syear_baseline, eyear_baseline],
252 | mseason, oseason, dseason, pseason,
253 | hemisphere=get_hemisphere(plot['type']),
254 | obs=adfobj.compare_obs, **vres
255 | )
256 |
257 | # Add to website
258 | adfobj.add_website_data(
259 | plot['path'], plot['var'], case_name,
260 | category=web_category, season=plot['season'],
261 | plot_type=plot['type']
262 | )
263 |
264 | print(" ...polar maps have been generated successfully.")
265 |
266 | ##############
267 | #END OF `polar_map` function
268 |
269 | ##############
270 | # END OF FILE
--------------------------------------------------------------------------------
/scripts/plotting/meridional_mean.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import numpy as np
3 | import xarray as xr
4 | import plotting_functions as pf
5 |
6 | import adf_utils as utils
7 | import warnings # use to warn user about missing files.
8 | warnings.formatwarning = utils.my_formatwarning
9 |
10 | def meridional_mean(adfobj):
11 |
12 | """
13 | This script plots meridional averages.
14 | Follows the old AMWG convention of plotting 5S to 5N.
15 | **Note:** the constraint of 5S to 5N is easily changed;
16 | the function that calculates the average can take any range of latitudes.
17 | Compare CAM climatologies against
18 | other climatological data (observations or baseline runs).
19 | """
20 |
21 | #Notify user that script has started:
22 | msg = "\n Generating meridional mean plots..."
23 | print(f"{msg}\n {'-' * (len(msg)-3)}")
24 |
25 | #Extract needed quantities from ADF object:
26 | #-----------------------------------------
27 | var_list = adfobj.diag_var_list
28 | model_rgrid_loc = adfobj.get_basic_info("cam_regrid_loc", required=True)
29 |
30 | #Special ADF variable which contains the output paths for
31 | #all generated plots and tables:
32 | plot_locations = adfobj.plot_location
33 |
34 | #CAM simulation variables (this is always assumed to be a list):
35 | case_names = adfobj.get_cam_info("cam_case_name", required=True)
36 |
37 | #Grab case years
38 | syear_cases = adfobj.climo_yrs["syears"]
39 | eyear_cases = adfobj.climo_yrs["eyears"]
40 |
41 | # CAUTION:
42 | # "data" here refers to either obs or a baseline simulation,
43 | # Until those are both treated the same (via intake-esm or similar)
44 | # we will do a simple check and switch options as needed:
45 | if adfobj.get_basic_info("compare_obs"):
46 | #Set obs call for observation details for plot titles
47 | obs = True
48 |
49 | #Extract variable-obs dictionary:
50 | var_obs_dict = adfobj.var_obs_dict
51 |
52 | #If dictionary is empty, then there are no observations to regrid to,
53 | #so quit here:
54 | if not var_obs_dict:
55 | print("\t No observations found to plot against, so no meridional-mean maps will be generated.")
56 | return
57 | else:
58 | obs = False
59 |         data_name = adfobj.get_baseline_info("cam_case_name", required=True)
60 |         data_list = [data_name] # name used to search for climo files (must be a list)
61 | data_loc = model_rgrid_loc #Just use the re-gridded model data path
62 | #End if
63 |
64 | #Grab baseline years (which may be empty strings if using Obs):
65 | syear_baseline = adfobj.climo_yrs["syear_baseline"]
66 | eyear_baseline = adfobj.climo_yrs["eyear_baseline"]
67 |
68 | #Grab all case nickname(s)
69 | test_nicknames = adfobj.case_nicknames["test_nicknames"]
70 | base_nickname = adfobj.case_nicknames["base_nickname"]
71 |
72 | res = adfobj.variable_defaults # will be dict of variable-specific plot preferences
73 | # or an empty dictionary if use_defaults was not specified in the config YAML file.
74 |
75 | #Set plot file type:
76 | # -- this should be set in basic_info_dict, but is not required
77 | # -- So check for it, and default to png
78 | basic_info_dict = adfobj.read_config_var("diag_basic_info")
79 | plot_type = basic_info_dict.get('plot_type', 'png')
80 | print(f"\t NOTE: Plot type is set to {plot_type}")
81 |
82 | # check if existing plots need to be redone
83 | redo_plot = adfobj.get_basic_info('redo_plot')
84 | print(f"\t NOTE: redo_plot is set to {redo_plot}")
85 | #-----------------------------------------
86 |
87 | #Set data path variables:
88 | #-----------------------
89 | mclimo_rg_loc = Path(model_rgrid_loc)
90 | if not adfobj.compare_obs:
91 | dclimo_loc = Path(data_loc)
92 | #-----------------------
93 |
94 | #Set seasonal ranges:
95 | seasons = {"ANN": np.arange(1,13,1),
96 | "DJF": [12, 1, 2],
97 | "JJA": [6, 7, 8],
98 | "MAM": [3, 4, 5],
99 | "SON": [9, 10, 11]}
100 |
101 | #Loop over variables:
102 | for var in var_list:
103 | #Notify user of variable being plotted:
104 | print(f"\t - meridional mean plots for {var}")
105 |
106 | if adfobj.compare_obs:
107 | #Check if obs exist for the variable:
108 | if var in var_obs_dict:
109 | #Note: In the future these may all be lists, but for
110 | #now just convert the target_list.
111 | #Extract target file:
112 | dclimo_loc = var_obs_dict[var]["obs_file"]
113 | #Extract target list (eventually will be a list, for now need to convert):
114 | data_list = [var_obs_dict[var]["obs_name"]]
115 | #Extract target variable name:
116 | data_var = var_obs_dict[var]["obs_var"]
117 | else:
118 | dmsg = f"No obs found for variable `{var}`, meridional mean plotting skipped."
119 | adfobj.debug_log(dmsg)
120 | continue
121 | #End if
122 | else:
123 | #Set "data_var" for consistent use below:
124 | data_var = var
125 | #End if
126 |
127 | # Check res for any variable specific options that need to be used BEFORE going to the plot:
128 | if var in res:
129 | vres = res[var]
130 | #If found then notify user, assuming debug log is enabled:
131 | adfobj.debug_log(f"meridional_mean: Found variable defaults for {var}")
132 |
133 | else:
134 | vres = {}
135 | #End if
136 |
137 | #loop over different data sets to plot model against:
138 | for data_src in data_list:
139 | # load data (observational) comparison files
140 | # (we should explore intake as an alternative to having this kind of repeated code):
141 | if adfobj.compare_obs:
142 | #For now, only grab one file (but convert to list for use below)
143 | oclim_fils = [dclimo_loc]
144 | else:
145 | oclim_fils = sorted(dclimo_loc.glob(f"{data_src}_{var}_baseline.nc"))
146 | #End if
147 | oclim_ds = utils.load_dataset(oclim_fils)
148 |
149 | #Loop over model cases:
150 | for case_idx, case_name in enumerate(case_names):
151 |
152 | #Set case nickname:
153 | case_nickname = test_nicknames[case_idx]
154 |
155 | #Set output plot location:
156 | plot_loc = Path(plot_locations[case_idx])
157 |
158 | #Check if plot output directory exists, and if not, then create it:
159 | if not plot_loc.is_dir():
160 | print(f" {plot_loc} not found, making new directory")
161 | plot_loc.mkdir(parents=True)
162 |
163 | # load re-gridded model files:
164 | mclim_fils = sorted(mclimo_rg_loc.glob(f"{data_src}_{case_name}_{var}_*.nc"))
165 | mclim_ds = utils.load_dataset(mclim_fils)
166 |
167 | # stop if data is invalid:
168 | if (oclim_ds is None) or (mclim_ds is None):
169 | warnings.warn(f"invalid data, skipping meridional mean plot of {var}")
170 | continue
171 |
172 | #Extract variable of interest
173 | odata = oclim_ds[data_var].squeeze() # squeeze in case of degenerate dimensions
174 | mdata = mclim_ds[var].squeeze()
175 |
176 | # APPLY UNITS TRANSFORMATION IF SPECIFIED:
177 | # NOTE: looks like our climo files don't have all their metadata
178 | mdata = mdata * vres.get("scale_factor",1) + vres.get("add_offset", 0)
179 | # update units
180 | mdata.attrs['units'] = vres.get("new_unit", mdata.attrs.get('units', 'none'))
181 |
182 | # Do the same for the baseline case if need be:
183 | if not adfobj.compare_obs:
184 | odata = odata * vres.get("scale_factor",1) + vres.get("add_offset", 0)
185 | # update units
186 | odata.attrs['units'] = vres.get("new_unit", odata.attrs.get('units', 'none'))
187 | # Or for observations
188 | else:
189 | odata = odata * vres.get("obs_scale_factor",1) + vres.get("obs_add_offset", 0)
190 | # Note: we are going to assume that the specification ensures the conversion makes the units the same.
191 | # Doesn't make sense to add a different unit.
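 |                 #For example, a variable-defaults entry (hypothetical) of
 |                 #  "scale_factor: 86400000, new_unit: mm/day"
 |                 #would convert precipitation from m/s to mm/day here.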
192 |
193 | # determine whether it's 2D or 3D
194 | # 3D triggers search for surface pressure
195 | validate_lat_lev = utils.validate_dims(mdata, ['lat', 'lev']) # keys=> 'has_lat', 'has_lev', with T/F values
196 |
197 | #Notify user of level dimension:
198 | if validate_lat_lev['has_lev']:
199 | print(f"\t INFO: {var} has lev dimension.")
200 | has_lev = True
201 | else:
202 | has_lev = False
203 |
204 | #
205 | # Seasonal Averages
206 | # Note: xarray can do seasonal averaging, but depends on having time accessor,
207 | # which these prototype climo files don't.
208 | #
209 |
210 | #Create new dictionaries:
211 | mseasons = {}
212 | oseasons = {}
213 |
214 | #Loop over season dictionary:
215 | for s in seasons:
216 | plot_name = plot_loc / f"{var}_{s}_Meridional_Mean.{plot_type}"
217 |
218 |                     # Check redo_plot: if False and the plot exists, reuse it; if True, remove the old plot first:
219 | if (not redo_plot) and plot_name.is_file():
220 | #Add already-existing plot to website (if enabled):
221 | adfobj.debug_log(f"'{plot_name}' exists and clobber is false.")
222 | adfobj.add_website_data(plot_name, var, case_name, season=s,
223 | plot_type="Meridional")
224 | #Continue to next iteration:
225 | continue
226 | elif (redo_plot) and plot_name.is_file():
227 | plot_name.unlink()
228 |
229 | mseasons[s] = utils.seasonal_mean(mdata, season=s, is_climo=True)
230 | oseasons[s] = utils.seasonal_mean(odata, season=s, is_climo=True)
231 |
232 |
233 | #Create new plot:
234 | pf.plot_meridional_mean_and_save(plot_name, case_nickname, base_nickname,
235 | [syear_cases[case_idx],eyear_cases[case_idx]],
236 | [syear_baseline,eyear_baseline],
237 | mseasons[s], oseasons[s], has_lev, latbounds=slice(-5,5), obs=obs, **vres)
238 |
239 | #Add plot to website (if enabled):
240 | adfobj.add_website_data(plot_name, var, case_name, season=s,
241 | plot_type="Meridional")
242 |
243 | #End for (seasons loop)
244 | #End for (case names loop)
245 | #End for (obs/baseline loop)
246 | #End for (variables loop)
247 |
248 | #Notify user that script has ended:
249 | print(" ...Meridional mean plots have been generated successfully.")
250 |
251 |
257 | ##############
258 | #END OF SCRIPT
--------------------------------------------------------------------------------
/lib/externals/CVDP/driver.ncl:
--------------------------------------------------------------------------------
1 | ;
2 | ; CVDP driver script. To run the CVDP at the command line type: ncl driver.ncl
3 | ; To run the CVDP at the command line, put it in background mode, and write the terminal output
4 | ; to a file named file.out, type: ncl driver.ncl >&! file.out &
5 | ;
6 | ;============================================================================================
7 | outdir = "/project/CVDP/" ; location of output files (must end in a "/")
8 | ; It is recommended that a new or empty directory be pointed to here
9 | ; as existing files in outdir can get removed.
10 |
11 | namelists_only = "False" ; Set to True to only create the variable namelists. Useful
12 | ; upon running the package for the first time to verify that the correct
13 | ; files are being selected by the package. (See files in namelist_byvar/ directory)
14 | ; Set to False to run the entire package.
15 |
16 | obs = "True" ; True = analyze and plot observations (specified in namelist_obs), False = do not
17 | scale_timeseries = "False" ; True = scale timeseries so that x-axis length is comparable across timeseries, False = do not
18 | output_data = "True" ; True = output selected calculated data to a netCDF file. Make sure .nc files from previous CVDP
19 | ; runs are not in outdir or they will get added to or modified.
20 | compute_modes_mon = "True" ; True = compute DJF, MAM, JJA, SON, Annual and Monthly Atmospheric Modes of Variability
21 | ; False = do not compute the Monthly Atmospheric Modes of Variability (saves computation time)
22 | ;- - - - - - - - - - - - - - - - - -
23 | opt_climo = "Full" ; Full = remove climatology based on full record of each simulation,
24 | ; Custom = set climatological period using climo_syear (climatological start year) and climo_eyear (climatological end year)
25 |
26 | if (opt_climo.eq."Custom") then ; When climo_syear and climo_eyear are positive, remove the climatology/annual cycle based on these years.
27 | climo_syear = -30 ; Both settings should be within the range of years of all specified model runs and observational datasets.
28 | climo_eyear = 0 ; When climo_syear is negative, remove the climatology/annual cycle relative to the end of each model run
29 | end if ; or observational dataset. Example: climo_syear = -25, climo_eyear = 0 will result in the climatology
30 | ; being removed from the last 26 years of each model run and observations.
31 | ;- - - - - - - - - - - - - - - - - -
32 | colormap = 0 ; 0 = default colormaps, 1 = colormaps better for color blindness
33 |
34 | output_type = "png" ; png = create png files, ps = create postscript files as well as png files (for web viewing).
35 |
36 | png_scale = 1.5 ; Set the output .png size. Value between .1->5. Any value > 1 (< 1) increases (decreases) png size.
37 | ; When output_type = "png" a value of 1 will result in a png sized 1500 (H) x 1500 (W) before automatic cropping of white space
38 | ; When output_type = "ps" a value of 1 will result in a png density setting of 144 before automatic cropping of white space
39 | webpage_title = "Title goes here" ; Set webpage title
40 |
41 | tar_output = "False" ; True = tar up all output in outdir and remove individual files, False = do not
42 | ; Note: ALL files in outdir will be tarred up and then removed from the outdir directory.
43 |
44 | ;---Advanced Options----------------------------------------------------------------------
45 | zp = "ncl_scripts/" ; directory path of CVDP NCL scripts. (must end in a "/")
46 | ; Examples: "ncl_scripts/" if all code is local, or on CGD or CISL systems: "~asphilli/CESM-diagnostics/CVDP/Release/v4.1.0/ncl_scripts/"
47 | ; Regardless of this setting the following files should be in one directory: namelist, driver.ncl, and namelist_obs.
48 | ; If pointing to code in ~asphilli make sure the driver script version #s match between this script and the script in ~asphilli.
49 |
50 | ncl_exec = "ncl" ; This can be changed to a different path if a different version of NCL needs to be used, such as "/different/path/to/bin/ncl"
51 |
52 | run_style = "parallel" ; parallel = allow simple python-based parallelization to occur. X number of CVDP NCL scripts will be called at once.
53 | ; X is set via max_num_tasks. Terminal output will be harder to follow.
54 | ; serial = call CVDP NCL scripts serially.
55 |
56 | max_num_tasks = 4 ; Set the number of CVDP NCL scripts that can be called at once. If greater than 1 the scripts will be called in parallel. (If unsure set to 3)
57 |
58 | modular = "False" ; True = Run only those CVDP scripts specified in modular_list.
59 | ; False = Run all CVDP scripts (Default)
60 |
61 | modular_list = "pdo,aice.trends_timeseries,sst.indices" ; When modular = "True" list the CVDP scripts that will be run.
62 | ; Example: modular_list = "amoc,amo,pr.trends_timeseries"
63 | ; For a list of available scripts see complete_list at line 72.
64 |
65 | machine_casesen = "True" ; True = Your filesystem is case sensitive (Default)
66 | ; False = Your filesystem is case insensitive
67 | ;========END USER MODIFICATIONS===========================================================
68 | version = "5.2.0"
69 |
70 | print("Starting: Climate Variability Diagnostics Package ("+systemfunc("date")+")")
71 |
72 | complete_list = "psl.nam_nao,psl.pna_npo,tas.trends_timeseries,snd.trends,psl.trends,amo,pdo,sst.indices,pr.trends_timeseries,"+\
73 | "psl.sam_psa,sst.mean_stddev,psl.mean_stddev,pr.mean_stddev,sst.trends_timeseries,amoc,tas.mean_stddev,"+\
74 | "snd.mean_stddev,aice.mean_stddev,aice.trends_timeseries,ipo"
75 |
76 | loadscript(zp+"functions.ncl")
77 | outfiles = (/"ts","trefht","psl","prect","snowdp","moc","maxnum","aice_nh","aice_sh"/)
78 | rm_obsfiles(outfiles)
79 |
80 | if (isfilepresent2(outdir+"metrics_orig.txt")) then ; remove metrics_orig.txt file if present
81 | system("rm "+outdir+"metrics_orig.txt")
82 | end if
83 |
84 | if (opt_climo.eq."Custom") then
85 | if (climo_syear.ge.climo_eyear) then
86 | print("Specified custom climatology start year (climo_syear) cannot be greater than or equal to the specified end year (climo_eyear), exiting CVDP.")
87 | exit
88 | end if
89 | else
90 | climo_syear = -999
91 | climo_eyear = -999
92 | end if
93 |
94 | if (.not.isfilepresent2(outdir)) then
95 | system("mkdir -p "+outdir)
96 | end if
97 | envvar_str = " export OUTDIR="+outdir+"; export OBS="+obs+"; export SCALE_TIMESERIES="+scale_timeseries+"; "+\
98 | "export OUTPUT_DATA="+output_data+"; export VERSION="+version+"; export PNG_SCALE="+png_scale+"; "+\
99 | "export OPT_CLIMO="+opt_climo+"; export CLIMO_SYEAR="+climo_syear+"; export CLIMO_EYEAR="+climo_eyear+"; "+\
100 | "export COMPUTE_MODES_MON="+compute_modes_mon+"; export OUTPUT_TYPE="+output_type+"; export MACHINE="+machine_casesen+"; "+\
101 | "export COLORMAP="+colormap+"; export CVDP_SCRIPTS="+zp+"; export MAX_TASKS="+max_num_tasks+";"
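 | ; the environment variables exported above are read back via getenv()
 | ; inside each CVDP NCL script (see, e.g., psl.trends.ncl)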
102 | ncl_exec = ncl_exec+" -n -Q"
103 |
104 | system(envvar_str + " "+str_sub_str(ncl_exec," -Q","")+" "+zp+"namelist.ncl") ; create variable namelists
105 | if (namelists_only.eq."True") then
106 | print("Variable namelists have been created. Examine files in namelist_byvar/ directory to verify CVDP file selection.")
107 | print("Finished: Climate Variability Diagnostics Package ("+systemfunc("date")+")")
108 | rm_obsfiles(outfiles)
109 | exit
110 | end if
111 | ;------------------------------
112 | ; Call CVDP calculation scripts
113 | ;
114 | if (modular.eq."True") then
115 | cm_list = str_sub_str(modular_list," ","") ; remove spaces if present
116 |   cm_list = str_sub_str(cm_list,",",".ncl,")       ; append .ncl to the end of each script name
117 | else
118 |   cm_list = str_sub_str(complete_list,",",".ncl,") ; append .ncl to the end of each script name
119 | end if
120 | cm_list = cm_list+".ncl"    ; append .ncl to the last script name
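 | ; e.g., modular_list = "amo,pdo" has now become "amo.ncl,pdo.ncl"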
121 |
122 | if (run_style.eq."parallel") then
123 | cm_list = str_sub_str(cm_list,","," "+zp)
124 | system(envvar_str+" python "+zp+"runTasks.py "+zp+cm_list)
125 | else
126 | cm_list = str_sub_str(cm_list,","," "+ncl_exec+" "+zp)
127 | cm_list = str_sub_str(cm_list,".ncl",".ncl;")
128 | system(envvar_str+" "+ncl_exec+" "+zp+cm_list)
129 | end if
130 | ;-------------------------------
131 | ; Finalize netCDF files, create metrics tables, and finalize images.
132 |
133 | if (output_data.eq."True") then ; finalize output nc files
134 | system(envvar_str + " "+ncl_exec+" "+zp+"ncfiles.append.ncl")
135 | end if
136 |
137 | met_files = systemfunc("ls "+outdir+"metrics.*.txt 2> /dev/null")
138 | if (dimsizes(met_files).eq.9) then ; if all 9 metrics text files are present, create metrics table(s)
139 | system(" export OUTDIR="+outdir+"; "+ncl_exec+" "+zp+"metrics.ncl")
140 | end if
141 |
142 | image_finalize(outdir,output_type,max_num_tasks,zp,toint(144*png_scale)) ; trim whitespace, convert to .png (if necessary) and apply watermarks to images
143 |
144 | system("cp "+zp+"cas-cvdp.png "+outdir)
145 | system("cp namelist_byvar/* "+outdir)
146 | system("cp namelist "+outdir)
147 | if (obs.eq."True") then
148 | system("cp namelist_obs "+outdir)
149 | end if
150 | ;-------------------------------
151 | ; Create webpages
152 | quote = str_get_dq()
153 | system(" export OUTDIR="+outdir+"; export VERSION="+version+"; export OUTPUT_DATA="+output_data+"; "+\
154 | "export OPT_CLIMO="+opt_climo+"; export CLIMO_SYEAR="+climo_syear+"; export CLIMO_EYEAR="+climo_eyear+"; "+\
155 | "export OBS="+obs+"; export CVDP_SCRIPTS="+zp+"; "+ncl_exec+" 'webtitle="+quote+webpage_title+quote+"' "+zp+"webpage.ncl")
156 | delete(quote)
157 | ;-------------------------------
158 | ; Create tar file
159 | if (tar_output.eq."True") then
160 | if (isfilepresent2(outdir+"cvdp.tar")) then
161 | system("rm "+outdir+"cvdp.tar")
162 | end if
163 | system("cd "+outdir+"; tar -cf cvdp.tar *")
164 | system("cd "+outdir+"; rm *.gif *.png *.ps *.txt *.html *.nc namelist*")
165 | end if
166 | ;-------------------------------
167 | ; Cleanup
168 | rm_obsfiles(outfiles)
169 | delete([/outfiles,outdir,obs,scale_timeseries,output_data,opt_climo,climo_syear,climo_eyear,\
170 | png_scale,webpage_title,compute_modes_mon,met_files/])
171 |
172 | print("Finished: Climate Variability Diagnostics Package ("+systemfunc("date")+")")
173 |
174 |
--------------------------------------------------------------------------------
/lib/externals/CVDP/ncl_scripts/psl.trends.ncl:
--------------------------------------------------------------------------------
1 | ; Calculates PSL global trends
2 | ;
3 | ; Variables used: psl
4 | ;
5 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl"
6 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl"
7 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl"
8 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/shea_util.ncl"
9 | load "$CVDP_SCRIPTS/functions.ncl"
10 |
11 | begin
12 | print("Starting: psl.trends.ncl")
13 |
14 | SCALE_TIMESERIES = getenv("SCALE_TIMESERIES")
15 | OUTPUT_DATA = getenv("OUTPUT_DATA")
16 | PNG_SCALE = tofloat(getenv("PNG_SCALE"))
17 | OPT_CLIMO = getenv("OPT_CLIMO")
18 | CLIMO_SYEAR = toint(getenv("CLIMO_SYEAR"))
19 | CLIMO_EYEAR = toint(getenv("CLIMO_EYEAR"))
20 | OUTPUT_TYPE = getenv("OUTPUT_TYPE")
21 | COLORMAP = getenv("COLORMAP")
22 |
23 | nsim = numAsciiRow("namelist_byvar/namelist_psl")
24 | na = asciiread("namelist_byvar/namelist_psl",(/nsim/),"string")
25 | names = new(nsim,"string")
26 | paths = new(nsim,"string")
27 | syear = new(nsim,"integer",-999)
28 | eyear = new(nsim,"integer",-999)
29 | delim = "|"
30 |
31 | do gg = 0,nsim-1
32 | names(gg) = str_strip(str_get_field(na(gg),1,delim))
33 | paths(gg) = str_strip(str_get_field(na(gg),2,delim))
34 | syear(gg) = stringtointeger(str_strip(str_get_field(na(gg),3,delim)))
35 | eyear(gg) = stringtointeger(str_strip(str_get_field(na(gg),4,delim)))
36 | end do
37 | nyr = eyear-syear+1
38 | nyr_max = max(nyr)
39 |
40 | pi=4.*atan(1.0)
41 | rad=(pi/180.)
42 |
43 | wks_type = OUTPUT_TYPE
44 | if (wks_type.eq."png") then
45 | wks_type@wkWidth = 1500*PNG_SCALE
46 | wks_type@wkHeight = 1500*PNG_SCALE
47 | end if
48 | wks_trends_djf = gsn_open_wks(wks_type,getenv("OUTDIR")+"psl.trends.djf")
49 | wks_trends_mam = gsn_open_wks(wks_type,getenv("OUTDIR")+"psl.trends.mam")
50 | wks_trends_jja = gsn_open_wks(wks_type,getenv("OUTDIR")+"psl.trends.jja")
51 | wks_trends_son = gsn_open_wks(wks_type,getenv("OUTDIR")+"psl.trends.son")
52 | wks_trends_ann = gsn_open_wks(wks_type,getenv("OUTDIR")+"psl.trends.ann")
53 | wks_trends_mon = gsn_open_wks(wks_type,getenv("OUTDIR")+"psl.trends.mon")
54 |
55 | if (COLORMAP.eq.0) then
56 | gsn_define_colormap(wks_trends_djf,"ncl_default")
57 | gsn_define_colormap(wks_trends_mam,"ncl_default")
58 | gsn_define_colormap(wks_trends_jja,"ncl_default")
59 | gsn_define_colormap(wks_trends_son,"ncl_default")
60 | gsn_define_colormap(wks_trends_ann,"ncl_default")
61 | gsn_define_colormap(wks_trends_mon,"ncl_default")
62 | end if
63 | if (COLORMAP.eq.1) then
64 | gsn_define_colormap(wks_trends_djf,"BlueDarkRed18")
65 | gsn_define_colormap(wks_trends_mam,"BlueDarkRed18")
66 | gsn_define_colormap(wks_trends_jja,"BlueDarkRed18")
67 | gsn_define_colormap(wks_trends_son,"BlueDarkRed18")
68 | gsn_define_colormap(wks_trends_ann,"BlueDarkRed18")
69 | gsn_define_colormap(wks_trends_mon,"BlueDarkRed18")
70 | end if
71 |
72 | map_djf = new(nsim,"graphic")
73 | map_mam = new(nsim,"graphic")
74 | map_jja = new(nsim,"graphic")
75 | map_son = new(nsim,"graphic")
76 | map_ann = new(nsim,"graphic")
77 | map_mon = new(nsim,"graphic")
78 |
79 | do ee = 0,nsim-1
80 | psl = data_read_in(paths(ee),"PSL",syear(ee),eyear(ee)) ; read in data, orient lats/lons correctly, set time coordinate variable up
81 | if (isatt(psl,"is_all_missing")) then
82 | delete(psl)
83 | continue
84 | end if
85 | if (OPT_CLIMO.eq."Full") then
86 | psl = rmMonAnnCycTLL(psl)
87 | else
88 | check_custom_climo(names(ee),syear(ee),eyear(ee),CLIMO_SYEAR,CLIMO_EYEAR)
89 | temp_arr = psl
90 | delete(temp_arr&time)
91 | temp_arr&time = cd_calendar(psl&time,-1)
92 | if (CLIMO_SYEAR.lt.0) then
93 | climo = clmMonTLL(temp_arr({(eyear(ee)+CLIMO_SYEAR)*100+1:(eyear(ee)+CLIMO_EYEAR)*100+12},:,:))
94 | else
95 | climo = clmMonTLL(temp_arr({CLIMO_SYEAR*100+1:CLIMO_EYEAR*100+12},:,:))
96 | end if
97 | delete(temp_arr)
98 | psl = calcMonAnomTLL(psl,climo)
99 | delete(climo)
100 | end if
101 |
102 | tttt = dtrend_msg_n(ispan(0,dimsizes(psl&time)-1,1),psl,False,True,0)
103 | psl_trends_mon = psl(0,:,:)
104 | psl_trends_mon = (/ onedtond(tttt@slope, (/dimsizes(psl&lat),dimsizes(psl&lon)/) ) /)
105 | psl_trends_mon = psl_trends_mon*dimsizes(psl&time)
106 | psl_trends_mon@units = psl@units+" "+nyr(ee)+"yr~S~-1~N~"
107 | delete(tttt)
108 |
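 |   ; 3-month running means for seasonal trends; the running average leaves
 |   ; the first and last timesteps missing, so each endpoint is filled with
 |   ; a 2-month average below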
109 | psl_seas = runave_n_Wrap(psl,3,0,0)
110 | psl_seas(0,:,:) = (/ dim_avg_n(psl(:1,:,:),0) /)
111 | psl_seas(dimsizes(psl&time)-1,:,:) = (/ dim_avg_n(psl(dimsizes(psl&time)-2:,:,:),0) /)
112 | psl_ann = runave_n_Wrap(psl,12,0,0)
113 | delete(psl)
114 |
115 | psl_trends_seas = psl_seas(:3,:,:)
116 | psl_trends_seas = psl_trends_seas@_FillValue
117 | psl_trends_ann = psl_trends_seas(0,:,:)
118 | do ff = 0,4
119 | if (ff.le.3) then
120 | tarr = psl_seas(ff*3::12,:,:)
121 | end if
122 | if (ff.eq.4) then
123 | tarr = psl_ann(5::12,:,:)
124 | end if
125 | tttt = dtrend_msg_n(ispan(0,dimsizes(tarr&time)-1,1),tarr,False,True,0)
126 | if (ff.le.3) then
127 | psl_trends_seas(ff,:,:) = (/ onedtond(tttt@slope, (/dimsizes(tarr&lat),dimsizes(tarr&lon)/) ) /)
128 | end if
129 | if (ff.eq.4) then
130 | psl_trends_ann = (/ onedtond(tttt@slope, (/dimsizes(tarr&lat),dimsizes(tarr&lon)/) ) /)
131 | end if
132 | delete([/tarr,tttt/])
133 | end do
134 | psl_trends_seas = psl_trends_seas*nyr(ee)
135 | psl_trends_seas@units = psl_seas@units+" "+nyr(ee)+"yr~S~-1~N~"
136 | psl_trends_ann = psl_trends_ann*nyr(ee)
137 | psl_trends_ann@units = psl_ann@units+" "+nyr(ee)+"yr~S~-1~N~"
138 | delete([/psl_seas,psl_ann/])
139 |
140 |
141 | if (OUTPUT_DATA.eq."True") then
142 | modname = str_sub_str(names(ee)," ","_")
143 | bc = (/"/","'","(",")"/)
144 | do gg = 0,dimsizes(bc)-1
145 | modname = str_sub_str(modname,bc(gg),"_")
146 | end do
147 | fn = getenv("OUTDIR")+modname+".cvdp_data.psl.trends."+syear(ee)+"-"+eyear(ee)+".nc"
148 | if (.not.isfilepresent2(fn)) then
149 | z = addfile(fn,"c")
150 | z@source = "NCAR Climate Analysis Section's Climate Variability Diagnostics Package v"+getenv("VERSION")
151 | z@notes = "Data from "+names(ee)+" from "+syear(ee)+"-"+eyear(ee)
152 | if (OPT_CLIMO.eq."Full") then
153 | z@climatology = syear(ee)+"-"+eyear(ee)+" climatology removed prior to all calculations (other than means)"
154 | else
155 | if (CLIMO_SYEAR.lt.0) then
156 | z@climatology = (eyear(ee)+CLIMO_SYEAR)+"-"+(eyear(ee)+CLIMO_EYEAR)+" climatology removed prior to all calculations (other than means)"
157 | else
158 | z@climatology = CLIMO_SYEAR+"-"+CLIMO_EYEAR+" climatology removed prior to all calculations (other than means)"
159 | end if
160 | end if
161 | z@Conventions = "CF-1.6"
162 | else
163 | z = addfile(fn,"w")
164 | end if
165 | z->psl_trends_djf = set_varAtts(psl_trends_seas(0,:,:),"psl linear trends (DJF)","","")
166 | z->psl_trends_mam = set_varAtts(psl_trends_seas(1,:,:),"psl linear trends (MAM)","","")
167 | z->psl_trends_jja = set_varAtts(psl_trends_seas(2,:,:),"psl linear trends (JJA)","","")
168 | z->psl_trends_son = set_varAtts(psl_trends_seas(3,:,:),"psl linear trends (SON)","","")
169 | z->psl_trends_ann = set_varAtts(psl_trends_ann,"psl linear trends (annual)","","")
170 | z->psl_trends_mon = set_varAtts(psl_trends_mon,"psl linear trends (monthly)","","")
171 | delete(z)
172 | delete([/modname,fn/])
173 | end if
174 |
175 | ;========================================================================
176 | res = True
177 | res@mpProjection = "WinkelTripel"
178 | res@mpGeophysicalLineColor = "gray42"
179 | res@mpGeophysicalLineThicknessF = 2.
180 | res@mpPerimOn = False
181 | res@mpGridLatSpacingF = 90 ; change latitude line spacing
182 | res@mpGridLonSpacingF = 180. ; change longitude line spacing
183 | res@mpGridLineColor = "transparent" ; trick ncl into drawing perimeter
184 | res@mpGridAndLimbOn = True ; turn on lat/lon lines
185 | res@mpFillOn = False
186 | res@mpCenterLonF = 210.
187 | res@mpOutlineOn = True
188 | res@gsnDraw = False
189 | res@gsnFrame = False
190 |
191 | res@cnLevelSelectionMode = "ExplicitLevels"
192 | res@cnLevels = ispan(-8,8,1)
193 | res@cnLineLabelsOn = False
194 | res@cnFillOn = True
195 | res@cnLinesOn = False
196 | res@lbLabelBarOn = False
197 |
198 | res@gsnLeftStringOrthogonalPosF = -0.05
199 | res@gsnLeftStringParallelPosF = .005
200 | res@gsnRightStringOrthogonalPosF = -0.05
201 | res@gsnRightStringParallelPosF = 0.96
202 | res@gsnRightString = ""
203 | res@gsnLeftString = ""
204 | res@gsnLeftStringFontHeightF = 0.014
205 | res@gsnCenterStringFontHeightF = 0.018
206 | res@gsnRightStringFontHeightF = 0.014
207 | res@gsnLeftString = syear(ee)+"-"+eyear(ee)
208 |
209 | res@gsnRightString = psl_trends_seas@units
210 | res@gsnCenterString = names(ee)
211 | map_djf(ee) = gsn_csm_contour_map(wks_trends_djf,psl_trends_seas(0,:,:),res)
212 | map_mam(ee) = gsn_csm_contour_map(wks_trends_mam,psl_trends_seas(1,:,:),res)
213 | map_jja(ee) = gsn_csm_contour_map(wks_trends_jja,psl_trends_seas(2,:,:),res)
214 | map_son(ee) = gsn_csm_contour_map(wks_trends_son,psl_trends_seas(3,:,:),res)
215 | map_ann(ee) = gsn_csm_contour_map(wks_trends_ann,psl_trends_ann,res)
216 | map_mon(ee) = gsn_csm_contour_map(wks_trends_mon,psl_trends_mon,res)
217 |
218 | delete([/psl_trends_seas,psl_trends_ann,psl_trends_mon,res/])
219 | end do
220 | panres = True
221 | panres@gsnMaximize = True
222 | panres@gsnPaperOrientation = "portrait"
223 | panres@gsnPanelLabelBar = True
224 | panres@gsnPanelYWhiteSpacePercent = 3.0
225 | panres@pmLabelBarHeightF = 0.05
226 | panres@pmLabelBarWidthF = 0.65
227 | panres@lbTitleOn = False
228 | panres@lbBoxLineColor = "gray70"
229 | panres@lbLabelFontHeightF = 0.013
230 | if (nsim.le.4) then
231 | if (nsim.eq.1) then
232 | panres@txFontHeightF = 0.022
233 | panres@gsnPanelBottom = 0.50
234 | else
235 | panres@txFontHeightF = 0.0145
236 | panres@gsnPanelBottom = 0.50
237 | end if
238 | else
239 | panres@txFontHeightF = 0.016
240 | panres@gsnPanelBottom = 0.05
241 | end if
242 | panres@lbLabelStride = 1
243 |
244 | panres@txString = "PSL Trends (DJF)"
245 | ncol = floattointeger(sqrt(nsim))
246 | nrow = (nsim/ncol)+mod(nsim,ncol)
247 | gsn_panel2(wks_trends_djf,map_djf,(/nrow,ncol/),panres)
248 | delete(wks_trends_djf)
249 |
250 | panres@txString = "PSL Trends (MAM)"
251 | gsn_panel2(wks_trends_mam,map_mam,(/nrow,ncol/),panres)
252 | delete(wks_trends_mam)
253 |
254 | panres@txString = "PSL Trends (JJA)"
255 | gsn_panel2(wks_trends_jja,map_jja,(/nrow,ncol/),panres)
256 | delete(wks_trends_jja)
257 |
258 | panres@txString = "PSL Trends (SON)"
259 | gsn_panel2(wks_trends_son,map_son,(/nrow,ncol/),panres)
260 | delete(wks_trends_son)
261 |
262 | panres@txString = "PSL Trends (Annual)"
263 | gsn_panel2(wks_trends_ann,map_ann,(/nrow,ncol/),panres)
264 | delete(wks_trends_ann)
265 |
266 | panres@txString = "PSL Trends (Monthly)"
267 | gsn_panel2(wks_trends_mon,map_mon,(/nrow,ncol/),panres)
268 | delete(wks_trends_mon)
269 | delete([/nrow,ncol,map_djf,map_mam,map_jja,map_son,map_ann,map_mon,panres/])
270 | print("Finished: psl.trends.ncl")
271 | end
272 |
--------------------------------------------------------------------------------
/scripts/plotting/global_mean_timeseries.py:
--------------------------------------------------------------------------------
1 | """Use time series files to produce global mean time series plots for ADF web site.
2 |
3 | Includes a minimal Class for bringing CESM2 LENS data
4 | from I. Simpson's directory (to be generalized).
5 |
6 | """
7 |
8 | from pathlib import Path
9 | from types import NoneType
10 |
11 | import xarray as xr
12 | import numpy as np
13 | import matplotlib.pyplot as plt
14 | import matplotlib.ticker as ticker
15 |
16 |
17 | import adf_utils as utils
18 | import warnings # use to warn user about missing files.
19 | warnings.formatwarning = utils.my_formatwarning
20 |
21 |
22 | def global_mean_timeseries(adfobj):
23 | """
24 | load time series file, calculate global mean, annual mean
25 | for each case
26 | Make a combined plot, save it, add it to website.
27 | Include the CESM2 LENS result if it can be found.
28 | """
29 |
30 | #Notify user that script has started:
31 | msg = "\n Generating global mean time series plots..."
32 | print(f"{msg}\n {'-' * (len(msg)-3)}")
33 |
34 | # Gather ADF configurations
35 | plot_loc = get_plot_loc(adfobj)
36 | plot_type = adfobj.read_config_var("diag_basic_info").get("plot_type", "png")
37 | res = adfobj.variable_defaults # will be dict of variable-specific plot preferences
38 | # or an empty dictionary if use_defaults was not specified in YAML.
39 |
40 | # Loop over variables
41 | for field in adfobj.diag_var_list:
42 | #Notify user of variable being plotted:
43 | print(f"\t - time series plot for {field}")
44 |
45 | # Check res for any variable specific options that need to be used BEFORE going to the plot:
46 | if field in res:
47 | vres = res[field]
48 | #If found then notify user, assuming debug log is enabled:
49 | adfobj.debug_log(f"global_mean_timeseries: Found variable defaults for {field}")
50 | else:
51 | vres = {}
52 | #End if
53 |
54 | # reference time series (DataArray)
55 | ref_ts_da = adfobj.data.load_reference_timeseries_da(field)
56 |
57 | base_name = adfobj.data.ref_case_label
58 |
59 | # Check to see if this field is available
60 | if ref_ts_da is None:
61 | if not adfobj.compare_obs:
62 | print(
63 | f"\t WARNING: Variable {field} for case '{base_name}' provides Nonetype. Skipping this variable"
64 | )
65 | continue
66 | else:
67 | # check data dimensions:
68 | has_lat_ref, has_lev_ref = utils.zm_validate_dims(ref_ts_da)
69 |
70 |             # check that this is a "2-d" (no lev dimension) variable:
71 | if has_lev_ref:
72 | print(
73 | f"\t WARNING: Variable {field} has a lev dimension for '{base_name}', which does not work with this script."
74 | )
75 | continue
76 | # End if
77 |
78 | # check if there is a lat dimension:
79 | if not has_lat_ref:
80 | print(
81 | f"\t WARNING: Variable {field} is missing a lat dimension for '{base_name}', cannot continue to plot."
82 | )
83 | continue
84 | # End if
85 |
86 | # reference time series global average
87 | ref_ts_da_ga = utils.spatial_average(ref_ts_da, weights=None, spatial_dims=None)
88 |
89 | # annually averaged
90 | ref_ts_da = utils.annual_mean(ref_ts_da_ga, whole_years=True, time_name="time")
91 | # End if
92 |
93 | # Loop over model cases:
94 | case_ts = {} # dictionary of annual mean, global mean time series
95 |
96 | # use case nicknames instead of full case names if supplied:
97 | labels = {
98 | case_name: nickname if nickname else case_name
99 | for nickname, case_name in zip(
100 | adfobj.data.test_nicknames, adfobj.data.case_names
101 | )
102 | }
103 | ref_label = (
104 | adfobj.data.ref_nickname
105 | if adfobj.data.ref_nickname
106 | else base_name
107 | )
108 |
109 | skip_var = False
110 | for case_name in adfobj.data.case_names:
111 |
112 | c_ts_da = adfobj.data.load_timeseries_da(case_name, field)
113 |
114 | if c_ts_da is None:
115 | print(
116 | f"\t WARNING: Variable {field} for case '{case_name}' provides Nonetype. Skipping this variable"
117 | )
118 | skip_var = True
119 | continue
120 | # End if
121 |
122 |             # If no reference, we still need to check that this is a "2-d" variable:
123 | # check data dimensions:
124 | has_lat_case, has_lev_case = utils.zm_validate_dims(c_ts_da)
125 |
126 | # If 3-d variable, notify user, flag and move to next test case
127 | if has_lev_case:
128 | print(
129 | f"\t WARNING: Variable {field} has a lev dimension for '{case_name}', which does not work with this script."
130 | )
131 |
132 | skip_var = True
133 | continue
134 | # End if
135 |
136 | # check if there is a lat dimension:
137 | if not has_lat_case:
138 | print(
139 | f"\t WARNING: Variable {field} is missing a lat dimension for '{case_name}', cannot continue to plot."
140 | )
141 | skip_var = True
142 | continue
143 | # End if
144 |
145 | # Gather spatial avg for test case
146 | c_ts_da_ga = utils.spatial_average(c_ts_da)
147 | case_ts[labels[case_name]] = utils.annual_mean(c_ts_da_ga)
148 |
149 |         # If any case was 3-d or was missing this variable, skip ahead to the next variable
150 | if skip_var:
151 | continue
152 |
153 | lens2_data = Lens2Data(
154 | field
155 | ) # Provides access to LENS2 dataset when available (class defined below)
156 |
157 | ## SPECIAL SECTION -- CESM2 LENS DATA:
158 | # Plot the timeseries
159 |         fig, ax = make_plot(
160 |             case_ts, lens2_data, label=ref_label, ref_ts_da=ref_ts_da
161 |         )
162 |
163 | unit = vres.get("new_unit","[-]")
164 | ax.set_ylabel(getattr(ref_ts_da,"unit", unit)) # add units
165 | plot_name = plot_loc / f"{field}_GlobalMean_ANN_TimeSeries_Mean.{plot_type}"
166 |
167 | conditional_save(adfobj, plot_name, fig)
168 |
169 | adfobj.add_website_data(
170 | plot_name,
171 | f"{field}_GlobalMean",
172 | None,
173 | season="ANN",
174 | multi_case=True,
175 | plot_type="TimeSeries",
176 | )
177 |
178 | #Notify user that script has ended:
179 | print(" ... global mean time series plots have been generated successfully.")
180 |
181 |
182 | # Helper/plotting functions
183 | ###########################
184 |
185 | def conditional_save(adfobj, plot_name, fig, verbose=None):
186 |     """Save the figure unless an existing plot file should be kept.
187 | 
188 |     If "redo_plot" is True, any existing file is replaced; otherwise an
189 |     existing file is left in place and the new figure is discarded.
190 |     The figure is closed in every case to free memory.
191 |     """
192 |     redo_plot = adfobj.get_basic_info("redo_plot")
193 |     if plot_name.is_file():
194 |         if redo_plot:
195 |             # Replace the old plot with the newly generated one:
196 |             plot_name.unlink()
197 |             fig.savefig(plot_name)
198 |         elif verbose:
199 |             # Keep the old plot; report only when verbose is requested:
200 |             print("\t - plot file detected, redo is false, so keep existing file.")
201 |     else:
202 |         # No plot file present yet, so save the new figure:
203 |         fig.savefig(plot_name)
204 |     plt.close(fig)
205 | 
206 | ######
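# Decision table for conditional_save (documentation only; the branches
# above reduce to):
#
#   redo_plot | plot file exists | action
#   ----------+------------------+---------------------------------
#   True      | yes              | delete old file, save new figure
#   any       | no               | save new figure
#   False     | yes              | keep old file, discard new figure
#
# The figure is closed in every branch to free memory.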
207 |
208 |
209 | def get_plot_loc(adfobj, verbose=None):
210 | """Return the path for plot files.
211 | Contains side-effect: will make the directory and parents if needed.
212 | """
213 | plot_location = adfobj.plot_location
214 | if not plot_location:
215 | plot_location = adfobj.get_basic_info("cam_diag_plot_loc")
216 | if isinstance(plot_location, list):
217 | for pl in plot_location:
218 | plpth = Path(pl)
219 | # Check if plot output directory exists, and if not, then create it:
220 | if not plpth.is_dir():
221 | if verbose:
222 | print(f"\t {pl} not found, making new directory")
223 | plpth.mkdir(parents=True)
224 | if len(plot_location) == 1:
225 | plot_loc = Path(plot_location[0])
226 | else:
227 | if verbose:
228 | print(
229 | f"\t Ambiguous plotting location since all cases go on same plot. Will put them in first location: {plot_location[0]}"
230 | )
231 | plot_loc = Path(plot_location[0])
232 | else:
233 | plot_loc = Path(plot_location)
234 | print(f"\t Determined plot location: {plot_loc}")
235 | return plot_loc
236 | ######
237 |
238 |
239 | class Lens2Data:
240 | """Access Isla's LENS2 data to get annual means."""
241 |
242 | def __init__(self, field):
243 | self.field = field
244 | self.has_lens, self.lens2 = self._include_lens()
245 |
246 | def _include_lens(self):
247 |         lens2_path = Path(
248 |             "/glade/campaign/cgd/cas/islas/CESM_DATA/LENS2/global_means/annualmeans/"
249 |         )
250 |
251 | lens2_fil = sorted(lens2_path.glob(f"{self.field}_*LENS2*first50*nc"))
252 | if lens2_fil:
253 | lens2_fil = lens2_fil[0]
254 | lens2 = xr.open_mfdataset(lens2_fil)
255 | has_lens = True
256 | else:
257 | warnings.warn(f"\t INFO: Did not find LENS2 file for {self.field}.")
258 | has_lens = False
259 | lens2 = None
260 | return has_lens, lens2
261 | ######
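# Usage sketch for Lens2Data (illustrative only; "TS" is a hypothetical
# field name, and "M" is the LENS2 ensemble-member dimension used in
# make_plot below):
#
#     lens2 = Lens2Data("TS")
#     if lens2.has_lens:
#         member_mean = lens2.lens2["TS"].mean("M")   # ensemble mean by year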
262 |
263 |
264 | def make_plot(case_ts, lens2, label=None, ref_ts_da=None):
265 |     """Plot annual mean time series for each case, plus the reference
266 |     (if provided) and the LENS2 ensemble spread (if available)."""
267 |     fig, ax = plt.subplots()
268 | 
269 |     # Plot reference/baseline if available; the cases are plotted either way
270 |     if ref_ts_da is not None:
271 |         ax.plot(ref_ts_da.year, ref_ts_da, label=label)
272 | 
273 | for idx, (c, cdata) in enumerate(case_ts.items()):
274 | ax.plot(cdata.year, cdata, label=c)
275 | # Force the plot axis to always plot the test case years
276 | if idx == 0:
277 | syr = min(cdata.year)
278 | eyr = max(cdata.year)
279 |
280 | field = lens2.field # this will be defined even if no LENS2 data
281 | if lens2.has_lens:
282 | lensmin = lens2.lens2[field].min("M") # note: "M" is the member dimension
283 | lensmax = lens2.lens2[field].max("M")
284 | ax.fill_between(lensmin.year, lensmin, lensmax, color="lightgray", alpha=0.5)
285 | ax.plot(
286 | lens2.lens2[field].year,
287 | lens2.lens2[field].mean("M"),
288 | color="darkgray",
289 | linewidth=2,
290 | label="LENS2",
291 | )
292 | # Get the current y-axis limits
293 | ymin, ymax = ax.get_ylim()
294 | # Check if the y-axis crosses zero
295 | if ymin < 0 < ymax:
296 | ax.axhline(y=0, color="lightgray", linestyle="-", linewidth=1)
297 | ax.set_title(field, loc="left")
298 |
299 | # Set the x-axis limits to the first test case climo years
300 | ax.set_xlim(syr, eyr)
301 | # Force x-axis to use only integer labels
302 | ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
303 |
304 | ax.set_xlabel("YEAR")
305 | # Place the legend
306 | ax.legend(
307 | bbox_to_anchor=(0.5, -0.15), loc="upper center", ncol=min(len(case_ts), 3)
308 | )
309 | plt.tight_layout(pad=2, w_pad=1.0, h_pad=1.0)
310 |
311 | return fig, ax
312 | ######
313 |
314 |
315 | ##############
316 | #END OF SCRIPT
--------------------------------------------------------------------------------
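A note on the averaging above: global_mean_timeseries delegates the actual
reductions to adf_utils.spatial_average and adf_utils.annual_mean. A generic
xarray equivalent of that pipeline is sketched below for orientation only
(the ADF helpers additionally handle options such as whole_years and
explicit weights):

    import numpy as np
    import xarray as xr

    def global_annual_mean(da: xr.DataArray) -> xr.DataArray:
        """Area-weighted global mean, then annual mean (sketch)."""
        weights = np.cos(np.deg2rad(da.lat))           # cos(lat) area weights
        gmean = da.weighted(weights).mean(("lat", "lon"))
        return gmean.groupby("time.year").mean("time")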
/scripts/plotting/qbo.py:
--------------------------------------------------------------------------------
1 | import xarray as xr
2 | import numpy as np
3 | from pathlib import Path
4 | import matplotlib.pyplot as plt
5 | import matplotlib.colors as mcolors
6 | import matplotlib as mpl
7 |
8 | import adf_utils as utils
9 | import warnings # use to warn user about missing files
10 | warnings.formatwarning = utils.my_formatwarning
11 |
12 | def qbo(adfobj):
13 | """
14 | This subroutine plots...
15 |
16 | (1) the times series of the 5S to 5N zonal mean U (QBOts.png)
17 | - this uses the same record length for each dataset and compares
18 | with ERA5.
19 |
20 | (2) the Dunkerton and Delisi QBO amplitude (QBOamp.png)
21 | - this uses the full record length for each dataset and compares
22 | with ERA5.
23 |
24 | Isla Simpson (islas@ucar.edu) 22nd March 2022
25 |
26 | """
27 | #Notify user that script has started:
28 | msg = "\n Generating qbo plots..."
29 | print(f"{msg}\n {'-' * (len(msg)-3)}")
30 |
31 | #Extract relevant info from the ADF:
32 | case_names = adfobj.get_cam_info('cam_case_name', required=True)
33 | case_loc = adfobj.get_cam_info('cam_ts_loc', required=True)
34 | base_name = adfobj.get_baseline_info('cam_case_name')
35 | base_loc = adfobj.get_baseline_info('cam_ts_loc')
36 | obsdir = adfobj.get_basic_info('obs_data_loc', required=True)
37 | plot_locations = adfobj.plot_location
38 | plot_type = adfobj.get_basic_info('plot_type')
39 |
40 | #Grab all case nickname(s)
41 | test_nicknames = adfobj.case_nicknames["test_nicknames"]
42 | base_nickname = adfobj.case_nicknames["base_nickname"]
43 | case_nicknames = test_nicknames + [base_nickname]
44 |
45 | # check if existing plots need to be redone
46 | redo_plot = adfobj.get_basic_info('redo_plot')
47 | print(f"\t NOTE: redo_plot is set to {redo_plot}")
48 |
49 | if not plot_type:
50 | plot_type = 'png'
51 | #End if
52 |
53 | #Check if zonal wind ("U") variable is present. If not then skip
54 | #this script:
55 | if not ('U' in adfobj.diag_var_list):
56 | msg = "No zonal wind ('U') variable present"
57 | msg += " in 'diag_var_list', so QBO plots will"
58 | msg += " be skipped."
59 | print(msg)
60 | return
61 | #End if
62 |
63 | #Set path for QBO figures:
64 | plot_loc_ts = Path(plot_locations[0]) / f'QBO_TimeSeries_Special_Mean.{plot_type}'
65 | plot_loc_amp = Path(plot_locations[0]) / f'QBO_Amplitude_Special_Mean.{plot_type}'
66 |
67 | #Until a multi-case plot directory exists, let user know
68 | #that the QBO plot will be kept in the first case directory:
69 | print(f"\t QBO plots will be saved here: {plot_locations[0]}")
70 |
71 | # Check redo_plot. If set to True: remove old plots, if they already exist:
72 | if (not redo_plot) and plot_loc_ts.is_file() and plot_loc_amp.is_file():
73 | #Add already-existing plot to website (if enabled):
74 | adfobj.debug_log(f"'{plot_loc_ts}' and '{plot_loc_amp}' exist and clobber is false.")
75 | adfobj.add_website_data(plot_loc_ts, "QBO", None, season="TimeSeries", multi_case=True, non_season=True)
76 | adfobj.add_website_data(plot_loc_amp, "QBO", None, season="Amplitude", multi_case=True, non_season=True)
77 |
78 | #Continue to next iteration:
79 | return
80 | elif (redo_plot):
81 | if plot_loc_ts.is_file():
82 | plot_loc_ts.unlink()
83 | if plot_loc_amp.is_file():
84 | plot_loc_amp.unlink()
85 | #End if
86 |
87 | #Check if model vs model run, and if so, append baseline to case lists:
88 | if not adfobj.compare_obs:
89 | case_loc.append(base_loc)
90 | case_names.append(base_name)
91 | #End if
92 |
93 | #----Read in the OBS (ERA5, 5S-5N average already computed)
94 | obs = xr.open_dataset(obsdir+"/U_ERA5_5S_5N_1979_2019.nc").U_5S_5N
95 |
96 | #----Read in the case data and baseline
97 | ncases = len(case_loc)
98 | casedat = [utils.load_dataset(sorted(Path(case_loc[i]).glob(f"{case_names[i]}.*.U.*.nc"))) for i in range(0,ncases,1)]
99 |
100 | #Find indices for all case datasets that don't contain a zonal wind field (U):
101 | bad_idxs = []
102 | for idx, dat in enumerate(casedat):
103 | if 'U' not in dat.variables:
104 | warnings.warn(f"\t WARNING: Case {case_names[idx]} contains no 'U' field, skipping...")
105 | bad_idxs.append(idx)
106 | #End if
107 | #End for
108 |
109 | #Pare the case lists down to cases that actually contain a zonal wind field (U),
110 | #popping the largest index first so the remaining indices stay valid:
111 | for bad_idx in sorted(bad_idxs, reverse=True):
112 |     for case_list in (casedat, case_names, case_loc, case_nicknames):
113 |         case_list.pop(bad_idx)
114 | #End for
115 | ncases = len(casedat)
116 | #----Calculate the zonal mean
117 | casedatzm = []
118 | for i in range(0,ncases,1):
119 | has_dims = utils.validate_dims(casedat[i].U, ['lon'])
120 | if not has_dims['has_lon']:
121 |         print(f"\t WARNING: Variable U is missing a lon dimension for '{case_loc[i]}', cannot continue to plot.")
122 | else:
123 | casedatzm.append(casedat[i].U.mean("lon"))
124 | if len(casedatzm) == 0:
125 |     print("\t WARNING: No available cases found, exiting script.")
126 |     exitmsg = "\tNo QBO plots will be made."
127 |     print(exitmsg)
128 |     return
129 | if len(casedatzm) != ncases:
130 |     print("\t WARNING: Number of available cases does not match number of requested cases. Exiting script.")
131 |     exitmsg = "\tNo QBO plots will be made."
132 |     print(exitmsg)
133 |     return
134 |
135 | #----Calculate the 5S-5N average
136 | casedat_5S_5N = [ cosweightlat(casedatzm[i],-5,5) for i in range(0,ncases,1) ]
137 |
138 | #----Find the minimum number of years across dataset for plotting the timeseries.
139 | nyobs = np.floor(obs.time.size/12.)
140 | nycase = [ np.floor(casedat_5S_5N[i].time.size/12.) for i in range(0,ncases,1) ]
141 | nycase.append(nyobs)
142 | minny = int(np.min(nycase))
143 |
144 | #----QBO timeseries plots
145 | fig = plt.figure(figsize=(16,16))
146 | fig.suptitle('QBO Time Series', fontsize=14)
147 |
148 | x1, x2, y1, y2 = plotpos()
149 | ax = plotqbotimeseries(fig, obs, minny, x1[0], x2[0], y1[0], y2[0],'ERA5')
150 |
151 | casecount=0
152 | for icase in range(0,ncases,1):
153 |         if (icase < 11 ): # obs panel plus up to 11 cases: the layout only handles 12 panels currently
154 | ax = plotqbotimeseries(fig, casedat_5S_5N[icase],minny,
155 | x1[icase+1],x2[icase+1],y1[icase+1],y2[icase+1], case_names[icase])
156 | casecount=casecount+1
157 | else:
158 | warnings.warn("The QBO diagnostics can only manage up to twelve cases!")
159 | break
160 | #End if
161 | #End for
162 |
163 | ax = plotcolorbar(fig, x1[0]+0.2, x2[2]-0.2,y1[casecount]-0.035,y1[casecount]-0.03)
164 |
165 | #Save figure to file:
166 | fig.savefig(plot_loc_ts, bbox_inches='tight', facecolor='white')
167 |
168 | #Add plot to website (if enabled):
169 | adfobj.add_website_data(plot_loc_ts, "QBO", None, season="TimeSeries", multi_case=True, non_season=True)
170 |
171 | #-----------------
172 |
173 | #---Dunkerton and Delisi QBO amplitude
174 | obsamp = calcddamp(obs)
175 | modamp = [ calcddamp(casedat_5S_5N[i]) for i in range(0,ncases,1) ]
176 |
177 | fig = plt.figure(figsize=(16,16))
178 |
179 | ax = fig.add_axes([0.05,0.6,0.4,0.4])
180 | ax.set_ylim(-np.log10(150),-np.log10(1))
181 | ax.set_yticks([-np.log10(100),-np.log10(30),-np.log10(10),-np.log10(3),-np.log10(1)])
182 | ax.set_yticklabels(['100','30','10','3','1'], fontsize=12)
183 | ax.set_ylabel('Pressure (hPa)', fontsize=12)
184 | ax.set_xlabel('Dunkerton and Delisi QBO amplitude (ms$^{-1}$)', fontsize=12)
185 | ax.set_title('Dunkerton and Delisi QBO amplitude', fontsize=14)
186 |
187 | ax.plot(obsamp, -np.log10(obsamp.pre), color='black', linewidth=2, label='ERA5')
188 |
189 | for icase in range(0,ncases,1):
190 | ax.plot(modamp[icase], -np.log10(modamp[icase].lev), linewidth=2, label=case_nicknames[icase])
191 |
192 | ax.legend(loc='upper left')
193 | fig.savefig(plot_loc_amp, bbox_inches='tight', facecolor='white')
194 |
195 | #Add plot to website (if enabled):
196 | adfobj.add_website_data(plot_loc_amp, "QBO", None, season="Amplitude", multi_case=True, non_season=True)
197 |
198 | #-------------------
199 |
200 | #Notify user that script has ended:
201 | print(" ...QBO plots have been generated successfully.")
202 |
203 | #End QBO plotting script:
204 | return
205 |
206 | #-----------------For Calculating-----------------------------
207 |
208 | def cosweightlat(darray, lat1, lat2):
209 | """Calculate the weighted average for an [:,lat] array over the region
210 | lat1 to lat2
211 | """
212 |
213 | # flip latitudes if they are decreasing
214 | if (darray.lat[0] > darray.lat[darray.lat.size -1]):
215 | print("QBO: flipping latitudes")
216 | darray = darray.sortby('lat')
217 |
218 | region = darray.sel(lat=slice(lat1, lat2))
219 | weights=np.cos(np.deg2rad(region.lat))
220 | regionw = region.weighted(weights)
221 | regionm = regionw.mean("lat")
222 |
223 | return regionm
224 |
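# A quick self-contained check of cosweightlat (synthetic data, purely
# illustrative): a field that is constant in latitude must average to the
# same constant, because the cos(lat) weights are normalized by xarray.
#
#     lat = np.arange(-10., 11., 5.)
#     da = xr.DataArray(np.ones((3, lat.size)), dims=("time", "lat"),
#                       coords={"lat": lat})
#     print(cosweightlat(da, -5, 5).values)   # -> [1. 1. 1.]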
225 | def calcddamp(data):
226 | """Calculate the Dunkerton and Delisi QBO amplitude"""
227 | datseas = data.groupby('time.month').mean('time')
228 | datdeseas = data.groupby('time.month')-datseas
229 | ddamp = np.sqrt(2)*datdeseas.std(dim='time')
230 | return ddamp
231 |
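# Sanity sketch for calcddamp (synthetic, illustrative): for a sinusoid
# u(t) = A*sin(2*pi*t/T) with a QBO-like period (T ~ 28 months), the
# monthly climatology is near zero and std(u) ~ A/sqrt(2), so
# sqrt(2)*std recovers the amplitude A:
#
#     import pandas as pd   # not otherwise needed by this script
#     time = pd.date_range("2000-01-01", periods=280, freq="MS")
#     u = xr.DataArray(10.0 * np.sin(2 * np.pi * np.arange(280) / 28.0),
#                      dims="time", coords={"time": time})
#     print(float(calcddamp(u)))   # approximately 10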
232 |
233 | #---------------------------------For Plotting------------------------------------------
234 | def plotpos():
235 | """ Positionings to position the plots nicely (3x4)"""
236 | x1 = [0.05,0.37,0.69,0.05,0.37,0.69,0.05,0.37,0.69,0.05,0.37,0.69]
237 | x2 = [0.32,0.64,0.95,0.32,0.64,0.95,0.32,0.64,0.95,0.32,0.64,0.95]
238 | y1 = [0.8,0.8,0.8,0.59,0.59,0.59,0.38,0.38,0.38,0.17,0.17,0.17]
239 | y2 = [0.95,0.95,0.95,0.74,0.74,0.74,0.53,0.53,0.53,0.32,0.32,0.32]
240 | return x1, x2, y1, y2
241 |
242 | def plotqbotimeseries(fig, dat, ny, x1, x2, y1, y2, title):
243 | """ Function for plotting each QBO time series panel
244 |
245 | Input:
246 |
247 |     fig = the figure to add the panel to
248 |     dat = the data to plot of the form (time, lev)
249 |     ny = the number of years to plot
250 |     x1, x2, y1, y2 = plot positioning arguments
251 |     title = the title string for the panel
252 | """
253 |
254 | ax = fig.add_axes([x1, y1, (x2-x1), (y2-y1)])
255 | datplot = dat.isel(time=slice(0,ny*12)).transpose()
256 | ci = 1 ; cmax=45
257 | nlevs = (cmax - (-1*cmax))/ci + 1
258 | clevs = np.arange(-1*cmax, cmax+ci, ci)
259 | mymap = blue2red_cmap(nlevs)
260 |
261 | plt.rcParams['font.size'] = '12'
262 |
263 | if "lev" in datplot.dims:
264 | ax.contourf(datplot.time.dt.year + (datplot.time.dt.month/12.), -1.*np.log10(datplot.lev), datplot,
265 |                levels = clevs, cmap=mymap, extend='both')
266 | elif "pre" in datplot.dims:
267 | ax.contourf(datplot.time.dt.year + (datplot.time.dt.month/12.), -1.*np.log10(datplot.pre), datplot,
268 |                levels = clevs, cmap=mymap, extend='both')
269 | else:
270 | raise ValueError("Cannot find either 'lev' or 'pre' in datasets for QBO diagnostics")
271 |
272 | ax.set_ylim(-np.log10(1000.), -np.log10(1))
273 | ax.set_yticks([-np.log10(1000),-np.log10(300),-np.log10(100),-np.log10(30),-np.log10(10),
274 | -np.log10(3),-np.log10(1)])
275 | ax.set_yticklabels(['1000','300','100','30','10','3','1'])
276 | ax.set_ylabel('Pressure (hPa)', fontsize=12)
277 | ax.set_title(title, fontsize=14)
278 |
279 | return ax
280 |
281 | def plotcolorbar(fig, x1, x2, y1, y2):
282 |     """ Plotting the color bar at location [x1, y1, x2-x1, y2-y1] """
283 | ci = 1 ; cmax=45
284 | nlevs = (cmax - (-1*cmax))/ci + 1
285 | clevs = np.arange(-1.*cmax, cmax+ci, ci)
286 | mymap = blue2red_cmap(nlevs)
287 |
288 | ax = fig.add_axes([x1, y1, x2-x1, y2-y1])
289 | norm = mpl.colors.Normalize(vmin=-1.*cmax, vmax=cmax)
290 |
291 | clb = mpl.colorbar.ColorbarBase(ax, cmap=mymap,
292 | orientation='horizontal', norm=norm, values=clevs)
293 |
294 | clb.ax.tick_params(labelsize=12)
295 | clb.set_label('U (ms$^{-1}$)', fontsize=14)
296 |
297 | return ax
298 |
299 | def blue2red_cmap(n, nowhite = False):
300 | """ combine two existing color maps to create a diverging color map with white in the middle
301 | n = the number of contour intervals
302 | """
303 |
304 | if (int(n/2) == n/2):
305 | # even number of contours
306 | nwhite=1
307 | nneg=n/2
308 | npos=n/2
309 | else:
310 | nwhite=2
311 | nneg = (n-1)/2
312 | npos = (n-1)/2
313 |
314 | if (nowhite):
315 | nwhite=0
316 |
317 | colors1 = plt.cm.Blues_r(np.linspace(0,1, int(nneg)))
318 | colors2 = plt.cm.YlOrRd(np.linspace(0,1, int(npos)))
319 | colorsw = np.ones((nwhite,4))
320 |
321 | colors = np.vstack((colors1, colorsw, colors2))
322 | mymap = mcolors.LinearSegmentedColormap.from_list('my_colormap', colors)
323 |
324 | return mymap
--------------------------------------------------------------------------------
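A note on the axis trick in plotqbotimeseries and the amplitude plot above:
a log-pressure axis is faked by plotting against -log10(p) and relabeling
the ticks with the original pressure values. The same idea in bare
matplotlib (self-contained sketch):

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    levs = [1000, 300, 100, 30, 10, 3, 1]            # pressure levels (hPa)
    ax.set_yticks([-np.log10(p) for p in levs])      # even spacing in log10(p)
    ax.set_yticklabels([str(p) for p in levs])       # label with true pressures
    ax.set_ylim(-np.log10(1000.), -np.log10(1.))     # 1000 hPa at the bottom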
/scripts/plotting/zonal_mean.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import numpy as np
3 | import xarray as xr
4 | import plotting_functions as pf
5 |
6 | import adf_utils as utils
7 | import warnings # use to warn user about missing files.
8 | warnings.formatwarning = utils.my_formatwarning
9 |
10 | def zonal_mean(adfobj):
11 |
12 | """
13 | Plots zonal average from climatological files (annual and seasonal).
14 | Compare CAM climatologies against
15 | other climatological data (observations or baseline runs).
16 |
17 | Parameters
18 | ----------
19 | adfobj : AdfDiag
20 | The diagnostics object that contains all the configuration information
21 |
22 | Returns
23 | -------
24 | None
25 | Does not return value, produces files.
26 |
27 | Notes
28 | -----
29 | Uses AdfData for loading data described by adfobj.
30 |
31 | Directly uses adfobj for the following:
32 | diag_var_list, climo_yrs, variable_defaults, read_config_var,
33 | get_basic_info, add_website_data, debug_log
34 |
35 | Determines whether `lev` dimension is present. If not, makes
36 | a line plot, but if so it makes a contour plot.
37 | TODO: There's a flag to plot linear vs log pressure, but no
38 | method to infer what the user wants.
39 | """
40 |
41 | #Notify user that script has started:
42 | msg = "\n Generating zonal mean plots..."
43 | print(f"{msg}\n {'-' * (len(msg)-3)}")
44 |
45 | var_list = adfobj.diag_var_list
46 |
47 | #Special ADF variable which contains the output paths for
48 | #all generated plots and tables:
49 | plot_locations = adfobj.plot_location
50 |
51 | #Grab case years
52 | syear_cases = adfobj.climo_yrs["syears"]
53 | eyear_cases = adfobj.climo_yrs["eyears"]
54 |
55 | #Grab baseline years (which may be empty strings if using Obs):
56 | syear_baseline = adfobj.climo_yrs["syear_baseline"]
57 | eyear_baseline = adfobj.climo_yrs["eyear_baseline"]
58 |
59 | res = adfobj.variable_defaults # will be dict of variable-specific plot preferences
60 | # or an empty dictionary if use_defaults was not specified in YAML.
61 |
62 | #Set plot file type:
63 | # -- this should be set in basic_info_dict, but is not required
64 | # -- So check for it, and default to png
65 | basic_info_dict = adfobj.read_config_var("diag_basic_info")
66 | plot_type = basic_info_dict.get('plot_type', 'png')
67 | print(f"\t NOTE: Plot type is set to {plot_type}")
68 |
69 | # check if existing plots need to be redone
70 | redo_plot = adfobj.get_basic_info('redo_plot')
71 | print(f"\t NOTE: redo_plot is set to {redo_plot}")
72 | #-----------------------------------------
73 |
74 |
75 | #Set seasonal ranges:
76 | seasons = {"ANN": np.arange(1,13,1),
77 | "DJF": [12, 1, 2],
78 | "JJA": [6, 7, 8],
79 | "MAM": [3, 4, 5],
80 | "SON": [9, 10, 11]}
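    # The dict keys drive the season loops below; e.g. "DJF" covers months
    # [12, 1, 2], and each key is passed by name to utils.seasonal_mean(...,
    # season=s, is_climo=True) when the seasonal averages are computed.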
81 |
82 | #Check if plots already exist and redo_plot boolean
83 | #If redo_plot is false and file exists, keep track and attempt to skip calcs to
84 |     #speed up performance a bit if re-running the ADF
85 | zonal_skip = []
86 | logp_zonal_skip = []
87 |
88 | #Loop over model cases:
89 | for case_idx, case_name in enumerate(adfobj.data.case_names):
90 | #Set output plot location:
91 | plot_loc = Path(plot_locations[case_idx])
92 |
93 | #Check if plot output directory exists, and if not, then create it:
94 | if not plot_loc.is_dir():
95 | print(f" {plot_loc} not found, making new directory")
96 | plot_loc.mkdir(parents=True)
97 | #End if
98 |
99 | #Loop over the variables for each season
100 | for var in var_list:
101 | for s in seasons:
102 | #Check zonal log-p:
103 |                 plot_name_log = plot_loc / f"{var}_logp_{s}_Zonal_Mean.{plot_type}"
104 |
105 | # Check redo_plot. If set to True: remove old plot, if it already exists:
106 | if (not redo_plot) and plot_name_log.is_file():
107 |                     logp_zonal_skip.append(plot_name_log)
108 |                     #Add the already-existing plot to the website (if enabled), then
109 |                     #fall through to also check the regular zonal plot below:
110 |                     adfobj.add_website_data(plot_name_log, f"{var}_logp", case_name, season=s,
111 |                                             plot_type="Zonal", category="Log-P")
112 |
113 | elif (redo_plot) and plot_name_log.is_file():
114 | plot_name_log.unlink()
115 | #End if
116 |
117 | #Check regular zonal
118 | plot_name = plot_loc / f"{var}_{s}_Zonal_Mean.{plot_type}"
119 | # Check redo_plot. If set to True: remove old plot, if it already exists:
120 | if (not redo_plot) and plot_name.is_file():
121 | zonal_skip.append(plot_name)
122 | #Add already-existing plot to website (if enabled):
123 | adfobj.add_website_data(plot_name, var, case_name, season=s,
124 | plot_type="Zonal")
125 |
126 | continue
127 | elif (redo_plot) and plot_name.is_file():
128 | plot_name.unlink()
129 | #End if
130 | #End for (seasons)
131 | #End for (variables)
132 | #End for (cases)
133 | #
134 | # End redo plots check
135 | #
136 |
137 | #
138 | # Setup Plotting
139 | #
140 | #Loop over variables:
141 | for var in var_list:
142 | #Notify user of variable being plotted:
143 | print(f"\t - zonal mean plots for {var}")
144 |
145 | if var not in adfobj.data.ref_var_nam:
146 | dmsg = f"\t WARNING: No reference data found for variable `{var}`, zonal mean plotting skipped."
147 | adfobj.debug_log(dmsg)
148 | print(dmsg)
149 | continue
150 |
151 | # Check res for any variable specific options that need to be used BEFORE going to the plot:
152 | if var in res:
153 | vres = res[var]
154 | #If found then notify user, assuming debug log is enabled:
155 | adfobj.debug_log(f"\t INFO: zonal_mean: Found variable defaults for {var}")
156 |
157 | else:
158 | vres = {}
159 | #End if
160 |
161 | # load reference data (observational or baseline)
162 | if not adfobj.compare_obs:
163 | base_name = adfobj.data.ref_case_label
164 | else:
165 | base_name = adfobj.data.ref_labels[var]
166 |
167 | # Gather reference variable data
168 | odata = adfobj.data.load_reference_regrid_da(base_name, var)
169 |
170 | #Check if regridded file exists, if not skip zonal plot for this var
171 | if odata is None:
172 | dmsg = f"\t WARNING: No regridded baseline file for {base_name} for variable `{var}`, zonal mean plotting skipped."
173 | adfobj.debug_log(dmsg)
174 | continue
175 |
176 | #Check zonal mean dimensions
177 | has_lat_ref, has_lev_ref = utils.zm_validate_dims(odata)
178 |
179 | # check if there is a lat dimension:
180 | # if not, skip test cases and move to next variable
181 | if not has_lat_ref:
182 | print(
183 | f"\t WARNING: Variable {var} is missing a lat dimension for '{base_name}', cannot continue to plot."
184 | )
185 | continue
186 | # End if
187 |
188 | #Loop over model cases:
189 | for case_idx, case_name in enumerate(adfobj.data.case_names):
190 |
191 | #Set case nickname:
192 | case_nickname = adfobj.data.test_nicknames[case_idx]
193 |
194 | #Set output plot location:
195 | plot_loc = Path(plot_locations[case_idx])
196 |
197 | # load re-gridded model files:
198 | mdata = adfobj.data.load_regrid_da(case_name, var)
199 |
200 | if mdata is None:
201 | dmsg = f"\t WARNING: No regridded test file for {case_name} for variable `{var}`, zonal mean plotting skipped."
202 | adfobj.debug_log(dmsg)
203 | continue
204 |
205 | # determine whether it's 2D or 3D
206 | # 3D triggers search for surface pressure
207 | # check data dimensions:
208 | has_lat, has_lev = utils.zm_validate_dims(mdata)
209 |
210 | # check if there is a lat dimension:
211 | if not has_lat:
212 | print(
213 | f"\t WARNING: Variable {var} is missing a lat dimension for '{case_name}', cannot continue to plot."
214 | )
215 | continue
216 | # End if
217 |
218 |             #Check if the case has vertical levels
219 |             #Notify user of level dimension:
220 | if has_lev:
221 | print(f"\t INFO: {var} has lev dimension.")
222 |
223 | #Check to make sure each case has vertical levels if one of the cases does
224 |             if (has_lev) and (not has_lev_ref):
225 |                 print(f"\t WARNING: case has a lev dimension but reference does not (case: {has_lev}, ref: {has_lev_ref}), skipping {var}.")
226 |                 continue
227 |             if (has_lev_ref) and (not has_lev):
228 |                 print(f"\t WARNING: reference has a lev dimension but case does not (case: {has_lev}, ref: {has_lev_ref}), skipping {var}.")
229 |                 continue
230 |
231 | #
232 | # Seasonal Averages
233 | #
234 |
235 | #Create new dictionaries:
236 | mseasons = {}
237 | oseasons = {}
238 |
239 | #Loop over season dictionary:
240 | for s in seasons:
241 |
242 | # time to make plot; here we'd probably loop over whatever plots we want for this variable
243 | # I'll just call this one "Zonal_Mean" ... would this work as a pattern [operation]_[AxesDescription] ?
244 | # NOTE: Up to this point, nothing really differs from global_latlon_map,
245 | # so we could have made one script instead of two.
246 | # Merging would make overall timing better because looping twice will double I/O steps.
247 | #
248 |
249 | # difference: each entry should be (lat, lon) or (plev, lat, lon)
250 |             # dseasons[s] = mseasons[s] - oseasons[s]
251 | # difference will be calculated in plot_zonal_mean_and_save;
252 | # because we can let any pressure-level interpolation happen there
253 | # This could be re-visited for efficiency or improved code structure.
254 |
255 | #Seasonal Averages
256 | mseasons[s] = utils.seasonal_mean(mdata, season=s, is_climo=True)
257 | oseasons[s] = utils.seasonal_mean(odata, season=s, is_climo=True)
258 |
259 | #Set the file name
260 | plot_name = plot_loc / f"{var}_{s}_Zonal_Mean.{plot_type}"
261 | plot_name_log = None
262 |
263 | if has_lev:
264 | #Set the file name for log-pressure plots
265 | plot_name_log = plot_loc / f"{var}_logp_{s}_Zonal_Mean.{plot_type}"
266 | #End if
267 |
268 | #Create plots
269 | if plot_name not in zonal_skip:
270 |
271 | #Create new plot:
272 | pf.plot_zonal_mean_and_save(plot_name, case_nickname, adfobj.data.ref_nickname,
273 | [syear_cases[case_idx],eyear_cases[case_idx]],
274 | [syear_baseline,eyear_baseline],
275 | mseasons[s], oseasons[s], has_lev, log_p=False, obs=adfobj.compare_obs, **vres)
276 |
277 | #Add plot to website (if enabled):
278 | adfobj.add_website_data(plot_name, var, case_name, season=s, plot_type="Zonal")
279 | #End if
280 |
281 | #Create log-pressure plots as well (if applicable)
282 | if (plot_name_log) and (plot_name_log not in logp_zonal_skip):
283 |
284 | pf.plot_zonal_mean_and_save(plot_name_log, case_nickname, adfobj.data.ref_nickname,
285 | [syear_cases[case_idx],eyear_cases[case_idx]],
286 | [syear_baseline,eyear_baseline],
287 | mseasons[s], oseasons[s], has_lev, log_p=True, obs=adfobj.compare_obs, **vres)
288 |
289 | #Add plot to website (if enabled):
290 | adfobj.add_website_data(plot_name_log, f"{var}_logp", case_name, season=s, plot_type="Zonal", category="Log-P")
291 | #End if
292 |
293 | #End for (seasons loop)
294 | #End for (case names loop)
295 | #End for (variables loop)
296 |
297 | #Notify user that script has ended:
298 | print(" ...Zonal mean plots have been generated successfully.")
299 |
300 |
301 | ##############
302 | #END OF SCRIPT
--------------------------------------------------------------------------------
/lib/test/unit_tests/test_adf_config.py:
--------------------------------------------------------------------------------
1 | """
2 | Collection of python unit tests
3 | for the "AdfConfig" class.
4 | """
5 |
6 | #+++++++++++++++++++++++
7 | #Import required modules
8 | #+++++++++++++++++++++++
9 |
10 | import unittest
11 | import sys
12 | import os
13 | import os.path
14 |
15 | #Set relevant path variables:
16 | _CURRDIR = os.path.abspath(os.path.dirname(__file__))
17 | _ADF_LIB_DIR = os.path.join(_CURRDIR, os.pardir, os.pardir)
18 | _TEST_FILES_DIR = os.path.join(_CURRDIR, "test_files")
19 |
20 | #Add ADF "lib" directory to python path:
21 | sys.path.append(_ADF_LIB_DIR)
22 |
23 | #Import AdfConfig class and AdfError
24 | from adf_config import AdfConfig
25 | from adf_base import AdfError
26 |
27 | #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
28 | #Main AdfConfig testing routine, used when script is run directly
29 | #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
30 |
31 | class AdfConfigTestRoutine(unittest.TestCase):
32 |
33 | """
34 | Runs all of the unit tests
35 | for the AdfConfig class. Ideally
36 | this set of tests will provide
37 | complete code coverage for AdfConfig.
38 | """
39 | def test_AdfConfig_create(self):
40 |
41 | """
42 | Check that the AdfConfig class can
43 | be initialized properly.
44 | """
45 |
46 | #Use example config file:
47 | baseline_example_file = os.path.join(_ADF_LIB_DIR, os.pardir, "config_cam_baseline_example.yaml")
48 |
49 | #Create AdfConfig object:
50 | adf_test = AdfConfig(baseline_example_file)
51 |
52 | #Assert that new object is of the "AdfConfig" class:
53 | self.assertIsInstance(adf_test, AdfConfig)
54 |
55 | #Also check that "read_config_var" works as expected:
56 | basic_diag_dict = adf_test.read_config_var("diag_basic_info")
57 |
58 | check_user = adf_test.read_config_var("user")
59 | check_user_expected = 'USER-NAME-NOT-SET'
60 | self.assertEqual(check_user, check_user_expected)
61 |
62 | obs_data_loc = adf_test.read_config_var("obs_data_loc", conf_dict=basic_diag_dict)
63 |
64 | self.assertEqual(obs_data_loc, "/glade/campaign/cgd/amp/amwg/ADF_obs")
65 |
66 | #####
67 |
68 | def test_AdfConfig_missing_file(self):
69 |
70 | """
71 | Check that AdfConfig throws the
72 | proper error when no config file
73 | is found.
74 | """
75 |
76 | #Set error message:
77 | ermsg = "File 'not_real.yaml' not found. Please provide full path."
78 |
79 | #Expect a FileNotFound error:
80 | with self.assertRaises(FileNotFoundError) as err:
81 |
82 | #Try and create AdfConfig object with non-existent file:
83 | adf_test = AdfConfig("not_real.yaml")
84 |
85 | #Check that error message matches what's expected:
86 | self.assertEqual(ermsg, str(err.exception))
87 |
88 | #####
89 |
90 | def test_AdfConfig_double_nested_config_var(self):
91 |
92 | """
93 | Check that AdfConfig throws the
94 | proper error when there is a
95 | doubly-nested variable present
96 | in the config (YAML) file.
97 | """
98 |
99 | #Use double-nested var config file:
100 | unset_example_file = os.path.join(_TEST_FILES_DIR, "config_cam_double_nested.yaml")
101 |
102 | #Set error message:
103 | ermsg = "ADF currently only allows for a single nested dict"
104 | ermsg += " in the config (YAML) file.\n Variable '{'double_nested_var': 'bad_val'}' is nested too far."
105 |
106 | #Expect an ADF error:
107 | with self.assertRaises(AdfError) as err:
108 |
109 | #Try and create AdfConfig object with doubly-nested config variable:
110 | adf_test = AdfConfig(unset_example_file)
111 |
112 | #Check that error message matches what's expected:
113 | self.assertEqual(ermsg, str(err.exception))
114 |
115 | #####
116 |
117 | def test_AdfConfig_read_config_bad_conf_dict(self):
118 |
119 | """
120 | Check that the "read_config_var"
121 | method throws the correct error
122 | when a non-dictionary is passed
123 | to "conf_dict".
124 | """
125 |
126 | #Use example config file:
127 | baseline_example_file = os.path.join(_ADF_LIB_DIR, os.pardir, "config_cam_baseline_example.yaml")
128 |
129 | #Create AdfConfig object:
130 | adf_test = AdfConfig(baseline_example_file)
131 |
132 | #Set error message:
133 | ermsg = "Supplied 'conf_dict' variable should be a dictionary, not type ''"
134 |
135 | #Expect a Type error:
136 | with self.assertRaises(TypeError) as err:
137 |
138 | #Try to read variable with bad "conf_dict" type:
139 | _ = adf_test.read_config_var("diag_basic_info", conf_dict="hello")
140 |
141 | #Check that error message matches what's expected:
142 | self.assertEqual(ermsg, str(err.exception))
143 |
144 | #####
145 |
146 | def test_AdfConfig_read_config_missing_var(self):
147 |
148 | """
149 | Check that the "read_config_var"
150 | method returns None when a
151 | non-required variable is requested that
152 | doesn't exist in the config dictionary.
153 | """
154 |
155 | #Use example config file:
156 | baseline_example_file = os.path.join(_ADF_LIB_DIR, os.pardir, "config_cam_baseline_example.yaml")
157 |
158 | #Create AdfConfig object:
159 | adf_test = AdfConfig(baseline_example_file)
160 |
161 | #Try to read non-existing variable:
162 | conf_val = adf_test.read_config_var("hello")
163 |
164 | #Check that provided value is "None":
165 | self.assertEqual(conf_val, None)
166 |
167 | #####
168 |
169 | def test_AdfConfig_read_config_missing_required_var(self):
170 |
171 | """
172 | Check that the "read_config_var"
173 | method throws the correct error
174 | when a variable is requested that
175 | doesn't exist in the config dictionary,
176 | and is required.
177 | """
178 |
179 | #Use example config file:
180 | baseline_example_file = os.path.join(_ADF_LIB_DIR, os.pardir, "config_cam_baseline_example.yaml")
181 |
182 | #Create AdfConfig object:
183 | adf_test = AdfConfig(baseline_example_file)
184 |
185 | #Set error message:
186 |         #Note that for some reason a KeyError adds extra quotes,
187 | #hence the extra string quotes here
188 | ermsg = '''"Required variable 'hello' not found in config file. Please see 'config_cam_baseline_example.yaml'."'''
189 |
190 | #Expect a Key error:
191 | with self.assertRaises(KeyError) as err:
192 |
193 | #Try to read non-existing variable:
194 | _ = adf_test.read_config_var("hello", required=True)
195 |
196 | #Check that error message matches what's expected:
197 | self.assertEqual(ermsg, str(err.exception))
198 |
199 | #####
200 |
201 | def test_AdfConfig_read_config_unset_var(self):
202 |
203 | """
204 | Check that the "read_config_var"
205 | method returns None when a
206 | non-required variable is requested that
207 | exists but hasn't been set to a value.
208 | """
209 |
210 | #Use unset var config file:
211 | unset_example_file = os.path.join(_TEST_FILES_DIR, "config_cam_unset_var.yaml")
212 |
213 | #Create AdfConfig object:
214 | adf_test = AdfConfig(unset_example_file)
215 |
216 | #Try to read non-existing variable:
217 | conf_val = adf_test.read_config_var("bad_var")
218 |
219 | #Check that provided value is "None":
220 | self.assertEqual(conf_val, None)
221 |
222 | #####
223 |
224 | def test_AdfConfig_read_config_required_unset_var(self):
225 |
226 | """
227 | Check that the "read_config_var"
228 | method throws the correct error
229 | when a variable is requested that
230 | exists but hasn't been set to a value
231 | """
232 |
233 | #Use unset var config file:
234 | unset_example_file = os.path.join(_TEST_FILES_DIR, "config_cam_unset_var.yaml")
235 |
236 | #Create AdfConfig object:
237 | adf_test = AdfConfig(unset_example_file)
238 |
239 | #Set error message:
240 | ermsg = "Required variable 'bad_var' has not been set to a value. Please see 'config_cam_baseline_example.yaml'."
241 |
242 | #Expect a Value error:
243 | with self.assertRaises(ValueError) as err:
244 |
245 | #Try to read non-existing variable:
246 | _ = adf_test.read_config_var("bad_var", required=True)
247 |
248 | #Check that error message matches what's expected:
249 | self.assertEqual(ermsg, str(err.exception))
250 |
251 | #####
252 |
253 | def test_AdfConfig_expand_references(self):
254 |
255 | """
256 | Check that the AdfConfig class can
257 | properly expand variables using keywords
258 | """
259 |
260 | #Use example config file:
261 | keyword_example_file = os.path.join(_TEST_FILES_DIR, "config_cam_keywords.yaml")
262 |
263 | #Create AdfConfig object:
264 | adf_test = AdfConfig(keyword_example_file)
265 |
266 | #Check that variables match pre-expansion:
267 | test_dict = adf_test.read_config_var("good_dict")
268 | test_dict_two = adf_test.read_config_var("good_dict_two")
269 |
270 | test_var = adf_test.read_config_var("good_var", conf_dict=test_dict)
271 | test_var_two = adf_test.read_config_var("good_var", conf_dict=test_dict_two)
272 |
273 | self.assertEqual(test_var, "It says ${test_var} and ${another_var}.")
274 | self.assertEqual(test_var_two, "${good_dict.good_var}")
275 |
276 | #Now expand variable references and check results:
277 | adf_test.expand_references(test_dict)
278 | adf_test.expand_references(test_dict_two)
279 |
280 | test_var_expanded = adf_test.read_config_var("good_var", conf_dict=test_dict)
281 | test_var_two_expanded = adf_test.read_config_var("good_var", conf_dict=test_dict_two)
282 |
283 | self.assertEqual(test_var_expanded, "It says yay! and 5.")
284 | self.assertEqual(test_var_two_expanded, "It says yay! and 5.")
285 |
286 | #####
287 |
288 | def test_AdfConfig_expand_references_non_specific_var(self):
289 |
290 | """
291 | Check that expand_references throws
292 | the correct error when a variable
293 | is used in a keyword that is defined
294 | in multiple different locations
295 | """
296 |
297 | #Use example config file:
298 | keyword_example_file = os.path.join(_TEST_FILES_DIR, "config_cam_keywords.yaml")
299 |
300 | #Create AdfConfig object:
301 | adf_test = AdfConfig(keyword_example_file)
302 |
303 | #Check that variable matches pre-expansion:
304 | test_dict = adf_test.read_config_var("bad_dict")
305 |
306 | test_var = adf_test.read_config_var("bad_var", conf_dict=test_dict)
307 |
308 | self.assertEqual(test_var, "${good_var}")
309 |
310 | #Set error message:
311 | ermsg = "More than one variable matches keyword '${good_var}'"
312 | ermsg += "\nPlease use '${section.variable}' keyword method to specify"
313 | ermsg += " which variable you want to use."
314 |
315 | #Expect an ADF error:
316 | with self.assertRaises(AdfError) as err:
317 |
318 | #Now check for failure when variable reference is expanded:
319 | adf_test.expand_references(test_dict)
320 |
321 | #Check that error message matches what's expected:
322 | self.assertEqual(ermsg, str(err.exception))
323 |
324 | #####
325 |
326 | def test_AdfConfig_expand_references_non_existent_var(self):
327 |
328 | """
329 | Check that expand_references throws
330 | the correct error when a variable
331 | is used in a keyword that doesn't
332 | actually exist in the config file.
333 | """
334 |
335 | #Use example config file:
336 | keyword_example_file = os.path.join(_TEST_FILES_DIR, "config_cam_keywords.yaml")
337 |
338 | #Create AdfConfig object:
339 | adf_test = AdfConfig(keyword_example_file)
340 |
341 | #Check that variable matches pre-expansion:
342 | test_dict = adf_test.read_config_var("bad_dict_two")
343 |
344 | test_var = adf_test.read_config_var("bad_var_two", conf_dict=test_dict)
345 |
346 | self.assertEqual(test_var, "${no_var}")
347 |
348 | #Set error message:
349 |         ermsg = "ERROR: Variable 'no_var'"
350 | ermsg += " not found in config (YAML) file."
351 |
352 | #Expect an ADF error:
353 | with self.assertRaises(AdfError) as err:
354 |
355 | #Now check for failure when variable reference is expanded:
356 | adf_test.expand_references(test_dict)
357 |
358 | #Check that error message matches what's expected:
359 | self.assertEqual(ermsg, str(err.exception))
360 |
361 |
362 | #++++++++++++++++++++++++++++++++++++++++++++++++
363 | #Run unit tests if this script is called directly
364 | #++++++++++++++++++++++++++++++++++++++++++++++++
365 |
366 | if __name__ == "__main__":
367 | unittest.main()
368 |
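# (The suite can also be run directly, since the sys.path setup above
# resolves the ADF "lib" directory relative to this file, e.g.:
#     python lib/test/unit_tests/test_adf_config.py )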
369 |
--------------------------------------------------------------------------------
/lib/externals/CVDP/ncl_scripts/snd.trends.ncl:
--------------------------------------------------------------------------------
1 | ; Calculates snow depth global trends
2 | ;
3 | ; Variables used: snd
4 | ;
5 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl"
6 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl"
7 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl"
8 | load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/shea_util.ncl"
9 | load "$CVDP_SCRIPTS/functions.ncl"
10 |
11 | begin
12 | print("Starting: snd.trends.ncl")
13 |
14 | SCALE_TIMESERIES = getenv("SCALE_TIMESERIES")
15 | OUTPUT_DATA = getenv("OUTPUT_DATA")
16 | PNG_SCALE = tofloat(getenv("PNG_SCALE"))
17 | OPT_CLIMO = getenv("OPT_CLIMO")
18 | CLIMO_SYEAR = toint(getenv("CLIMO_SYEAR"))
19 | CLIMO_EYEAR = toint(getenv("CLIMO_EYEAR"))
20 | OUTPUT_TYPE = getenv("OUTPUT_TYPE")
21 | COLORMAP = getenv("COLORMAP")
22 |
23 | nsim = numAsciiRow("namelist_byvar/namelist_snowdp")
24 | na = asciiread("namelist_byvar/namelist_snowdp",(/nsim/),"string")
25 | names = new(nsim,"string")
26 | paths = new(nsim,"string")
27 | syear = new(nsim,"integer",-999)
28 | eyear = new(nsim,"integer",-999)
29 | delim = "|"
30 |
31 | do gg = 0,nsim-1
32 | names(gg) = str_strip(str_get_field(na(gg),1,delim))
33 | paths(gg) = str_strip(str_get_field(na(gg),2,delim))
34 | syear(gg) = stringtointeger(str_strip(str_get_field(na(gg),3,delim)))
35 | eyear(gg) = stringtointeger(str_strip(str_get_field(na(gg),4,delim)))
36 | end do
37 | nyr = eyear-syear+1
38 | nyr_max = max(nyr)
39 |
40 | pi=4.*atan(1.0)
41 | rad=(pi/180.)
42 |
43 | wks_type = OUTPUT_TYPE
44 | if (wks_type.eq."png") then
45 | wks_type@wkWidth = 1500*PNG_SCALE
46 | wks_type@wkHeight = 1500*PNG_SCALE
47 | end if
48 | wks_trends_djf = gsn_open_wks(wks_type,getenv("OUTDIR")+"snd.trends.djf")
49 | wks_trends_mam = gsn_open_wks(wks_type,getenv("OUTDIR")+"snd.trends.mam")
50 | wks_trends_jja = gsn_open_wks(wks_type,getenv("OUTDIR")+"snd.trends.jja")
51 | wks_trends_son = gsn_open_wks(wks_type,getenv("OUTDIR")+"snd.trends.son")
52 | wks_trends_ann = gsn_open_wks(wks_type,getenv("OUTDIR")+"snd.trends.ann")
53 | wks_trends_mon = gsn_open_wks(wks_type,getenv("OUTDIR")+"snd.trends.mon")
54 |
55 | if (COLORMAP.eq.0) then
56 | gsn_define_colormap(wks_trends_djf,"ncl_default")
57 | gsn_define_colormap(wks_trends_mam,"ncl_default")
58 | gsn_define_colormap(wks_trends_jja,"ncl_default")
59 | gsn_define_colormap(wks_trends_son,"ncl_default")
60 | gsn_define_colormap(wks_trends_ann,"ncl_default")
61 | gsn_define_colormap(wks_trends_mon,"ncl_default")
62 | end if
63 | if (COLORMAP.eq.1) then
64 | gsn_define_colormap(wks_trends_djf,"BlueDarkRed18")
65 | gsn_define_colormap(wks_trends_mam,"BlueDarkRed18")
66 | gsn_define_colormap(wks_trends_jja,"BlueDarkRed18")
67 | gsn_define_colormap(wks_trends_son,"BlueDarkRed18")
68 | gsn_define_colormap(wks_trends_ann,"BlueDarkRed18")
69 | gsn_define_colormap(wks_trends_mon,"BlueDarkRed18")
70 | end if
71 | cmap = gsn_retrieve_colormap(wks_trends_djf)
72 |
73 | map_djf = new(nsim,"graphic")
74 | map_mam = new(nsim,"graphic")
75 | map_jja = new(nsim,"graphic")
76 | map_son = new(nsim,"graphic")
77 | map_ann = new(nsim,"graphic")
78 | map_mon = new(nsim,"graphic")
79 |
80 | do ee = 0,nsim-1
81 | snd = data_read_in(paths(ee),"SNOWDP",syear(ee),eyear(ee)) ; read in data, orient lats/lons correctly, set time coordinate variable up
82 | if (isatt(snd&lat,"_FillValue")) then ; required in v6.2.0-beta to reset _FillValue to avoid error message
83 | snd&lat@_FillValue = 1.e20
84 | snd&lat@missing_value = snd&lat@_FillValue
85 | end if
86 | if (isatt(snd&lon,"_FillValue")) then
87 | snd&lon@_FillValue = 1.e20
88 | snd&lon@missing_value = snd&lon@_FillValue
89 | end if
90 |
91 | if (isatt(snd,"is_all_missing")) then
92 | delete(snd)
93 | continue
94 | end if
95 | if (OPT_CLIMO.eq."Full") then
96 | snd = rmMonAnnCycTLL(snd)
97 | else
98 | check_custom_climo(names(ee),syear(ee),eyear(ee),CLIMO_SYEAR,CLIMO_EYEAR)
99 | temp_arr = snd
100 | delete(temp_arr&time)
101 | temp_arr&time = cd_calendar(snd&time,-1)
102 | if (CLIMO_SYEAR.lt.0) then
103 | climo = clmMonTLL(temp_arr({(eyear(ee)+CLIMO_SYEAR)*100+1:(eyear(ee)+CLIMO_EYEAR)*100+12},:,:))
104 | else
105 | climo = clmMonTLL(temp_arr({CLIMO_SYEAR*100+1:CLIMO_EYEAR*100+12},:,:))
106 | end if
107 | delete(temp_arr)
108 | snd = calcMonAnomTLL(snd,climo)
109 | delete(climo)
110 | end if
111 |
112 | tttt = dtrend_msg_n(ispan(0,dimsizes(snd&time)-1,1),snd,False,True,0)
113 | snd_trends_mon = snd(0,:,:)
114 | snd_trends_mon = (/ onedtond(tttt@slope, (/dimsizes(snd&lat),dimsizes(snd&lon)/) ) /)
115 | snd_trends_mon = snd_trends_mon*dimsizes(snd&time)
116 | snd_trends_mon@units = snd@units+" "+nyr(ee)+"yr~S~-1~N~"
117 | delete(tttt)
118 |
119 | snd_seas = runave_n_Wrap(snd,3,0,0)
120 | snd_seas(0,:,:) = (/ dim_avg_n(snd(:1,:,:),0) /)
121 | snd_seas(dimsizes(snd&time)-1,:,:) = (/ dim_avg_n(snd(dimsizes(snd&time)-2:,:,:),0) /)
122 | snd_ann = runave_n_Wrap(snd,12,0,0)
123 | delete(snd)
124 |
125 | snd_trends_seas = snd_seas(:3,:,:)
126 | snd_trends_seas = snd_trends_seas@_FillValue
127 | snd_trends_ann = snd_trends_seas(0,:,:)
128 | do ff = 0,4
129 | if (ff.le.3) then
130 | tarr = snd_seas(ff*3::12,:,:)
131 | end if
132 | if (ff.eq.4) then
133 | tarr = snd_ann(5::12,:,:)
134 | end if
135 | tttt = dtrend_msg_n(ispan(0,dimsizes(tarr&time)-1,1),tarr,False,True,0)
136 | if (ff.le.3) then
137 | snd_trends_seas(ff,:,:) = (/ onedtond(tttt@slope, (/dimsizes(tarr&lat),dimsizes(tarr&lon)/) ) /)
138 | end if
139 | if (ff.eq.4) then
140 | snd_trends_ann = (/ onedtond(tttt@slope, (/dimsizes(tarr&lat),dimsizes(tarr&lon)/) ) /)
141 | end if
142 | delete([/tarr,tttt/])
143 | end do
144 | snd_trends_seas = snd_trends_seas*nyr(ee)
145 | snd_trends_seas@units = snd_seas@units+" "+nyr(ee)+"yr~S~-1~N~"
146 | snd_trends_ann = snd_trends_ann*nyr(ee)
147 | snd_trends_ann@units = snd_ann@units+" "+nyr(ee)+"yr~S~-1~N~"
148 | delete([/snd_seas,snd_ann/])
149 |
150 |
151 | if (OUTPUT_DATA.eq."True") then
152 | modname = str_sub_str(names(ee)," ","_")
153 | bc = (/"/","'","(",")"/)
154 | do gg = 0,dimsizes(bc)-1
155 | modname = str_sub_str(modname,bc(gg),"_")
156 | end do
157 | fn = getenv("OUTDIR")+modname+".cvdp_data.snd.trends."+syear(ee)+"-"+eyear(ee)+".nc"
158 | if (.not.isfilepresent2(fn)) then
159 | z = addfile(fn,"c")
160 | z@source = "NCAR Climate Analysis Section's Climate Variability Diagnostics Package v"+getenv("VERSION")
161 | z@notes = "Data from "+names(ee)+" from "+syear(ee)+"-"+eyear(ee)
162 | if (OPT_CLIMO.eq."Full") then
163 | z@climatology = syear(ee)+"-"+eyear(ee)+" climatology removed prior to all calculations (other than means)"
164 | else
165 | if (CLIMO_SYEAR.lt.0) then
166 | z@climatology = (eyear(ee)+CLIMO_SYEAR)+"-"+(eyear(ee)+CLIMO_EYEAR)+" climatology removed prior to all calculations (other than means)"
167 | else
168 | z@climatology = CLIMO_SYEAR+"-"+CLIMO_EYEAR+" climatology removed prior to all calculations (other than means)"
169 | end if
170 | end if
171 | z@Conventions = "CF-1.6"
172 | else
173 | z = addfile(fn,"w")
174 | end if
175 | snd_seas = (/ snd_trends_seas /)
176 | snd_seas!1 = "LAT"
177 | snd_seas&LAT = snd_trends_seas&lat
178 | snd_seas!2 = "LON"
179 | snd_seas&LON = snd_trends_seas&lon
180 | copy_VarAtts(snd_trends_seas,snd_seas)
181 |
182 | snd_ann = (/ snd_trends_ann /)
183 | snd_ann!0 = "LAT"
184 | snd_ann&LAT = snd_trends_ann&lat
185 | snd_ann!1 = "LON"
186 | snd_ann&LON = snd_trends_ann&lon
187 | copy_VarAtts(snd_trends_ann,snd_ann)
188 |
189 | snd_mon = (/ snd_trends_mon /)
190 | snd_mon!0 = "LAT"
191 | snd_mon&LAT = snd_trends_mon&lat
192 | snd_mon!1 = "LON"
193 | snd_mon&LON = snd_trends_mon&lon
194 | copy_VarAtts(snd_trends_mon,snd_mon)
195 |
196 | z->snd_trends_djf = set_varAtts(snd_seas(0,:,:),"snd linear trends (DJF)","","")
197 | z->snd_trends_mam = set_varAtts(snd_seas(1,:,:),"snd linear trends (MAM)","","")
198 | z->snd_trends_jja = set_varAtts(snd_seas(2,:,:),"snd linear trends (JJA)","","")
199 | z->snd_trends_son = set_varAtts(snd_seas(3,:,:),"snd linear trends (SON)","","")
200 | z->snd_trends_ann = set_varAtts(snd_ann,"snd linear trends (annual)","","")
201 | z->snd_trends_mon = set_varAtts(snd_mon,"snd linear trends (monthly)","","")
202 | delete(z)
203 | delete([/snd_seas,snd_ann,snd_mon/])
204 | end if
205 |
206 | snd_trends_seas = where(abs(snd_trends_seas).le..005,snd_trends_seas@_FillValue,snd_trends_seas) ; .005m = arbitrary # to white out
207 | snd_trends_ann = where(abs(snd_trends_ann).le..005,snd_trends_ann@_FillValue,snd_trends_ann) ; areas w/very very small trends..
208 | snd_trends_mon = where(abs(snd_trends_mon).le..005,snd_trends_mon@_FillValue,snd_trends_mon)
209 | ;========================================================================
210 | ; cmap = read_colormap_file("ncl_default")
211 |
212 |
213 | res = True
214 | res@mpProjection = "WinkelTripel"
215 | res@mpGeophysicalLineColor = "gray42"
216 | if (wks_type.eq."png") then
217 | res@mpGeophysicalLineThicknessF = 2.
218 | else
219 | res@mpGeophysicalLineThicknessF = 1.
220 | end if
221 | res@mpPerimOn = False
222 | res@mpGridLatSpacingF = 90 ; change latitude line spacing
223 | res@mpGridLonSpacingF = 180. ; change longitude line spacing
224 | res@mpGridLineColor = "transparent" ; trick ncl into drawing perimeter
225 | res@mpGridAndLimbOn = True ; turn on lat/lon lines
226 | res@mpFillOn = False
227 | res@mpCenterLonF = 0.
228 | res@mpOutlineOn = True
229 | res@gsnDraw = False
230 | res@gsnFrame = False
231 |
232 | res@cnFillPalette = cmap(2::-1,:)
233 | res@cnFillMode = "RasterFill"
234 | res@cnLevelSelectionMode = "ExplicitLevels"
235 | if (COLORMAP.eq.0) then
236 | res@cnLevels = fspan(-.5,.5,21)
237 | end if
238 | if (COLORMAP.eq.1) then
239 | res@cnLevels = fspan(-.8,.8,17)
240 | end if
241 | res@cnLineLabelsOn = False
242 | res@cnFillOn = True
243 | res@cnLinesOn = False
244 | res@lbLabelBarOn = False
245 |
246 | res@gsnLeftStringOrthogonalPosF = -0.05
247 | res@gsnLeftStringParallelPosF = .005
248 | res@gsnRightStringOrthogonalPosF = -0.05
249 | res@gsnRightStringParallelPosF = 0.96
250 | res@gsnRightString = ""
251 | res@gsnLeftString = ""
252 | res@gsnLeftStringFontHeightF = 0.014
253 | res@gsnCenterStringFontHeightF = 0.018
254 | res@gsnRightStringFontHeightF = 0.014
255 | res@gsnLeftString = syear(ee)+"-"+eyear(ee)
256 |
257 | res@gsnRightString = snd_trends_seas@units
258 | res@gsnCenterString = names(ee)
259 | map_djf(ee) = gsn_csm_contour_map(wks_trends_djf,snd_trends_seas(0,:,:),res)
260 | map_mam(ee) = gsn_csm_contour_map(wks_trends_mam,snd_trends_seas(1,:,:),res)
261 | map_jja(ee) = gsn_csm_contour_map(wks_trends_jja,snd_trends_seas(2,:,:),res)
262 | map_son(ee) = gsn_csm_contour_map(wks_trends_son,snd_trends_seas(3,:,:),res)
263 | map_ann(ee) = gsn_csm_contour_map(wks_trends_ann,snd_trends_ann,res)
264 | map_mon(ee) = gsn_csm_contour_map(wks_trends_mon,snd_trends_mon,res)
265 |
266 | delete([/snd_trends_seas,snd_trends_ann,snd_trends_mon/])
267 | delete(res)
268 | end do
269 | panres = True
270 | panres@gsnMaximize = True
271 | panres@gsnPaperOrientation = "portrait"
272 | panres@gsnPanelLabelBar = True
273 | panres@gsnPanelYWhiteSpacePercent = 3.0
274 | panres@pmLabelBarHeightF = 0.05
275 | panres@pmLabelBarWidthF = 0.65
276 | panres@lbTitleOn = False
277 | panres@lbBoxLineColor = "gray70"
278 | panres@lbLabelFontHeightF = 0.013
279 | if (nsim.le.4) then
280 | if (nsim.eq.1) then
281 | panres@txFontHeightF = 0.022
282 | panres@gsnPanelBottom = 0.50
283 | else
284 | panres@txFontHeightF = 0.0145
285 | panres@gsnPanelBottom = 0.50
286 | end if
287 | else
288 | panres@txFontHeightF = 0.016
289 | panres@gsnPanelBottom = 0.05
290 | end if
291 | panres@lbLabelStride = 1
292 |
293 | panres@txString = "SND Trends (DJF)"
294 | ncol = floattointeger(sqrt(nsim))
295 | nrow = (nsim/ncol)+mod(nsim,ncol)
296 | gsn_panel2(wks_trends_djf,map_djf,(/nrow,ncol/),panres)
297 | delete(wks_trends_djf)
298 |
299 | panres@txString = "SND Trends (MAM)"
300 | gsn_panel2(wks_trends_mam,map_mam,(/nrow,ncol/),panres)
301 | delete(wks_trends_mam)
302 |
303 | panres@txString = "SND Trends (JJA)"
304 | gsn_panel2(wks_trends_jja,map_jja,(/nrow,ncol/),panres)
305 | delete(wks_trends_jja)
306 |
307 | panres@txString = "SND Trends (SON)"
308 | gsn_panel2(wks_trends_son,map_son,(/nrow,ncol/),panres)
309 | delete(wks_trends_son)
310 |
311 | panres@txString = "SND Trends (Annual)"
312 | gsn_panel2(wks_trends_ann,map_ann,(/nrow,ncol/),panres)
313 | delete(wks_trends_ann)
314 |
315 | panres@txString = "SND Trends (Monthly)"
316 | gsn_panel2(wks_trends_mon,map_mon,(/nrow,ncol/),panres)
317 | delete(wks_trends_mon)
318 | delete([/nrow,ncol,map_djf,map_mam,map_jja,map_son,map_ann,map_mon,panres,cmap/])
319 | print("Finished: snd.trends.ncl")
320 | end
321 |
--------------------------------------------------------------------------------
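A note on the trend scaling in psl.trends.ncl and snd.trends.ncl: the
per-time-step regression slope from dtrend_msg_n is multiplied by the
number of samples to express each trend as a total change over the record
(then labeled per nyr years in the units attribute). A plain NumPy sketch
of that scaling (synthetic data, illustrative only):

    import numpy as np

    rng = np.random.default_rng(0)
    y = rng.standard_normal(240).cumsum()   # synthetic 20-yr monthly series
    t = np.arange(y.size)
    slope = np.polyfit(t, y, 1)[0]          # per-time-step least-squares slope
    total_change = slope * y.size           # mirrors slope * dimsizes(time)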