├── .gitignore ├── .gitmodules ├── AC_tools ├── AC_time.py ├── GEOS.py ├── GEOSChem_bpch.py ├── GEOSChem_nc.py ├── HEMCO.py ├── KPP.py ├── Tests │ ├── __init__.py │ ├── conftest.py │ ├── test_AC_time.py │ ├── test_GEOSChem_bpch.py │ ├── test_bpch2netCDF.py │ ├── test_core.py │ ├── test_funcs4pf.py │ ├── test_generic.py │ ├── test_plotting.py │ └── test_variables.py ├── __init__.py ├── bpch2netCDF.py ├── core.py ├── family_variables.yml ├── generic.py ├── mask.py ├── observations.py ├── obsolete │ ├── SMVGEAR_REDUNDANT.py │ ├── __init__.py │ ├── misc_REDUNDANT.py │ ├── plotting_REDUNDANT.py │ └── variables_REDUNDANT.py ├── planeflight.py ├── plotting.py ├── reference_families_for_species.yml ├── spatial_extents4masks.csv ├── species_database_GCv12_9.yml ├── species_mass.yml ├── species_units.yml ├── utils.py └── variables.py ├── LICENSE ├── README.rst ├── doc ├── Makefile ├── _build │ ├── doctrees │ │ ├── environment.pickle │ │ └── index.doctree │ └── html │ │ ├── .buildinfo │ │ ├── .nojekyll │ │ ├── _static │ │ ├── ajax-loader.gif │ │ ├── basic.css │ │ ├── doctools.js │ │ ├── documentation_options.js │ │ ├── jquery-3.2.1.js │ │ ├── jquery.js │ │ ├── language_data.js │ │ ├── nature.css │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── underscore-1.3.1.js │ │ ├── underscore.js │ │ └── websupport.js │ │ ├── genindex.html │ │ ├── index.html │ │ ├── objects.inv │ │ ├── search.html │ │ └── searchindex.js ├── conf.py ├── index.rst └── make.bat ├── environment.yaml ├── examples └── plot_global_ozone.py ├── readthedocs.yml ├── scripts ├── .gitignore ├── 2D_GEOSChem_slice_subregion_plotter_example.py ├── AC_tools ├── Basic_GEOSChem_bpch_plotter.py ├── Basic_planeflight_plotter.py ├── KPP_analyse_vertical_Ox_loss_by_route_BPCH.py ├── KPP_analyse_vertical_Ox_loss_by_route_NetCDF.py ├── KPP_mk_tagged_GC_Mechanism.py ├── SMVGEAR_process_prod_loss_tags.py ├── __init__.py ├── animate_array.py ├── bpch2netCDF.py ├── check_met.py ├── find_replace.sh ├── get_data_files.py ├── 
mk_pf_MUTD_2D_ROW_EU_grid_surface.py ├── mk_planeflight_input_file_for_campaign_locs.py ├── mk_planeflight_input_file_for_point_locs.py ├── netCDF_splitter2var_2D.py └── pf2NetCDF.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | 
# temp. or backup files 107 | *.~ 108 | *#* 109 | *.bak 110 | 111 | # outputed or input files files 112 | *.nc 113 | *.nc4 114 | *.csv 115 | *.png 116 | *.jpg 117 | *.pdf 118 | *.na 119 | *.dat 120 | *.png 121 | *.pyc 122 | *.ps 123 | *.txt 124 | *.sw* 125 | *.swn 126 | *.swo* 127 | *.out 128 | *.csv 129 | *.log 130 | *restart.4x5.geos5.* 131 | *.data 132 | d_planeflight_generator/data/* 133 | d_fast-J_JX/data/* 134 | *option_*.py 135 | *.o* 136 | MChem_bpch_plotter.py 137 | MChem_tools.py 138 | monthly_run 139 | /Tests/test_files 140 | /Tests/__pycache__ 141 | data/* 142 | data 143 | 144 | # Do not include bash scripts 145 | *.sh 146 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsherwen/AC_tools/eb308d5f547fbe1dfb131c83e9504f5f5c8f6328/.gitmodules -------------------------------------------------------------------------------- /AC_tools/HEMCO.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Functions for use with the Harvard-NASA Emissions Component (HEMCO) 5 | 6 | Use help() to get details on a particular function. 
7 | 8 | """ 9 | # - Required modules: 10 | # I/O / Low level 11 | import os 12 | import sys 13 | import glob 14 | import pandas as pd 15 | import logging 16 | # Math/Analysis 17 | import numpy as np 18 | # Time 19 | import time 20 | import datetime as datetime 21 | 22 | 23 | def rm_eruptive_volcancos_from_files(dates=None, sdate=None, edate=None, 24 | folder=None, output_folder=None): 25 | """ 26 | Remove the eruptive volcanos from the input HEMCO emission files 27 | """ 28 | # Set dates to update files as NASA ATom4, unless others provided 29 | if isinstance(dates, type(None)): 30 | if isinstance(sdate, type(datetime.datetime)): 31 | sdate = datetime.datetime(2018, 1, 1) 32 | if isinstance(edate, type(datetime.datetime)): 33 | edate = datetime.datetime(2018, 6, 1) 34 | dates = pd.date_range(sdate, edate) 35 | assert folder != type(None), 'ABORTING: folder for volcano files needed' 36 | # Write out the new files to the same folder if a new one is not given 37 | if isinstance(output_folder, type(None)): 38 | output_folder = folder 39 | # Helper functions for processing volcano files 40 | 41 | def get_volc_subfolder4dt(folder=None, dt=None): 42 | """ Get the volcano folder for a specific datetime """ 43 | year = dt.year 44 | month = dt.month 45 | folder = '{}/{}/{:0>2}/'.format(folder, year, month) 46 | return folder 47 | 48 | def get_volc_filename4dt(dt): 49 | """ Get the filename for a specific datetime """ 50 | dt_str = dt.strftime('%Y%m%d') 51 | return 'so2_volcanic_emissions_Carns.{}.rc'.format(dt_str) 52 | 53 | def get_volc_file_lines4date(dt, folder=None): 54 | """ Open a volcano file for given date and return its lines """ 55 | # Get string contain filename string and another for folder 56 | filename = get_volc_filename4dt(dt=dt) 57 | folder2use = get_volc_subfolder4dt(dt=dt, folder=folder) 58 | # Extract lines 59 | lines = read_lines_from_txt_file(filename=filename, 60 | folder=folder2use) 61 | return lines 62 | 63 | def 
rm_eruption_from_volc_file_lines(lines, skiplines=4, dt_str='', 64 | verbose=True, debug=False): 65 | """ 66 | Remove the eruptive volcano lines from the HEMCO file 67 | 68 | Notes 69 | ------ 70 | - The columns (and units) in the files are: 71 | LAT (-90,90), LON (-180,180), SULFUR [kg S/s], ELEVATION [m], 72 | CLOUD_COLUMN_HEIGHT [m] 73 | """ 74 | # Local variables 75 | pstr1 = "NOTE: rm'd emission of {} kg S/s ({}) - ({}N, {}E, {}m, {}m)" 76 | pstr2 = 'WARNING: line inc. without check: {}' 77 | NewLines = [] 78 | # Now loop lines and only save those without eruptive volcanoes 79 | for n_line, line in enumerate(lines): 80 | include_line = True 81 | if (n_line+1) > skiplines: 82 | try: 83 | # Get elevation and emission height 84 | tmp_line = line.strip().split() 85 | LAT = tmp_line[0] 86 | LON = tmp_line[1] 87 | S = tmp_line[2] # SULFUR 88 | ELEV = tmp_line[3] # ELEVATION 89 | CLOUD = tmp_line[4] # CLOUD_COLUMN_HEIGHT 90 | # If not equal, then EXCLUDE the line 91 | if ELEV != CLOUD: 92 | if verbose: 93 | print(pstr1.format(S, dt_str, LAT, LON, ELEV, 94 | CLOUD)) 95 | include_line = False 96 | except IndexError: 97 | if debug: 98 | print(pstr2.format(line.strip())) 99 | else: 100 | if debug: 101 | print(pstr2.format(line.strip())) 102 | if include_line: 103 | NewLines += [line] 104 | return NewLines 105 | 106 | # - Now loop dates and re write files 107 | for dt in dates: 108 | dt_str = dt.strftime('%Y/%m/%d') 109 | # Get lines of volcano file for date 110 | lines = get_volc_file_lines4date(dt=dt, folder=folder) 111 | # Remove the eruptive volcanoes from the file 112 | NewLines = rm_eruption_from_volc_file_lines(lines=lines, dt_str=dt_str) 113 | if len(lines) != len(NewLines): 114 | print('WARNING: # of lines updated for {}'.format(dt_str)) 115 | # SubFolder='VOLCANO_NO_ERUPT/v2019-08/' 116 | # Get string contain filename string 117 | filename = get_volc_filename4dt(dt=dt) 118 | folder2use = get_volc_subfolder4dt(dt=dt, folder=output_folder) 119 | # Save out the new 
file 120 | write_lines2txt_file(NewLines, folder=folder2use, filename=filename) 121 | -------------------------------------------------------------------------------- /AC_tools/Tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsherwen/AC_tools/eb308d5f547fbe1dfb131c83e9504f5f5c8f6328/AC_tools/Tests/__init__.py -------------------------------------------------------------------------------- /AC_tools/Tests/conftest.py: -------------------------------------------------------------------------------- 1 | ##### 2 | # This file contains the settings used for py.test 3 | ##### 4 | 5 | import pytest 6 | import logging 7 | import os 8 | import AC_tools as AC 9 | # temporality restore urllib2 as urllib not installed 10 | try: 11 | import urllib2 12 | except ModuleNotFoundError: 13 | import urllib.request 14 | import urllib.error 15 | import urllib.parse 16 | 17 | #FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" 18 | FORMAT = "%(filename)s:%(lineno)s - %(funcName)s() : %(message)s" 19 | test_file_dir = '../data' 20 | 21 | logging.basicConfig(filename='test.log', level=logging.DEBUG, format=FORMAT) 22 | logging.getLogger().setLevel(logging.DEBUG) 23 | 24 | 25 | def pytest_addoption(parser): 26 | parser.addoption("--slow", action="store_true", 27 | help="remake ctm.nc tests") 28 | 29 | 30 | def pytest_configure(): 31 | 32 | # Make sure we are in the correct folder for the test. 
33 | dirname = os.path.split(os.getcwd())[1] 34 | if not dirname == 'Tests': 35 | pytest.exit("Not running in the Tests folder!") 36 | 37 | # Make sure the data is downloaded 38 | # from ..Scripts import get_data_files 39 | # from AC_tools import get_data_files 40 | 41 | return 42 | -------------------------------------------------------------------------------- /AC_tools/Tests/test_AC_time.py: -------------------------------------------------------------------------------- 1 | from ..bpch2netCDF import * 2 | import logging 3 | import pytest 4 | logging.basicConfig(filename='test.log', level=logging.DEBUG) 5 | logging.info('Starting GEOSChem test.') 6 | 7 | 8 | def test_convert_to_netCDF(): 9 | return 10 | 11 | 12 | def test_get_folder(): 13 | return 14 | 15 | 16 | logging.info('GEOSChem test complete') 17 | -------------------------------------------------------------------------------- /AC_tools/Tests/test_GEOSChem_bpch.py: -------------------------------------------------------------------------------- 1 | from ..GEOSChem_bpch import * 2 | import logging 3 | import pytest 4 | 5 | wd = '../../data' 6 | 7 | slow = pytest.mark.skipif( 8 | not pytest.config.getoption("--slow"), 9 | reason="need --slow option to run" 10 | ) 11 | 12 | 13 | def test_get_surface_area(): 14 | arr = get_surface_area(wd=wd) 15 | assert isinstance(arr, np.ndarray), 'Surface area not a numpy array.' 16 | assert (len(arr.shape) == 3), 'Surface area does not have 2 dimensions.' 17 | 18 | # TO-DO 19 | # Test without passing a wd. Requires a data source. current implementation of this is a bit cludgey. 20 | arr = get_surface_area() 21 | assert isinstance(arr, np.ndarray), 'Surface area not a numpy array.' 22 | assert (len(arr.shape) == 3), 'Surface area does not have 2 dimensions.' 23 | 24 | return 25 | 26 | 27 | @slow 28 | def test_get_gc_res(): 29 | 30 | # Test we can get the geoschem resolution 31 | 32 | # More resolutions need to be added and tested. 
33 | 34 | _4x5_dir = os.path.join(wd, 'LM/LANDMAP_LWI_ctm_4x5') 35 | _2x25_dir = os.path.join(wd, 'LM/LANDMAP_LWI_ctm_2x25') 36 | _05x0666_dir = os.path.join(wd, 'LM/LANDMAP_LWI_ctm_05x0666') 37 | _025x03125_dir = os.path.join(wd, 'LM/LANDMAP_LWI_ctm_025x03125') 38 | 39 | # assert (get_gc_res( _4x5_dir ) == '4x5') 40 | assert (get_gc_res(_2x25_dir) == '2x2.5') 41 | assert (get_gc_res(_05x0666_dir) == '0.5x0.666') 42 | assert (get_gc_res(_025x03125_dir) == '0.25x0.3125') 43 | return 44 | 45 | 46 | def test_get_LWI_map(): 47 | # Test specifying wd 48 | arr = get_LWI_map(wd=wd) 49 | assert isinstance(arr, np.ndarray), 'Land map not a numpy array.' 50 | assert (len(arr.shape) == 3), 'Land map has too many dimensions.' 51 | return 52 | 53 | 54 | def test_get_O3_burden_bpch(): 55 | var = get_O3_burden_bpch(wd=wd) 56 | assert (round(var.sum(), 0) == round(376875.15625, 0) 57 | ), "The O3 burden is wrong ({var})".format(var=var) 58 | return 59 | 60 | 61 | def test_get_air_mass_np(): 62 | arr = get_air_mass_np(wd=wd) 63 | assert isinstance(arr, np.ndarray), 'Air mass array is not a numpy array' 64 | return 65 | 66 | 67 | def test_get_GC_output(): 68 | arr = get_GC_output(wd=wd, species='O3', category='IJ_AVG_S') 69 | assert isinstance(arr, np.ndarray), 'GC output is not a numpy array' 70 | assert round(arr.sum(), 6) == round( 71 | 0.14242639, 6), "The ozone budget doesnt seem correct({bud})".format(bud=arr.sum()) 72 | return 73 | 74 | 75 | def test_get_HEMCO_output_for_WD(): 76 | arr = get_HEMCO_output(wd=wd, vars='ALD2_TOTAL') 77 | assert isinstance(arr, np.ndarray), 'HEMCO output is not a numpy array' 78 | assert round(arr.sum(), 2) == round( 79 | 2.50E-9, 2), "The HEMCO output seems wrong" 80 | return 81 | 82 | 83 | def test_get_HEMCO_output_for_file(): 84 | hemco_file = os.path.join(wd, 'HEMCO_Diagnostics.nc') 85 | arr = get_HEMCO_output(filename=hemco_file, vars='ALD2_TOTAL') 86 | assert isinstance(arr, np.ndarray), 'HEMCO output is not a numpy array' 87 | assert 
round(arr.sum(), 2) == round( 88 | 2.50E-9, 2), "The HEMOC output seem incorrect" 89 | return 90 | -------------------------------------------------------------------------------- /AC_tools/Tests/test_bpch2netCDF.py: -------------------------------------------------------------------------------- 1 | from ..bpch2netCDF import * 2 | import logging 3 | import pytest 4 | import os 5 | #import urllib.parse, urllib.error 6 | # temporality restore urllib2 as urllib not installed 7 | try: 8 | import urllib2 9 | from urllib import urlopen 10 | # except ModuleNotFoundError: 11 | except ImportError: 12 | import urllib.request 13 | import urllib.error 14 | import urllib.parse 15 | 16 | slow = pytest.mark.skipif( 17 | not pytest.config.getoption("--slow"), 18 | reason="need --slow option to run" 19 | ) 20 | 21 | test_file_dir = '../data' 22 | 23 | # def setup_function(function): 24 | # """ 25 | # Downloads all the test dataset files using rsync. 26 | # """ 27 | # 28 | # test_files = ['test.nc', 'test.bpch','tracerinfo.dat','diaginfo.dat'] 29 | # 30 | # url_base = 'http://atmosviz1.york.ac.uk/~bn506/data/AC_tools/' 31 | # test_file_dir = 'test_files' 32 | # 33 | # 34 | # if not os.path.exists(test_file_dir): 35 | # os.makedirs(test_file_dir) 36 | # 37 | # for file_name in test_files: 38 | # file_path = os.path.join(test_file_dir, file_name) 39 | # if not os.path.isfile(file_path): 40 | # my_file = open(file_path, 'wb') 41 | # logging.debug(file_name + " not found. 
# Downloading now.")
#             url = url_base + file_name
#             file_data = urllib2.urlopen( url ).read()
#             my_file.write(file_data)
#             my_file.close()
#
#         logging.debug(file_name + " downloaded.")
#
#     return


def file_comparison(file_1, file_2):
    """
    Return True if the two files have identical text content.

    Parameters
    ----------
    file_1, file_2 (str): paths of the two files to compare

    Returns
    -------
    (bool)
    """
    # BUGFIX: the file handles were previously opened and never closed
    # (resource leak); use context managers so they are always closed.
    with open(file_1, 'r') as file_1_data, open(file_2, 'r') as file_2_data:
        same = file_1_data.read() == file_2_data.read()
    return same


@slow
def test_convert_to_netCDF():
    """ Check the bpch conversion reproduces the reference netCDF file """
    logging.info("beginning test")
    # Recreate a ctm.nc file and confirm it is the same
    logging.debug("Creating the temp netCDF file")
    convert_to_netCDF(folder=test_file_dir, bpch_file_list=[
        'test.bpch'], remake=True, filename='test.nc')
    datafile = os.path.join(test_file_dir, 'ctm.nc')
    testfile = os.path.join(test_file_dir, 'test.nc')

    # (typo fix in log message: 'origional' -> 'original')
    logging.debug("Comparing the temp netCDF file to the original")
    assert file_comparison(datafile, testfile), \
        'bpch converter failed to replicate the original file.'
75 | 76 | os.remove(testfile) 77 | logging.info("test complete") 78 | return 79 | 80 | 81 | def test_get_folder(): 82 | logging.info("beginning test") 83 | folder = get_folder(test_file_dir) 84 | assert isinstance(folder, str), "The folder is not a string" 85 | assert os.path.exists(folder), "Cannot find the test folder" 86 | logging.info("test complete") 87 | return 88 | -------------------------------------------------------------------------------- /AC_tools/Tests/test_core.py: -------------------------------------------------------------------------------- 1 | from ..core import * 2 | import logging 3 | import pytest 4 | import numpy as np 5 | logging.basicConfig(filename='test.log', level=logging.DEBUG) 6 | 7 | data_dir = '../data' 8 | 9 | 10 | def test_get_sigfig(): 11 | print(get_sigfig(100, 1)) 12 | assert(get_sigfig(3.22294, 1) == 3) 13 | assert(get_sigfig(3.29294, 2) == 3.3) 14 | assert(get_sigfig(3.29294, 3) == 3.29) 15 | return 16 | 17 | 18 | def test_get_sigfig_big(): 19 | # assert(get_sigfig(3.22294E20, 0)==3E20) 20 | assert(get_sigfig(3.22294E20, 1) == 3E20) 21 | assert(get_sigfig(3.29294E20, 2) == 3.3E20) 22 | assert(get_sigfig(3.29294E20, 3) == 3.29E20) 23 | 24 | 25 | def test_get_sigfig_small(): 26 | assert(get_sigfig(3.22294E-20, 1) == 3E-20) 27 | assert(get_sigfig(3.29294E-20, 2) == 3.3E-20) 28 | assert(get_sigfig(3.29294E-20, 3) == 3.29E-20) 29 | return 30 | 31 | # def test_get_sigfig_negative(): 32 | # assert(get_sigfig(-3.22294, 1)==-3.2) 33 | # assert(get_sigfig(-3.29294, 2)==-3.29) 34 | # assert(get_sigfig(-3.29294, 3)==-3.293) 35 | # return 36 | 37 | 38 | def test_get_scientific_number(): 39 | assert(get_scientific_number(3.22294, 1, string=True) == "3.2") 40 | assert(get_scientific_number(3.29294E10, 2, string=True) == "3.29E10") 41 | assert(get_scientific_number(-3.29294E-10, 3, string=True) == "-3.293E-10") 42 | return 43 | 44 | 45 | def test_gchemgrid(): 46 | # Test fail for no inputs 47 | with pytest.raises(Exception): 48 | 
gchemgrid() 49 | 50 | # Test dictionary return 51 | arr = gchemgrid('c_km_geos5_r') 52 | assert isinstance(arr, np.ndarray), 'item is not a numpy array' 53 | 54 | dic = gchemgrid(rtn_dict=True) 55 | assert isinstance(dic, dict), 'Dictionary return failed.' 56 | return 57 | 58 | 59 | def test_get_dims4res_4x5(): 60 | # test the dictionary returns 61 | res = get_dims4res('4x5') 62 | assert (res == (72, 46, 47)), '4x5 res lookup failed' 63 | 64 | 65 | def test_get_dims4res_2x25(): 66 | # test the dictionary returns 67 | res = get_dims4res('2x2.5') 68 | assert (res == (144, 91, 47)), '2x2.5 res lookup failed' 69 | 70 | 71 | def test_get_dims4res_2D(): 72 | res_2D = get_dims4res('4x5', just2D=True) 73 | assert (res_2D == (72, 46)), "2D lookup failed" 74 | 75 | 76 | def test_get_dims4res_dict(): 77 | test_dict_2d = get_dims4res(r_dims=True) 78 | assert (test_dict_2d[72, 46, 47] == '4x5'), 'dict lookup failed' 79 | 80 | 81 | def test_get_latlonalt4res_default(): 82 | 83 | # Test default 84 | (lon, lat, alt) = get_latlonalt4res() 85 | print("lon = ", lon) 86 | assert len(lat) == 46, 'The default latitude is wrong' 87 | assert len(lon) == 72, 'The default longitude is wrong' 88 | assert len(alt) == 47, 'The default altidure is wrong' 89 | -------------------------------------------------------------------------------- /AC_tools/Tests/test_funcs4pf.py: -------------------------------------------------------------------------------- 1 | from ..bpch2netCDF import * 2 | import logging 3 | import pytest 4 | logging.basicConfig(filename='test.log', level=logging.DEBUG) 5 | logging.info('Starting GEOSChem test.') 6 | 7 | 8 | def test_get_folder(): 9 | return 10 | 11 | 12 | logging.info('GEOSChem test complete') 13 | -------------------------------------------------------------------------------- /AC_tools/Tests/test_generic.py: -------------------------------------------------------------------------------- 1 | from ..bpch2netCDF import * 2 | import logging 3 | import pytest 4 | 
logging.basicConfig(filename='test.log', level=logging.DEBUG) 5 | logging.info('Starting GEOSChem test.') 6 | 7 | 8 | def test_get_folder(): 9 | return 10 | 11 | 12 | logging.info('GEOSChem test complete') 13 | -------------------------------------------------------------------------------- /AC_tools/Tests/test_plotting.py: -------------------------------------------------------------------------------- 1 | from ..plotting import * 2 | import logging 3 | import pytest 4 | import os 5 | logging.basicConfig(filename='test.log', level=logging.DEBUG) 6 | 7 | 8 | wd = '../data' 9 | out_dir = 'test_output' 10 | 11 | slow = pytest.mark.skipif( 12 | not pytest.config.getoption("--slow"), 13 | reason="need --slow option to run" 14 | ) 15 | 16 | if not os.path.exists(out_dir): 17 | os.mkdir(out_dir) 18 | 19 | 20 | @pytest.fixture() 21 | def test_data(): 22 | from ..GEOSChem_bpch import get_GC_output 23 | test_data = get_GC_output(wd, species='O3') 24 | return test_data 25 | 26 | 27 | # Leave the default plot in as not slow. 
def test_map_plot_default(test_data):
    """ The default map plot is fast, so it is not marked @slow """
    print(test_data.shape)
    map_plot(test_data[:, :, 0, 0])
    return


@slow
def test_map_plot_transpose(test_data):
    """ A transposed (lat, lon) surface slice should also plot """
    map_plot(test_data[:, :, 0, 0].T)
    return


@slow
def test_map_plot_wd(test_data):
    """ Plotting with an explicit working directory should succeed """
    map_plot(test_data[:, :, 0, 0], wd=wd)
    return


@slow
def test_map_plot_wrong_shape(test_data):
    """ A wrongly-shaped slice should trigger an AssertionError """
    with pytest.raises(AssertionError):
        map_plot(test_data[0, 0, :, :])
    return


@slow
def test_map_plot_none(test_data=None):
    """ Passing no data should trigger an AssertionError """
    with pytest.raises(AssertionError):
        map_plot(None)
    return


@slow
def test_save_plot_default(test_data):
    """ Default save creates 'myplot.png' in the current directory """
    map_plot(test_data[:, :, 0, 0])
    save_plot()
    os.remove("myplot.png")
    return


@slow
def test_save_plot_with_name(test_data):
    """ Saving with a title uses the title as the filename """
    map_plot(test_data[:, :, 0, 0])
    save_plot(title="test_1")
    os.remove("test_1.png")
    return


@slow
def test_save_plot_in_folder(test_data):
    """ Saving with a location writes the file into that folder """
    map_plot(test_data[:, :, 0, 0])
    save_plot(title="test_3", location="new_folder")
    os.remove(os.path.join("new_folder", "test_3.png"))
    os.rmdir("new_folder")
    return


@slow
def test_save_plot_with_filetypes(test_data):
    """ Saving with multiple extensions creates one file per extension """
    map_plot(test_data[:, :, 0, 0])
    save_plot(title="test_4", extensions=["pdf", "png"])
    for created in ("test_4.png", "test_4.pdf"):
        os.remove(created)
    return

# Test plot has been created, and then remove it"
#    filenames = [filename_0, filename_1, filename_2,
#                 filename_3, filename_4, filename_5]
#    for filename in filenames:
#        assert os.path.isfile( filename ), "Failed to create {file}".format(file=filename)
#        os.remove(filename)
#
#    os.rmdir("new_folder")


pytest.main()
-------------------------------------------------------------------------------- /AC_tools/Tests/test_variables.py: -------------------------------------------------------------------------------- 1 | from ..bpch2netCDF import * 2 | import logging 3 | import pytest 4 | logging.basicConfig(filename='test.log', level=logging.DEBUG) 5 | logging.info('Starting GEOSChem test.') 6 | 7 | 8 | def test_convert_to_netCDF(): 9 | return 10 | 11 | 12 | def test_get_folder(): 13 | return 14 | 15 | 16 | logging.info('GEOSChem test complete') 17 | -------------------------------------------------------------------------------- /AC_tools/__init__.py: -------------------------------------------------------------------------------- 1 | # compatibility with both python 2 and 3 2 | from __future__ import print_function 3 | import numpy as np 4 | import sys 5 | # AC_tools modules 6 | from . AC_time import * 7 | from . core import * 8 | from . utils import * 9 | from . GEOSChem_nc import * 10 | from . GEOS import * 11 | from . HEMCO import * 12 | from . KPP import * 13 | from . mask import * 14 | from . observations import * 15 | from . planeflight import * 16 | from . plotting import * 17 | # from . SMVGEAR import * 18 | from . variables import * 19 | # include the redundant files for now 20 | from . GEOSChem_bpch import * 21 | from . obsolete.plotting_REDUNDANT import * 22 | from . obsolete.variables_REDUNDANT import * 23 | from . obsolete.misc_REDUNDANT import * 24 | from . obsolete.SMVGEAR_REDUNDANT import * 25 | 26 | 27 | """ 28 | AC_tools is a module of functions started by Tomas, and contributed to by others in the York Atmospheric Modelling group, and hopefully maintained by the Group. 29 | To access the help, from python or ipython, type help(AC_tools) to get general help 30 | To get more detailed help from a module for example, type help(AC_tools.AC_time.py) 31 | If you find missing documentation any thing is unclear in any of this, please request a git push to github. 
32 | """ 33 | 34 | # Setup logging for module 35 | import logging 36 | level = logging.DEBUG 37 | FORMAT = "%(levelname)8s - %(message)s @---> %(filename)s:%(lineno)s %(funcName)s()" 38 | logging.basicConfig(filename='AC_tools.log', filemode='w', level=level, 39 | format=FORMAT) 40 | logging.getLogger().setLevel(level) 41 | 42 | # Import submodules here for easier access 43 | -------------------------------------------------------------------------------- /AC_tools/bpch2netCDF.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | This script analysis a folder containing bpch files and outputs the results 4 | in a single netCDF file in the folder. 5 | 6 | This allows for significantly faster and easier input of data, 7 | and more common anaylsis techniques like pandas without extra 8 | post processing. 9 | """ 10 | 11 | import logging 12 | import sys 13 | import glob 14 | import os 15 | import netCDF4 16 | if sys.version_info.major < 3: 17 | try: 18 | import iris 19 | except ImportError: 20 | print('WARNING iris not imported') 21 | # retain back compatibility for PyGChem 22 | try: 23 | if (sys.version_info.major <= 2): 24 | import pygchem 25 | if pygchem.__version__ == '0.2.0': 26 | import pygchem.diagnostics as gdiag 27 | else: 28 | try: 29 | from pygchem import datasets 30 | except: 31 | import pygchem.datafields as datasets 32 | except ImportError: 33 | print('pygchem not imported!') 34 | 35 | 36 | def convert_to_netCDF(folder=None, filename='ctm.nc', bpch_file_list=None, 37 | remake=False, hemco_file_list=None, verbose=True, 38 | bpch_file_type="*.ctm.nc"): 39 | """ 40 | Converts GEOS-Chem outputs to netCDF 41 | 42 | Parameters 43 | ---------- 44 | folder (str): specify the folder you want to use - defaults to cwd 45 | filename (str): specific the netCDF filename you want to use 46 | bpch_file_list (list): list the bpch files you want to use 47 | remake (bool): Overwrite any old files 
(default=False) 48 | 49 | Notes 50 | ----- 51 | Setup for: 52 | - bpch_to_netCDF 53 | - hemco_to_netCDF 54 | - planeflight_to_netCDF 55 | """ 56 | logging.debug("Convert to netCDF called with folder={},".format(folder) + 57 | " bpch_file_type={}/filename={}".format(bpch_file_type, filename)) 58 | 59 | # try: 60 | bpch_to_netCDF(folder=folder, filename=filename, 61 | bpch_file_list=bpch_file_list, remake=remake, 62 | file_type=bpch_file_type, verbose=verbose) 63 | # except: 64 | # logging.error("Could not convert bpch to netCDF in {_dir}"\ 65 | # .format(_dir=folder)) 66 | # try: 67 | # hemco_to_netCDF( folder, hemco_file_list, remake) 68 | # except: 69 | # logging.warning("Could not convert hemco to netCDF in {_dir}"\ 70 | # .format(_dir=folder)) 71 | 72 | return 73 | 74 | 75 | def hemco_to_netCDF(folder, hemco_file_list=None, remake=False): 76 | """ 77 | Conbine HEMCO diagnostic output files to a single NetCDF file. 78 | 79 | Parameters 80 | ---------- 81 | remake (bool): overwrite existing NetCDF file 82 | 83 | """ 84 | if __package__ is None: 85 | from .bpch2netCDF import get_folder 86 | else: 87 | from .bpch2netCDF import get_folder 88 | folder = get_folder(folder) 89 | output_file = os.path.join(folder, 'hemco.nc') 90 | 91 | # If the hemco netCDF file already exists then quit unless remake=True 92 | if not remake: 93 | if os.path.exists(output_file): 94 | logging.warning(output_file + ' already exists, not remaking') 95 | return 96 | 97 | logging.info("Combining hemco diagnostic files") 98 | 99 | # By default look for any files that look like hemco diagnostic files: 100 | # Look for all hemco netcdf files then remove the restart files. 
def bpch_to_netCDF(folder=None, filename='ctm.nc', bpch_file_list=None,
                   remake=False, filetype="*ctm.bpch*",
                   check4_trac_avg_if_no_ctm_bpch=True, backend='PyGChem',
                   verbose=False, **kwargs):
    """
    Convert GEOS-Chem ctm.bpch output file(s) to a single NetCDF file

    Parameters
    ----------
    folder (str): working directory for data files
    filename (str): name to give created NetCDF
    bpch_file_list (list): list of files to convert
    remake (bool): overwrite existing NetCDF file
    filetype (str): string with wildcards to match filenames
        ( e.g. *ctm.bpch*, trac_avg.*, or *ts*bpch* )
    check4_trac_avg_if_no_ctm_bpch (bool): fall back to '*trac_avg*' files
        if no files match the given filetype
    backend (str): converter to use ('PyGChem', 'xbpch', 'iris' or 'PNC')
    verbose (bool): print (minor) logging to screen

    Returns
    -------
    (None) saves a NetCDF file to disk
    """
    # Resolve the working folder (may come from sys.argv) and output path.
    # NOTE: get_folder is defined in this module, so the original
    # `if __package__ is None` import (both branches were identical, and the
    # relative form fails outside a package) is unnecessary.
    folder = get_folder(folder)
    output_file = os.path.join(folder, filename)

    # If the netCDF file already exists don't overwrite it without remake=True
    if not remake and os.path.exists(output_file):
        logging.warning(output_file + ' already exists. Not recreating.')
        return

    # Look for files if a file list is not provided
    if bpch_file_list is None:
        logging.debug("Searching for the following bpch filetype: {filetype}"
                      .format(filetype=filetype))
        bpch_files = glob.glob(os.path.join(folder, filetype))
        # Also check if directory contains *trac_avg* files, if no ctm.bpch
        if (len(bpch_files) == 0) and check4_trac_avg_if_no_ctm_bpch:
            filetype = '*trac_avg*'
            logging.info('WARNING! - now trying filetype={}'.format(filetype))
            bpch_files = glob.glob(os.path.join(folder, filetype))
        # Raise error if no files matching filetype
        if len(bpch_files) == 0:
            logging.error("No bpch files ({}) found in {}".format(filetype,
                                                                  folder))
            raise IOError("{} contains no bpch files.".format(folder))
    # Use the specified files
    else:
        bpch_files = []
        for bpch_file in bpch_file_list:
            full_path = os.path.join(folder, bpch_file)
            if not os.path.exists(full_path):
                logging.error(full_path + " could not be found")
                raise IOError("{} could not be found".format(full_path))
            bpch_files.append(full_path)

    # Report which bpch files will be converted
    logging.debug("The following bpch files were found (n={}):"
                  .format(len(bpch_files)))
    logging.debug(str(bpch_files))
    if verbose:
        print(("Creating a netCDF from {} file(s).".format(len(bpch_files)) +
               " This can take some time..."))

    if backend == 'PyGChem':
        # Load all the files into memory, then save the netCDF file
        bpch_data = datasets.load(bpch_files)
        datasets.save(bpch_data, output_file)
    elif backend == 'xbpch':
        import xbpch
        # Load all the files into memory (as xarray dataset object)
        ds = xbpch.open_mfbpchdataset(bpch_files)
        # Save through xarray dataset object
        ds.to_netcdf(output_file, unlimited_dims={'time_counter': True})
    elif backend == 'iris':
        # iris.fileformats.netcdf.save(data, output_file)
        print('WARNING NetCDF made by iris is non CF-compliant')
    elif backend == 'PNC':
        import xarray as xr
        if len(bpch_files) == 1:
            bpch_to_netCDF_via_PNC(filename=filename,
                                   output_file=output_file,
                                   bpch_file=bpch_files[0])
        # Individually convert bpch files if more than one file
        if len(bpch_files) > 1:
            for n_bpch_file, bpch_file in enumerate(bpch_files):
                # Write temporary files into `folder` (the original wrote
                # them to the CWD but then globbed for them in `folder`)
                TEMP_name = 'TEMP_{}_{}'.format(n_bpch_file, filename)
                bpch_to_netCDF_via_PNC(filename=filename,
                                       output_file=os.path.join(folder,
                                                                TEMP_name),
                                       bpch_file=bpch_file)
            # - Combine the NetCDF files with xarray
            TEMP_ncfiles = sorted(glob.glob(
                os.path.join(folder, 'TEMP_*_' + filename)))
            # Open files with xarray and concatenate the times
            ds_l = [xr.open_dataset(i) for i in TEMP_ncfiles]
            ds = xr.concat(ds_l, dim='time')
            # Now save the combined file (time dimension left unlimited)
            ds.to_netcdf(output_file,
                         unlimited_dims={'time_counter': True})
            # Remove the temporary files
            for TEMP_ncfile in TEMP_ncfiles:
                os.remove(TEMP_ncfile)

    logging.info("A netCDF file has been created with the name {ctm}"
                 .format(ctm=output_file))
    return


def bpch_to_netCDF_via_PNC(format='bpch2', filename='ctm.nc',
                           output_file=None, bpch_file=None, folder=None):
    """
    Convert a single bpch file to NetCDF using PseudoNetCDF (PNC) as backend

    Parameters
    ----------
    format (str): PseudoNetCDF reader name for the input file
    filename (str): name to give created NetCDF (kept for API compatibility)
    output_file (str): path to write the NetCDF file to
    bpch_file (str): path of the bpch file to convert
    folder (str): working directory (kept for API compatibility)
    """
    import PseudoNetCDF as pnc
    # Load the file into memory
    infile = pnc.pncopen(bpch_file, format=format)
    # Kludge - reduce DXYP_DXYP dims online.
    # Surface area should not have a time dim; if it does, remove it.
    dxyp = infile.variables['DXYP_DXYP']
    if len(dxyp.shape) == 4:
        dxyp.dimensions = dxyp.dimensions[1:]
        infile.variables['DXYP_DXYP'] = dxyp
    # Now write file to disc
    pnc.pncwrite(infile, output_file)


def get_folder(folder):
    """
    Get name of folder that contains ctm.bpch data from command line

    Falls back to the first system argument, then the current working
    directory; exits if the resolved folder does not exist.
    """
    if folder is None:
        # Getting the folder location from system argument
        if len(sys.argv) <= 1:
            logging.warning("No folder location specified for the data")
            folder = os.getcwd()
        else:
            folder = str(sys.argv[1])

    # Check folder exists
    if not os.path.exists(folder):
        print("Folder does not exist")
        print(folder)
        sys.exit()

    return folder


if __name__ == "__main__":
    convert_to_netCDF()
    print("Complete")
r_t: ['Photolysis', 'HOx', 'Bromine', 'Iodine'] 3 | r_tn: ['Photolysis', 'HOx', 'Bromine', 'Iodine'] 4 | r_tn_lc: ['photolysis', 'HOx', 'bromine', 'iodine'] 5 | r_tn_Cly: ['Photolysis', 'HOx', 'Chlorine', 'Bromine', 'Iodine'] 6 | r_tn_lc_Cly: ['photolysis', 'HOx', 'chlorine', 'bromine', 'iodine'] 7 | fams: ['I2', 'HOI', 'IO', 'I', 'HI+OIO+IONO+INO', 'IONO2', 'IxOy', 'CH3I', 'CH2IX'] 8 | fams_A: ['I2', 'HOI', 'IO', 'I', 'HI+OIO+IONO+INO', 'IONO2', 'IxOy', 'CH3I', 'CH2IX', 'AERI'] 9 | fam_slice: [(0, 1), (1, 2), (2, 3), (3, 4), (4, 8), (8, 9), (9, 12), (12, 13), (13, None)] 10 | fam_slice_A: [(0, 1), (1, 2), (2, 3), (3, 4), (4, 8), (8, 9), (9, 12), (12, 13), (13, 16), (16, None)] 11 | Ox_key: ['POX', 'PO3_14', 'PO3_15', 'LOX'] 12 | POxLOx: ['POX', 'LOX'] 13 | iPOxiLOx: ['POX', 'LOX', 'iPOX', 'iLOX'] 14 | BL_FT_UT: [(0, 6), (6, 26), (26, 38)] 15 | n_order: ['CH2IX', 'CH3I', 'I2', 'HOI', 'IO', 'I', 'IONO2', 'HI+OIO+IONO+INO', 'IxOy'] 16 | n_order_A: ['CH2IX', 'CH3I', 'I2', 'HOI', 'IO', 'I', 'IONO2', 'HI+OIO+IONO+INO', 'IxOy', 'AERI'] 17 | d_dep: ['DRYD-FLX'] 18 | w_dep: ['WETDCV-$', 'WETDLS-$'] 19 | BL_m: ['TURBMC-$'] 20 | f_strat: ['STRT-FL'] 21 | p_l: ['PORL-L=$'] 22 | Cld_flx: ['CV-FLX-$'] 23 | IOrg_RIS: ['CH3IT', 'CH2ICl', 'CH2I2', 'CH2IBr', 'I2', 'HOI', 'I', 'IO', 'OIO', 'HI', 'IONO', 'IONO2'] 24 | I_specs: ['I2', 'HOI', 'IO', 'OIO', 'HI', 'IONO', 'IONO2', 'I2O2', 'I2O3', 'I2O4CH3IT', 'CH2I2', 'I', 'INO'] 25 | Iy: ['I2', 'HOI', 'IO', 'OIO', 'HI', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O3', 'I2O4', 'I', 'ICl', 'IBr'] 26 | IxOy: ['IO', 'OIO', 'I2O2', 'I2O3', 'I2O4'] 27 | Iy+AERO: ['I2', 'HOI', 'IO', 'OIO', 'HI', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O3', 'I2O4', 'I', 'ICl', 'IBr', 'AERI', 'ISALA', 'ISALC'] 28 | IOy: ['HOI', 'IO', 'OIO', 'IONO', 'IONO2', 'INO', 'I2O2', 'I2O4', 'I2O3'] 29 | I2Ox: ['I2O2', 'I2O4', 'I2O3'] 30 | Iy_no_i2o4: ['I2', 'HOI', 'IO', 'OIO', 'HI', 'IONO', 'IONO2', 'I2O2', 'I', 'INO', 'I2O3'] 31 | Iy_no_i2o41.1: ['I2', 'HOI', 'IO', 'OIO', 'HI', 'IONO', 
'IONO2', 'I2O2', 'I', 'INO'] 32 | Phot_s_Iy: ['CH3IT', 'CH2ICl', 'CH2I2', 'CH2IBr'] 33 | HOI: ['HOI'] 34 | IOx: ['IO', 'I'] 35 | IO: ['IO'] 36 | I: ['I'] 37 | OIO: ['OIO'] 38 | LIOx: ['LIOx'] 39 | PIOx: ['PIOx'] 40 | iodine_all: ['I2', 'HOI', 'IO', 'I', 'HI', 'OIO', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3', 'I2O5', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'C3H7I', 'C2H5I', 'ICl', 'I2O', 'IBr', 'HIO3'] 41 | iodine_all_A: ['I2', 'HOI', 'IO', 'I', 'HI', 'OIO', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3', 'I2O5', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'C3H7I', 'C2H5I', 'ICl', 'I2O', 'IBr', 'HIO3', 'AERI'] 42 | diurnal_sp: ['IO', 'I2', 'CH2I2', 'BrO'] 43 | obs_comp: ['CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'C2H5I', 'C3H7I', 'I2', 'IO'] 44 | emiss_specs: ['CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr', 'I2', 'HOI'] 45 | w_dep_specs: ['I2', 'HI', 'HOI', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3', 'AERI'] 46 | d_dep_specsl1.1: ['I2', 'HI', 'HOI', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'AERI'] 47 | d_dep_specs: ['I2df', 'HIdf', 'HOIdf', 'IONOdf', 'IONO2df', 'I2O2df', 'I2O4df', 'I2O3df', 'AERIdf'] 48 | d_dep_specs_3.0: ['I2df', 'HIdf', 'HOIdf', 'IONOdf', 'IONO2df', 'I2O2df', 'I2O4df', 'I2O3df', 'ICldf', 'IBrdf', 'AERIdf'] 49 | Bry_d_dep_specs: ['HBr', 'HOBr', 'BrCl', 'Br2', 'IBr', 'BrNO3'] 50 | Bry_w_dep_specs: ['HBr', 'HOBr', 'BrCl', 'Br2', 'IBr'] 51 | Cly_d_dep_specs: ['HCl', 'HOCl', 'ClNO3', 'BrCl', 'ICl'] 52 | Ox: ['O3', 'NO2', 'NO3', 'PAN', 'PPN', 'MPAN', 'HNO4', 'N2O5', 'HNO3', 'BrO', 'HOBr', 'BrNO2', 'BrNO3', 'MPN', 'ETHLN', 'MVKN', 'MCRHN', 'MCRHNB', 'PROPNN', 'R4N2', 'PRN1', 'PRPN', 'R4N1', 'HONIT', 'MONITS', 'MONITU', 'OLND', 'OLNN', 'IHN1', 'IHN2', 'IHN3', 'IHN4', 'INPB', 'INPD', 'ICN', 'IDN', 'ITCN', 'ITHN', 'ISOPNOO1', 'ISOPNOO2', 'INO2B', 'INO2D', 'INA', 'IDHNBOO', 'IDHNDOO1', 'IDHNDOO2', 'IHPNBOO', 'IHPNDOO', 'ICNOO', 'IDNOO', 'MACRNO2', 'ClO', 'HOCl', 'ClNO2', 'ClNO3', 'Cl2O2', 'OClO', 'O', 'O1D', 'IO', 'HOI', 'IONO', 'IONO2', 'OIO', 'I2O2', 'I2O3', 'I2O4'] 53 | NOx: ['NO', 
'NO2'] 54 | HOx: ['OH', 'HO2'] 55 | SOx: ['SO2', 'SO4', 'SO4s', 'SO4D1', 'SO4D2', 'SO4D3', 'SO4D4'] 56 | SO4-all: ['SO4', 'SO4s', 'SO4D1', 'SO4D2', 'SO4D3', 'SO4D4'] 57 | N_specs: ['NO', 'NO2', 'PAN', 'HNO3', 'PMN', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'NH3', 'NH4', 'BrNO2', 'BrNO3', 'MPN', 'ISOPN', 'PROPNN', 'MMN', 'NO3', 'HNO2', 'IONO', 'IONO2', 'INO'] 58 | NOy: ['NO', 'NO2', 'PAN', 'HNO3', 'PMN', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'BrNO2', 'BrNO3', 'MPN', 'ISOPN', 'PROPNN', 'MMN', 'NO3', 'HNO2', 'IONO', 'IONO2', 'INO', 'ClNO2', 'ClNO3'] 59 | NOy-all: ['BrNO3', 'ClNO3', 'ETHLN', 'ETNO3', 'HNO2', 'HNO3', 'HNO4', 'HONIT', 'ICN', 'IDN', 'IHN1', 'IHN2', 'IHN3', 'IHN4', 'INDIOL', 'INPB', 'INPD', 'IONITA', 'IONO', 'IONO2', 'IPRNO3', 'ITCN', 'ITHN', 'MCRHN', 'MCRHNB', 'MENO3', 'MONITA', 'MONITS', 'MONITU', 'MPAN', 'MPN', 'MVKN', 'N2O5', 'NIT', 'NITs', 'NITD1', 'NITD2','NITD3', 'NITD4', 'NO', 'NO2', 'NO3', 'NPRNO3', 'PAN', 'PPN', 'PROPNN', 'R4N2'] 60 | NOy-gas: ['BrNO3', 'ClNO3', 'ETHLN', 'ETNO3', 'HNO2', 'HNO3', 'HNO4', 'HONIT', 'ICN', 'IDN', 'IHN1', 'IHN2', 'IHN3', 'IHN4', 'INDIOL', 'INPB', 'INPD', 'IONITA', 'IONO', 'IONO2', 'IPRNO3', 'ITCN', 'ITHN', 'MCRHN', 'MCRHNB', 'MENO3', 'MONITA', 'MONITS', 'MONITU', 'MPAN', 'MPN', 'MVKN', 'N2O5', 'NO', 'NO2', 'NO3', 'NPRNO3', 'PAN', 'PPN', 'PROPNN', 'R4N2'] 61 | N_specs_no_I: ['NO', 'NO2', 'PAN', 'HNO3', 'PMN', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'NH3', 'NH4', 'BrNO2', 'BrNO3', 'MPN', 'ISOPN', 'PROPNN', 'MMN', 'NO3', 'HNO2'] 62 | Bry: ['Br2', 'BrCl', 'IBr', 'HOBr', 'BrO', 'HBr', 'BrNO2', 'BrNO3', 'Br'] 63 | Cly: ['ClOO', 'OClO', 'ClO', 'Cl2O2', 'ICl', 'Cl2', 'Cl', 'BrCl', 'ClNO3', 'ClNO2', 'HOCl', 'HCl'] 64 | Cl_specs: ['Cl2', 'BrCl', 'ICl', 'HOCl', 'ClO', 'ClOO', 'OClO', 'Cl2O2', 'HCl', 'ClNO2', 'ClNO3', 'Cl', 'CH2Cl2', 'CHCl3', 'CH2ICl', 'CH3Cl'] 65 | Br_specs: ['Br2', 'BrNO3', 'Br', 'HBr', 'CH2IBr', 'CH3Br', 'CH2Br2', 'BrCl', 'BrNO2', 'BrSALC', 'BrSALA', 'HOBr', 'IBr', 'BrO', 'CHBr3'] 66 | Br_emiss: ['CH2Br2', 'CHBr3', 'SSBr2'] 67 | 
inactive_I: ['BrCl', 'OClO', 'ClO', 'HOCl', 'Cl', 'Cl2', 'I2O5', 'I2O', 'HIO3', 'IBr', 'ICl', 'C2H5I', 'C3H7I'] 68 | active_I: ['I2', 'HOI', 'IO', 'I', 'HI', 'OIO', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr'] 69 | surface_specs: ['O3', 'NO', 'NO2', 'NO3', 'N2O5', 'IO', 'IONO2'] 70 | run_name_dict: {'run': 'Br-I', 'Br_2ppt': 'Halogens (I+,Br+) + fixed 2 pptv BrO', 'just_I': 'IODINE', 'no_hal': 'NOHAL', 'just_Br': 'BROMINE', 'Br_1ppt': 'Halogens (I+,Br+) + fixed 1 pptv BrO', 'obs': 'Observations'} 71 | latex_run_names: {'I2Ox_half': 'I$_{2}$O$_{X}$ loss ($\\gamma$) /2', 'run': 'Br-I', 'MacDonald_iodide': 'Ocean iodide', 'Sulfate_up': 'Sulfate uptake', 'I2Ox_phot_exp': 'I$_{2}$O$_{X}$ exp. X-sections', 'het_double': 'het. cycle ($\\gamma$) x2', 'I2Ox_phot_x2': 'I$_{2}$O$_{X}$ X-sections x2', 'no_het': 'no het. cycle ', 'I2Ox_double': 'I$_{2}$O$_{X}$ loss ($\\gamma$) x2', 'just_I': 'IODINE', 'BrO1pptv': 'MBL BrO 1 pmol mol$^{-1}$', 'het_half': 'het. cycle ($\\gamma$) /2', 'Just_I_org': 'Just org. 
I', 'no_I2Ox': 'No I$_{2}$O$_{X}$ Photolysis', 'BrO1pptv_ALL': 'BrO 1 pptv in Trop.', 'BrO2pptv': 'MBL BrO 2 pmol mol$^{-1}$', 'no_hal': 'NOHAL', 'Just_Br': 'BROMINE', 'run.Cl.Br.I.aerosol': 'GEOS-Chem (v10 Cl.Br.I)', 'Iodine simulation.': 'Br-I.', '(I+,Br+)': 'Br-I.', '(I+,Br-)': 'IODINE', '(I-,Br+)': 'BROMINE', '(I-,Br-)': 'NOHAL'} 72 | spec_2_pptv: ['I2', 'HOI', 'IO', 'OIO', 'HI', 'IONO', 'IONO2', 'I2O2', 'CH3IT', 'CH2I2', 'IBr', 'ICl', 'I', 'HIO3', 'I2O', 'INO', 'I2O3', 'I2O4', 'I2O5', 'AERI', 'Cl2', 'Cl', 'HOCl', 'ClO', 'OClO', 'BrCl', 'CH2ICl', 'CH2IBr', 'C3H7I', 'C2H5I', 'Br2', 'Br', 'BrO', 'HOBr', 'HBr', 'BrNO2', 'BrNO3', 'CHBr3', 'CH2Br2', 'CH3Br', 'RCHO', 'MVK', 'MACR', 'PMN', 'PPN', 'R4N2', 'DMS', 'SO4s', 'MSA', 'NITs', 'BCPO', 'DST4', 'ISOPN', 'MOBA', 'PROPNN', 'HAC', 'GLYC', 'MMN', 'RIP', 'IEPOX', 'MAP', 'N2O5', 'NO3'] 73 | spec_2_pptC: ['PRPE', 'ISOP'] 74 | spec_2_ppbv: ['NO', 'DMS', 'RIP', 'IEPOX', 'BCPO', 'DST4', 'HAC', 'GLYC', 'MACR', 'ISOP'] 75 | spec_2_ppbC: ['ALK4'] 76 | red_specs_f_name: ['O3', 'NO2', 'NO', 'NO3', 'N2O5', 'HNO4', 'HNO3', 'HNO2', 'PAN', 'PPN', 'PMN', 'H2O2', 'HO2', 'OH', 'RO2', 'SO2', 'SO4', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND', 'I2', 'HOI', 'IO', 'I', 'HI', 'OIO', 'INO', 'IONO', 'IONO2', 'I2O2', 'I2O4', 'I2O3', 'CH3IT', 'CH2I2', 'CH2ICl', 'CH2IBr'] 77 | FastJ_lower: [289.0, 298.25, 307.45, 312.45, 320.3, 345.0, 412.45] 78 | FastJ_upper: [298.25, 307.45, 312.45, 320.3, 345.0, 412.45, 850.0] 79 | FastJ_mids: [294, 303, 310, 316, 333, 380, 574] 80 | NIT-all: ['NIT', 'NITs', 'NITD1', 'NITD2', 'NITD3', 'NITD4'] 81 | NITa: ['NIT', 'NITD1', 'NITD2',] 82 | NIT-all: ['NIT', 'NITs', 'NITD1', 'NITD2', 'NITD3', 'NITD4'] 83 | DST-all: ['DST1', 'DST2', 'DST3', 'DST4', ] 84 | DSTAL-all: ['DSTAL1', 'DSTAL2', 'DSTAL3', 'DSTAL4', ] 85 | SAL-all: ['SALA', 'SALC'] 86 | FamilyNames: ['NIT-all', 'Ox', 'NOy-gas','Cly', 'Bry', 'Iy', 'NOx', 'NOy', 'NOz', 'SOx', 'SO4-all', 'DSTAL-all', 'DST-all', 'SAL-all', 'NITa'] 87 | CopyVariable4Family: {'NIT-all': 
def get_FAAM_locations_as_df(flight_ID='C225'):
    """
    Retrieve the FAAM BAE146 position (current or historic) from the FAAM
    position service by flight ID

    Parameters
    ----------
    flight_ID (str): FAAM flight identifier (e.g. 'C225')

    Returns
    -------
    (pd.DataFrame) positions returned by the service
    """
    # Local imports: pandas is not imported at this module's top level, and
    # `import urllib` alone does not guarantee urllib.request is loaded
    import pandas as pd
    import urllib.request
    # What is the root URL for the data?
    URL = 'https://www.faam.ac.uk/gluxe/position/query?flight={}'.format(
        flight_ID)
    # Fetch the raw response text. (The original passed it through
    # BeautifulSoup, whose import is commented out above - a NameError.)
    with urllib.request.urlopen(URL) as response:
        s = response.read().decode('utf-8')
    # Parse the data as a JSON string (service uses single quotes)
    json_acceptable_string = s.replace("'", "\"")
    d = json.loads(json_acceptable_string)
    # Return as a dataframe
    return pd.DataFrame(d)


def sort_sites_by_lat(sites):
    """
    Order given list of GAW sites by latitude (highest first)
    """
    # Get site latitudes (gaw_2_loc returns lat, lon, alt, TZ)
    lats = [gaw_2_loc(s)[0] for s in sites]
    # Sort indices by latitude, descending. NOTE: the original indexed the
    # sorted values back via list.index(), which duplicates entries when two
    # sites share a latitude; sorting the indices avoids that.
    order = sorted(range(len(sites)), key=lambda i: lats[i], reverse=True)
    return [sites[i] for i in order]
the edge ) 24 | """ 25 | if verbose: 26 | print(('get_arr_edge_indices for arr of shape: ', arr.shape)) 27 | 28 | # initialise variables 29 | lon_c, lat_c, NIU = get_latlonalt4res(res=res, centre=True) 30 | lon_e, lat_e, NIU = get_latlonalt4res(res=res, centre=False) 31 | lon_diff = lon_e[-5]-lon_e[-6] 32 | lat_diff = lat_e[-5]-lat_e[-6] 33 | nn, n, = 0, 0 34 | last_lat_box = arr[nn, n] 35 | coords = [] 36 | last_lon_box = arr[nn, n] 37 | need_lon_outer_edge, need_lat_outer_edge = False, False 38 | if debug: 39 | print((lon_e, lat_e)) 40 | 41 | # ---- Loop X dimension ( lon ) 42 | for nn, lon_ in enumerate(lon_c): 43 | 44 | # Loop Y dimension ( lat ) and store edges 45 | for n, lat_ in enumerate(lat_c): 46 | 47 | if debug: 48 | print((arr[nn, n], last_lat_box, last_lon_box, 49 | arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box)) 50 | 51 | if arr[nn, n] != last_lat_box: 52 | 53 | # If 1st lat, selct bottom of box 54 | point_lon = lon_e[nn]+lon_diff/2 55 | if need_lat_outer_edge: 56 | point_lat = lat_e[n+1] 57 | else: 58 | point_lat = lat_e[n] 59 | need_lat_outer_edge = True 60 | need_lat_outer_edge = False 61 | 62 | # Add mid point to cordinates list 63 | if isinstance(extra_points_point_on_edge, type(None)): 64 | mid_point = [point_lon, point_lat] 65 | coords += [mid_point] 66 | 67 | # Add given number of points along edge 68 | else: 69 | coords += [[lon_e[nn]+(lon_diff*i), point_lat] for i in 70 | np.linspace(0, 1, extra_points_point_on_edge, 71 | endpoint=True)] 72 | 73 | # temporally save the previous box's value 74 | last_lat_box = arr[nn, n] 75 | 76 | # ---- Loop Y dimension ( lat ) 77 | for n, lat_ in enumerate(lat_c): 78 | 79 | if debug: 80 | print((arr[nn, n], last_lat_box, last_lon_box, 81 | arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box)) 82 | # Loop X dimension ( lon ) and store edges 83 | for nn, lon_ in enumerate(lon_c): 84 | 85 | # If change in value at to list 86 | if arr[nn, n] != last_lon_box: 87 | point_lat = lat_e[n]+lat_diff/2 88 | 
def split_data_by_days(data=None, dates=None, day_list=None,
                       verbose=False, debug=False):
    """
    Takes a list of datetimes and data and returns a list of data and
    the bins ( days )

    Parameters
    ----------
    data (list): data values
    dates (list): datetimes matching `data`
    day_list (list): days (datetimes) to bin by; derived from `dates` if None
    verbose (bool): print (minor) logging to screen
    debug (bool): print debugging output to screen

    Returns
    -------
    (list, list) data (as float arrays) for each day, and the list of days
    """
    if verbose:
        print('split_data_by_days called')

    # Create DataFrame of Data and dates
    df = DataFrame(data, index=dates, columns=['data'])
    # Add list of dates truncated to midnight ( just year, month, day )
    df['days'] = [datetime.datetime(*i.timetuple()[:3]) for i in dates]
    if debug:
        print(df)

    # Get list of unique days if not provided
    if day_list is None:
        day_list = sorted(set(df['days'].values))
    # Loop unique days and select data on these days
    data4days = []
    for day in day_list:
        if debug:
            # NOTE: this print was unconditional in the original
            print((day, df[df['days'] == day]))
        data4days += [df['data'][df['days'] == day]]
    # Just return the values ( i.e. not pandas array )
    data4days = [i.values.astype(float) for i in data4days]

    if debug:
        print([type(i) for i in data4days])
        print(('returning data for {} days, with lengths: '.format(
            len(day_list)), [len(i) for i in data4days]))

    # Return as list of data for each day + list of days (datetimes)
    return data4days, day_list


def obs2grid(glon=None, glat=None, galt=None, nest='high res global',
             sites=None, debug=False):
    """
    Get grid indices for observation sites with a given lat, lon and alt

    Notes
    -------
    - Function flagged for removal
    """
    if glon is None:
        glon, glat, galt = get_latlonalt4res(nest=nest, centre=False,
                                             debug=debug)

    # Look up known site locations. This is needed whether or not `sites`
    # is given (the original only assigned loc_dict in the default branch,
    # raising a NameError when a `sites` list was passed).
    loc_dict = get_loc(rtn_dict=True)
    # Assume use of known CAST sites... unless others given
    if sites is None:
        sites = list(loc_dict.keys())

    # Pull out site location indices
    indices_list = []
    for site in sites:
        lon, lat, alt = loc_dict[site]
        indices_list += [get_xy(lon, lat, glon, glat)]
    return indices_list
NOx: N 44 | NOy: N 45 | NOz: N 46 | NOy-gas: N 47 | NOy-all: N 48 | N2O5: N 49 | O3: O3 50 | SO2: S 51 | ACET: C 52 | ALD2: C 53 | DMS: S 54 | HOI: I 55 | I2: I 56 | CH3I: I 57 | CH3IT: I 58 | CH2I2: I 59 | CH2IBr: I 60 | CH2ICl: I 61 | CHBr3: Br 62 | CH2Br2: Br 63 | CH3Br: Br 64 | CH2Cl2: Cl 65 | CHCl3: Cl 66 | CH3Cl: Cl 67 | HCl: Cl 68 | HBr: Br -------------------------------------------------------------------------------- /AC_tools/spatial_extents4masks.csv: -------------------------------------------------------------------------------- 1 | MaskName,lowerlat,higherlat,lowerlon,higherlon,ID,Rectangle?,Polygon? 2 | Tropics,,,,,0,,FALSE 3 | tropics,,,,,0,,FALSE 4 | mid_lats,,,,,1,,FALSE 5 | Mid Lats,,,,,1,,FALSE 6 | Mid lats,,,,,1,,FALSE 7 | south_pole,,,,,2,,FALSE 8 | south pole,,,,,2,,FALSE 9 | north_pole,,,,,3,,FALSE 10 | north pole,,,,,3,,FALSE 11 | ,,,,,4,,FALSE 12 | unmask_all,,,,,4,,FALSE 13 | All,,,,,4,,FALSE 14 | global,,,,,4,,FALSE 15 | Global,,,,,4,,FALSE 16 | Oceanic,,,,,6,,FALSE 17 | Ocean,,,,,6,,FALSE 18 | Ocean Tropics,,,,,13,,FALSE 19 | Oceanic Tropics,,,,,13,,FALSE 20 | Ocn. Trop.,,,,,13,,FALSE 21 | Extratropics,,,,,5,,FALSE 22 | Ex. Tropics,,,,,5,,FALSE 23 | NH,,,,,7,,FALSE 24 | SH,,,,,8,,FALSE 25 | Ice,,,,,10,,FALSE 26 | Land,,,,,11,,FALSE 27 | lat40_2_40,,,,,12,,FALSE 28 | Land Tropics,,,,,14,,FALSE 29 | All Sur.,,,,,15,,FALSE 30 | surface,,,,,15,,FALSE 31 | Ocean Sur.,,,,,16,,FALSE 32 | Land Sur.,,,,,17,,FALSE 33 | Ice Sur.,,,,,18,,FALSE 34 | lat50_2_50,,,,,19,,FALSE 35 | 50S-50N,,,,,19,,FALSE 36 | Ocn. 50S-50N,,,,,20,,FALSE 37 | North Sea,,,,,21,,FALSE 38 | Med. 
Sea,,,,,22,,FALSE 39 | Mediterranean Sea,,,,,22,,FALSE 40 | Black Sea,,,,,23,,FALSE 41 | Irish Sea,,,,,24,,FALSE 42 | Europe,,,,,25,,FALSE 43 | EU,,,,,25,,FALSE 44 | Land Tropics Sur.,,,,,27,,FALSE 45 | Boreal Land,,,,,28,,FALSE 46 | Alps,43,47,5,15,29,,FALSE 47 | loc,,,,,30,,FALSE 48 | location,,,,,30,,FALSE 49 | France,,,,,31,,FALSE 50 | CONUS,23,60,-125,-54,32,TRUE,FALSE 51 | Cape_Verde_Flying,11.9,21.1,-29.1,-15.9,33,TRUE,FALSE 52 | local_CVAO_area,0,25,-30,-10,34,TRUE,FALSE 53 | inflow_CVAO_area,0,35,-40,-10,35,TRUE,FALSE -------------------------------------------------------------------------------- /AC_tools/species_mass.yml: -------------------------------------------------------------------------------- 1 | ACET: 12.0 2 | ACTA: 60.0 3 | AERI: 127.0 4 | AERI_AVG: 151.0 5 | ALD2: 12.0 6 | ALK4: 12.0 7 | ASOA1: 150.0 8 | ASOA2: 150.0 9 | ASOA3: 150.0 10 | ASOAN: 150.0 11 | ASOG1: 150.0 12 | ASOG2: 150.0 13 | ASOG3: 150.0 14 | BCPI: 12.0 15 | BCPO: 12.0 16 | BENZ: 78.11 17 | Br: 80.0 18 | Br2: 160.0 19 | BrCl: 115.0 20 | BrNO2: 126.0 21 | BrNO3: 142.0 22 | BrO: 96.0 23 | BrSALA: 80.0 24 | BrSALC: 80.0 25 | Bromine: 80.0 26 | Bry: 80.0 27 | C: 12.0 28 | Ca: 40.1 29 | C2O4: 88.0 30 | C2H4: 12.0 31 | C2H5I: 168.0 32 | C2H6: 12.0 33 | C3H7I: 170.0 34 | C3H8: 12.0 35 | CCl4: 152.0 36 | CFC11: 137.0 37 | CFC113: 187.0 38 | CFC114: 187.0 39 | CFC115: 187.0 40 | CFC12: 121.0 41 | CH2Br2: 174.0 42 | CH2Cl2: 85.0 43 | CH2I2: 268.0 44 | CH2IBr: 221.0 45 | CH2ICl: 167.0 46 | CH2O: 30.0 47 | CH2OO: 46.0 48 | CH3Br: 95.0 49 | CH3CCl3: 133.0 50 | CH3Cl: 50.5 51 | CH3I: 142.0 52 | CH3IT: 142.0 53 | CH4: 16.0 54 | CHBr3: 253.0 55 | CHCl3: 119.5 56 | CO: 28.0 57 | Chlorine: 35.0 58 | Cl: 35.0 59 | Cl2: 71.0 60 | Cl2O2: 103.0 61 | ClNO2: 81.5 62 | ClNO3: 97.5 63 | ClO: 51.0 64 | ClOO: 67.5 65 | Cly: 35.0 66 | DHDN: 226.0 67 | DMS: 62.0 68 | DST1: 29.0 69 | DST2: 29.0 70 | DST3: 29.0 71 | DST4: 29.0 72 | DST-all: 29.0 73 | EOH: 46.07 74 | ETHLN: 105.0 75 | GLYC: 60.0 76 | GLYX: 
58.0 77 | H1211: 165.0 78 | H1301: 149.0 79 | H2402: 260.0 80 | H2O: 18.0 81 | H2O2: 34.0 82 | HAC: 74.0 83 | HBr: 81.0 84 | HC187: 187.0 85 | HCFC123: 117.0 86 | HCFC141b: 117.0 87 | HCFC142b: 117.0 88 | HCFC22: 86.0 89 | HCOOH: 46.0 90 | HCl: 36.5 91 | HI: 128.0 92 | HIO3: 176.0 93 | HNO2: 47.0 94 | HNO3: 63.0 95 | HNO4: 79.0 96 | HO2: 33.0 97 | HOBr: 97.0 98 | HOCl: 52.5 99 | HOI: 144.0 100 | HONIT: 215.0 101 | HPALD: 116.0 102 | I: 127.0 103 | I2: 254.0 104 | I2O: 270.0 105 | I2O2: 286.0 106 | I2O3: 302.0 107 | I2O4: 318.0 108 | I2O5: 334.0 109 | IBr: 207.0 110 | ICl: 162.0 111 | IEPOX: 118.0 112 | IEPOXA: 118.0 113 | IEPOXB: 118.0 114 | IEPOXD: 118.0 115 | IMAE: 102.0 116 | INDIOL: 102.0 117 | INO: 157.0 118 | IO: 143.0 119 | IO3: 175.0 120 | IONITA: 14.0 121 | IONO: 173.0 122 | IONO2: 189.0 123 | IPMN: 147.0 124 | ISALA: 127.0 125 | ISALC: 127.0 126 | ISN1: 147.0 127 | ISN1OA: 226.0 128 | ISN1OG: 226.0 129 | ISOA1: 150.0 130 | ISOA2: 150.0 131 | ISOA3: 150.0 132 | ISOG1: 150.0 133 | ISOG2: 150.0 134 | ISOG3: 150.0 135 | ISOP: 12.0 136 | ISOPN: 147.0 137 | ISOPNB: 147.0 138 | ISOPND: 147.0 139 | Iodine: 127.0 140 | Iy: 127.0 141 | K: 39.1 142 | LIMO: 136.0 143 | LVOC: 154.0 144 | LVOCOA: 154.0 145 | MACR: 70.0 146 | MACRN: 149.0 147 | MAP: 76.0 148 | MEK: 12.0 149 | MGLY: 72.0 150 | Mg: 24.3 151 | MMN: 149.0 152 | MOBA: 114.0 153 | MONITA: 14.0 154 | MONITS: 215.0 155 | MONITU: 215.0 156 | MP: 48.0 157 | MPN: 93.0 158 | MSA: 96.0 159 | MTPA: 136.0 160 | MTPO: 136.0 161 | MVK: 70.0 162 | MVKN: 149.0 163 | N: 14.0 164 | Na: 23.0 165 | N2O: 44.0 166 | N2O5: 105.0 167 | NH3: 17.0 168 | NH4: 18.0 169 | NIT: 62.0 170 | NIT-all: 62.0 171 | NITa: 62.0 172 | NITD1: 62.0 173 | NITD2: 62.0 174 | NITD3: 62.0 175 | NITD4: 62.0 176 | NITs: 62.0 177 | NO2: 46.0 178 | NO3: 62.0 179 | NOx: 14.0 180 | NOy: 14.0 181 | NOy-all: 14.0 182 | NOy-gas: 14.0 183 | 'NO': 30.0 184 | NPMN: 147.0 185 | O3: 48.0 186 | Ox: 48.0 187 | OCPI: 12.0 188 | OCPO: 12.0 189 | OCS: 60.0 190 | OClO: 
67.0 191 | OH: 17.0 192 | OIO: 159.0 193 | PAN: 121.0 194 | PMN: 147.0 195 | PPN: 135.0 196 | PROPNN: 109.0 197 | PRPE: 12.0 198 | R4N2: 119.0 199 | RCHO: 58.0 200 | RIP: 118.0 201 | RIPA: 118.0 202 | RIPB: 118.0 203 | RIPD: 118.0 204 | S: 32.0 205 | SALA: 31.4 206 | SALC: 31.4 207 | SO2: 64.0 208 | SO4: 96.0 209 | SO4-all: 96.0 210 | SO4D1: 96.0 211 | SO4D2: 96.0 212 | SO4D3: 96.0 213 | SO4D4: 96.0 214 | SO4S: 96.0 215 | SO4s: 96.0 216 | SOAGX: 58.0 217 | SOAIE: 118.0 218 | SOAME: 102.0 219 | SOAMG: 72.0 220 | SOAP: 150.0 221 | SOAS: 150.0 222 | SOx: 32.0 223 | SSBr2: 160.0 224 | Sulfate: 32.0 225 | TOLU: 92.14 226 | TSOA0: 150.0 227 | TSOA1: 150.0 228 | TSOA2: 150.0 229 | TSOA3: 150.0 230 | TSOG0: 150.0 231 | TSOG1: 150.0 232 | TSOG2: 150.0 233 | TSOG3: 150.0 234 | VOCs: 12.0 235 | XYLE: 106.16 236 | pFe: 55.85 237 | sulfur: 32.0 -------------------------------------------------------------------------------- /AC_tools/species_units.yml: -------------------------------------------------------------------------------- 1 | OCPI: ppbv 2 | OCPO: ppbv 3 | PPN: ppbv 4 | HIO3: pptv 5 | O3: ppbv 6 | PAN: ppbv 7 | ACET: ppbC 8 | RIP: ppbv 9 | BrNO3: pptv 10 | Br: pptv 11 | HBr: pptv 12 | HAC: ppbv 13 | ALD2: ppbC 14 | HNO3: ppbv 15 | HNO2: ppbv 16 | C2H5I: pptv 17 | HNO4: ppbv 18 | OIO: pptv 19 | MAP: ppbv 20 | PRPE: ppbC 21 | HI: pptv 22 | CH2I2: pptv 23 | IONO2: pptv 24 | NIT: ppbv 25 | NIT-all: ppbv 26 | CH3Br: pptv 27 | C3H7I: pptv 28 | C3H8: ppbC 29 | DMS: ppbv 30 | CH2O: ppbv 31 | CH3IT: pptv 32 | NO2: ppbv 33 | NO3: ppbv 34 | N2O5: ppbv 35 | CHBr3: pptv 36 | DST4: ppbv 37 | DST3: ppbv 38 | DST2: ppbv 39 | DST1: ppbv 40 | HOCl: ppbv 41 | NITs: ppbv 42 | RCHO: ppbv 43 | C2H6: ppbC 44 | MPN: ppbv 45 | INO: pptv 46 | MP: ppbv 47 | CH2Br2: pptv 48 | SALC: ppbv 49 | NH3: ppbv 50 | CH2ICl: pptv 51 | IEPOX: ppbv 52 | ClO: ppbv 53 | 'NO': pptv 54 | SALA: ppbv 55 | MOBA: ppbv 56 | R4N2: ppbv 57 | BrCl: pptv 58 | OClO: ppbv 59 | PMN: ppbv 60 | CO: ppbv 61 | CH2IBr: pptv 62 | 
ISOP: ppbC 63 | BCPO: ppbv 64 | MVK: ppbv 65 | BrNO2: pptv 66 | IONO: pptv 67 | Cl2: ppbv 68 | HOBr: pptv 69 | PROPNN: ppbv 70 | Cl: ppbv 71 | I2O2: pptv 72 | I2O3: pptv 73 | I2O4: pptv 74 | I2O5: pptv 75 | MEK: ppbC 76 | MMN: ppbv 77 | ISOPN: ppbv 78 | SO4s: ppbv 79 | I2O: pptv 80 | ALK4: ppbC 81 | MSA: ppbv 82 | I2: pptv 83 | Br2: pptv 84 | IBr: pptv 85 | MACR: ppbv 86 | I: pptv 87 | AERI: pptv 88 | HOI: pptv 89 | BrO: pptv 90 | NH4: ppbv 91 | SO2: ppbv 92 | SO4: ppbv 93 | SO4-all: ppbv 94 | IO: pptv 95 | H2O2: ppbv 96 | BCPI: ppbv 97 | ICl: pptv 98 | GLYC: ppbv 99 | ISALA: pptv 100 | ISALC: pptv 101 | CH3I: pptv 102 | Iy: pptv 103 | PSURF: hPa 104 | OH: pptv 105 | HO2: pptv 106 | MO2: pptv 107 | NOy: ppbv 108 | EOH: ppbv 109 | CH4: ppbv 110 | TSKIN: K 111 | GMAO_TEMP: K 112 | T: $^{\circ}$C 113 | RO2: pptv 114 | GMAO_VWND: m/s 115 | GMAO_UWND: m/s 116 | U10M: m/s 117 | V10M: m/s 118 | U: m/s 119 | V: m/s 120 | PRESS: hPa 121 | CH2OO: pptv 122 | Bry: ppbv 123 | NOx: ppbv 124 | HOx: HOx 125 | VOC: ppbC 126 | TNO3: ppbv 127 | GLYX: pptv 128 | GMAO_SURF: surface area 129 | GMAO_ABSH: frac. 
130 | GMAO_PSFC: hPa 131 | acetylene: pptv 132 | propene: pptv 133 | Napthalene: pptv 134 | Styrene: pptv 135 | 1,3-butadiene: pptv 136 | 1,2-butadiene: pptv 137 | iso-butene: pptv 138 | m+p-xylene: pptv 139 | 1-butene: pptv 140 | t-2 pentene: pptv 141 | cis-2-butene: pptv 142 | 1 pentene: pptv 143 | Trans-2-butene: pptv 144 | o-xylene: pptv 145 | iso-pentane: pptv 146 | n-hexane: pptv 147 | iso-butane: pptv 148 | Nonane, 2-methyl-: pptv 149 | Butane, 2,2,3-trimethyl-: pptv 150 | Dodecane: pptv 151 | Pentane, 2,2,4-trimethyl-: pptv 152 | 2,3methylpentane: pptv 153 | Nonane: pptv 154 | cyclopentane: pptv 155 | n- heptane: pptv 156 | n-butane: pptv 157 | n-pentane: pptv 158 | Undecane: pptv 159 | Decane: pptv 160 | Octane: pptv 161 | n-octane: pptv 162 | ClNO2: pptv 163 | ClNO3: pptv 164 | HCl: pptv 165 | ClOO: pptv 166 | Cl2O2: pptv 167 | CH2Cl2: pptv 168 | CHCl3: pptv 169 | CH3Cl: pptv 170 | BrSALA: pptv 171 | BrSALC: pptv 172 | Cly: pptv 173 | PD421: molec cm$^{-3}$ s$^{-1}$ 174 | LON: $^{\circ}$E 175 | LAT: $^{\circ}$N 176 | HOCl+Cl2: pptv 177 | HOBr+Br2: pptv 178 | HNO3/NOx: pptv 179 | HNO3+NIT: pptv 180 | HNO3+NO3: pptv 181 | NIT/NOx: pptv 182 | HNO3/NIT: pptv 183 | Cl-: pptv 184 | pFe: pptv 185 | PM10: $\mu$g m$^{-3}$ 186 | PM2.5: $\mu$g m$^{-3}$ 187 | PM2.5(dust): $\mu$g m$^{-3}$ 188 | PM2.5(SO4): $\mu$g m$^{-3}$ 189 | PM2.5(NIT): $\mu$g m$^{-3}$ 190 | PM2.5(SOA): $\mu$g m$^{-3}$ 191 | PM2.5(SSA): $\mu$g m$^{-3}$ 192 | PM2.5(BC): $\mu$g m$^{-3}$ 193 | PM2.5(OC): $\mu$g m$^{-3}$ 194 | PM: $\mu$g m$^{-3}$ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Tomás Sherwen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without 
limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | AC_tools: Atmospheric Chemistry (AC) tools 2 | ========================================== 3 | .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4789901.svg 4 | :target: https://doi.org/10.5281/zenodo.4789901 5 | 6 | **Atmospheric Chemistry Tools (AC_Tools)** contains functions and scripts used for 7 | working with atmospheric model output and observational data. 8 | Many functions are included for working with global and regional 9 | chemical transport model (CTM) output from the GEOS-Chem model. 10 | 11 | This package started as just a collection of scripts that were 12 | found to be useful for work in atmospheric chemistry and now 13 | simply aims to contain functionality outside the remit of the 14 | more specialised community packages (e.g. PyGChem_, xbpch_, and 15 | gcpy_) and use the existing Python stack (e.g. dask_, xarray_, 16 | pandas_). 
17 | `Please raise any questions/comments or bugs as issues here on GitHub <https://github.com/tsherwen/AC_tools/issues>`_ 18 | and `pull requests are welcome! <https://github.com/tsherwen/AC_tools/pulls>`_ 19 | 20 | Installation 21 | ------------ 22 | 23 | **AC_Tools** is currently only installable from source. To do this, you 24 | can either install directly via pip (recommended and this includes any dependencies):: 25 | 26 | 27 | $ pip install git+https://github.com/tsherwen/AC_tools.git 28 | 29 | 30 | or (not recommended), clone the source directory and manually install:: 31 | 32 | 33 | $ git clone https://github.com/tsherwen/AC_tools.git 34 | $ cd AC_tools 35 | $ python setup.py install 36 | 37 | 38 | If you specifically wish to use the legacy "bpch to NetCDF" (`bpch2netCDF`_) capability, you will need to run AC_tools in a Python2 environment due to use of an `iris`_ backend via `PyGChem`_. In this specific case, please install using the latter method and ensure that you have `iris`_ (version 1.13.0) installed. You will also need to install `PyGChem`_ (version 0.3.0) by the same route. 39 | 40 | 41 | Quick Start 42 | ----------- 43 | 44 | Functions within **AC_Tools** can be used for various tasks for handling model output and observations. 45 | 46 | An example would be importing NetCDF files or converting ctm.bpch files from a directory of GEOS-Chem_ output (with ``tracerinfo.dat`` and ``diaginfo.dat`` files). Or using GEOS-Chem_ NetCDF output to make a quick plot of surface ozone. 47 | 48 | If using within a python3 environment and GEOS-Chem 49 | 50 | ..
code:: python 51 | 52 | import AC_tools as AC 53 | folder = '' 54 | # Get the GEOS-Chem NetCDF output as a xarray dataset object 55 | # NOTE: this is just a wrapper of get_GEOSChem_files_as_ds, which can retrieve GEOS-Chem NetCDFs as a dataset 56 | ds = AC.GetSpeciesConcDataset(wd=folder) 57 | # Average dataset over time 58 | ds = ds.mean(dim='time') 59 | # Select the surface level 60 | ds = ds.sel( lev=ds.lev[0] ) 61 | # Select ozone and do plot basic plot 62 | spec = 'O3' 63 | #ds['SpeciesConc_'+spec].plot() # very simple plot 64 | AC.quick_map_plot( ds, var2plot='SpeciesConc_'+spec) # basic lat-lon plot 65 | plt.show() 66 | # Get global average surface CO 67 | spec = 'CO' 68 | ratio = (ds['SpeciesConc_'+spec] * ds['AREA']).sum() / ds['AREA'].sum() 69 | ratio = float(ratio.values) 70 | # Make a formatted string and then print using this to screen 71 | prt_str = "The global average surface mixing ratio of {spec} (ppbv) is: {ratio}" 72 | print(prt_str.format(spec=spec, ratio=ratio*1E9)) 73 | 74 | 75 | If using within a python2 environment, the below example is a way of accessing GEOS-Chem data. The data is converted from bpch to NetCDF by default via an iris backend through PyGChem (using bpch2netCDF.py). 76 | 77 | .. 
code:: python 78 | 79 | import AC_tools as AC 80 | folder = '' 81 | # Get the atmospheric ozone burden in Gg O3 as a np.array 82 | array = AC.get_O3_burden_bpch(folder) 83 | print( "The ozone burden is: {burden}".format(burden=array.sum())) 84 | # Get surface area for resolution 85 | s_area = AC.get_surface_area(res='4x5')[..., 0] # m2 land map 86 | # Get global average surface CO 87 | spec = 'CO' 88 | array = AC.get_GC_output(wd=folder, vars=['IJ_AVG_S__{}'.format(spec)]) 89 | ratio = AC.get_2D_arr_weighted_by_X(array, res='4x5', s_area=s_area) 90 | # Make a formatted string and then print using this to screen 91 | prt_str = "The global average surface mixing ratio of {spec} (ppbv) is: {ratio}" 92 | print( prt_str.format(spec=spec, ratio=ratio*1E9)) 93 | 94 | 95 | Usage 96 | ------------ 97 | 98 | Example analysis code for using AC_tools is available in the 99 | scripts folder. 100 | 101 | For more information, please visit the AC_tools_wiki_. 102 | 103 | 104 | License 105 | ------- 106 | 107 | Copyright (c) 2015 `Tomas Sherwen`_ 108 | 109 | This work is licensed under a permissive MIT License. 110 | 111 | Contact 112 | ------- 113 | 114 | `Tomas Sherwen`_ - tomas.sherwen@york.ac.uk 115 | 116 | .. _`Tomas Sherwen`: http://github.com/tsherwen 117 | .. _conda: http://conda.pydata.org/docs/ 118 | .. _dask: http://dask.pydata.org/ 119 | .. _licensed: LICENSE 120 | .. _GEOS-Chem: http://www.geos-chem.org 121 | .. _xarray: http://xarray.pydata.org/ 122 | .. _pandas: https://pandas.pydata.org/ 123 | .. _gcpy: https://github.com/geoschem/gcpy 124 | .. _PyGChem: https://github.com/benbovy/PyGChem 125 | .. _xbpch: https://github.com/darothen/xbpch 126 | .. _iris: https://scitools.org.uk/iris/docs/latest/ 127 | .. _bpch2netCDF: https://github.com/tsherwen/AC_tools/blob/master/Scripts/bpch2netCDF.py 128 | ..
_AC_tools_wiki: https://github.com/tsherwen/AC_tools/wiki 129 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /doc/_build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsherwen/AC_tools/eb308d5f547fbe1dfb131c83e9504f5f5c8f6328/doc/_build/doctrees/environment.pickle -------------------------------------------------------------------------------- /doc/_build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsherwen/AC_tools/eb308d5f547fbe1dfb131c83e9504f5f5c8f6328/doc/_build/doctrees/index.doctree -------------------------------------------------------------------------------- /doc/_build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
3 | config: 1dff76417c1fbad2542694bb07979bbe 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /doc/_build/html/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsherwen/AC_tools/eb308d5f547fbe1dfb131c83e9504f5f5c8f6328/doc/_build/html/.nojekyll -------------------------------------------------------------------------------- /doc/_build/html/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsherwen/AC_tools/eb308d5f547fbe1dfb131c83e9504f5f5c8f6328/doc/_build/html/_static/ajax-loader.gif -------------------------------------------------------------------------------- /doc/_build/html/_static/basic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * basic.css 3 | * ~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- basic theme. 6 | * 7 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | /* -- main layout ----------------------------------------------------------- */ 13 | 14 | div.clearer { 15 | clear: both; 16 | } 17 | 18 | /* -- relbar ---------------------------------------------------------------- */ 19 | 20 | div.related { 21 | width: 100%; 22 | font-size: 90%; 23 | } 24 | 25 | div.related h3 { 26 | display: none; 27 | } 28 | 29 | div.related ul { 30 | margin: 0; 31 | padding: 0 0 0 10px; 32 | list-style: none; 33 | } 34 | 35 | div.related li { 36 | display: inline; 37 | } 38 | 39 | div.related li.right { 40 | float: right; 41 | margin-right: 5px; 42 | } 43 | 44 | /* -- sidebar --------------------------------------------------------------- */ 45 | 46 | div.sphinxsidebarwrapper { 47 | padding: 10px 5px 0 10px; 48 | } 49 | 50 | div.sphinxsidebar { 51 | float: left; 52 | width: 230px; 53 | margin-left: -100%; 54 | font-size: 90%; 55 | word-wrap: break-word; 56 | overflow-wrap : break-word; 57 | } 58 | 59 | div.sphinxsidebar ul { 60 | list-style: none; 61 | } 62 | 63 | div.sphinxsidebar ul ul, 64 | div.sphinxsidebar ul.want-points { 65 | margin-left: 20px; 66 | list-style: square; 67 | } 68 | 69 | div.sphinxsidebar ul ul { 70 | margin-top: 0; 71 | margin-bottom: 0; 72 | } 73 | 74 | div.sphinxsidebar form { 75 | margin-top: 10px; 76 | } 77 | 78 | div.sphinxsidebar input { 79 | border: 1px solid #98dbcc; 80 | font-family: sans-serif; 81 | font-size: 1em; 82 | } 83 | 84 | div.sphinxsidebar #searchbox form.search { 85 | overflow: hidden; 86 | } 87 | 88 | div.sphinxsidebar #searchbox input[type="text"] { 89 | float: left; 90 | width: 80%; 91 | padding: 0.25em; 92 | box-sizing: border-box; 93 | } 94 | 95 | div.sphinxsidebar #searchbox input[type="submit"] { 96 | float: left; 97 | width: 20%; 98 | border-left: none; 99 | padding: 0.25em; 100 | box-sizing: border-box; 101 | } 102 | 103 | 104 | img { 105 | border: 0; 106 | max-width: 100%; 107 | } 108 | 109 | /* -- search page 
----------------------------------------------------------- */ 110 | 111 | ul.search { 112 | margin: 10px 0 0 20px; 113 | padding: 0; 114 | } 115 | 116 | ul.search li { 117 | padding: 5px 0 5px 20px; 118 | background-image: url(file.png); 119 | background-repeat: no-repeat; 120 | background-position: 0 7px; 121 | } 122 | 123 | ul.search li a { 124 | font-weight: bold; 125 | } 126 | 127 | ul.search li div.context { 128 | color: #888; 129 | margin: 2px 0 0 30px; 130 | text-align: left; 131 | } 132 | 133 | ul.keywordmatches li.goodmatch a { 134 | font-weight: bold; 135 | } 136 | 137 | /* -- index page ------------------------------------------------------------ */ 138 | 139 | table.contentstable { 140 | width: 90%; 141 | margin-left: auto; 142 | margin-right: auto; 143 | } 144 | 145 | table.contentstable p.biglink { 146 | line-height: 150%; 147 | } 148 | 149 | a.biglink { 150 | font-size: 1.3em; 151 | } 152 | 153 | span.linkdescr { 154 | font-style: italic; 155 | padding-top: 5px; 156 | font-size: 90%; 157 | } 158 | 159 | /* -- general index --------------------------------------------------------- */ 160 | 161 | table.indextable { 162 | width: 100%; 163 | } 164 | 165 | table.indextable td { 166 | text-align: left; 167 | vertical-align: top; 168 | } 169 | 170 | table.indextable ul { 171 | margin-top: 0; 172 | margin-bottom: 0; 173 | list-style-type: none; 174 | } 175 | 176 | table.indextable > tbody > tr > td > ul { 177 | padding-left: 0em; 178 | } 179 | 180 | table.indextable tr.pcap { 181 | height: 10px; 182 | } 183 | 184 | table.indextable tr.cap { 185 | margin-top: 10px; 186 | background-color: #f2f2f2; 187 | } 188 | 189 | img.toggler { 190 | margin-right: 3px; 191 | margin-top: 3px; 192 | cursor: pointer; 193 | } 194 | 195 | div.modindex-jumpbox { 196 | border-top: 1px solid #ddd; 197 | border-bottom: 1px solid #ddd; 198 | margin: 1em 0 1em 0; 199 | padding: 0.4em; 200 | } 201 | 202 | div.genindex-jumpbox { 203 | border-top: 1px solid #ddd; 204 | border-bottom: 
1px solid #ddd; 205 | margin: 1em 0 1em 0; 206 | padding: 0.4em; 207 | } 208 | 209 | /* -- domain module index --------------------------------------------------- */ 210 | 211 | table.modindextable td { 212 | padding: 2px; 213 | border-collapse: collapse; 214 | } 215 | 216 | /* -- general body styles --------------------------------------------------- */ 217 | 218 | div.body { 219 | min-width: 450px; 220 | max-width: 800px; 221 | } 222 | 223 | div.body p, div.body dd, div.body li, div.body blockquote { 224 | -moz-hyphens: auto; 225 | -ms-hyphens: auto; 226 | -webkit-hyphens: auto; 227 | hyphens: auto; 228 | } 229 | 230 | a.headerlink { 231 | visibility: hidden; 232 | } 233 | 234 | h1:hover > a.headerlink, 235 | h2:hover > a.headerlink, 236 | h3:hover > a.headerlink, 237 | h4:hover > a.headerlink, 238 | h5:hover > a.headerlink, 239 | h6:hover > a.headerlink, 240 | dt:hover > a.headerlink, 241 | caption:hover > a.headerlink, 242 | p.caption:hover > a.headerlink, 243 | div.code-block-caption:hover > a.headerlink { 244 | visibility: visible; 245 | } 246 | 247 | div.body p.caption { 248 | text-align: inherit; 249 | } 250 | 251 | div.body td { 252 | text-align: left; 253 | } 254 | 255 | .first { 256 | margin-top: 0 !important; 257 | } 258 | 259 | p.rubric { 260 | margin-top: 30px; 261 | font-weight: bold; 262 | } 263 | 264 | img.align-left, .figure.align-left, object.align-left { 265 | clear: left; 266 | float: left; 267 | margin-right: 1em; 268 | } 269 | 270 | img.align-right, .figure.align-right, object.align-right { 271 | clear: right; 272 | float: right; 273 | margin-left: 1em; 274 | } 275 | 276 | img.align-center, .figure.align-center, object.align-center { 277 | display: block; 278 | margin-left: auto; 279 | margin-right: auto; 280 | } 281 | 282 | .align-left { 283 | text-align: left; 284 | } 285 | 286 | .align-center { 287 | text-align: center; 288 | } 289 | 290 | .align-right { 291 | text-align: right; 292 | } 293 | 294 | /* -- sidebars 
-------------------------------------------------------------- */ 295 | 296 | div.sidebar { 297 | margin: 0 0 0.5em 1em; 298 | border: 1px solid #ddb; 299 | padding: 7px 7px 0 7px; 300 | background-color: #ffe; 301 | width: 40%; 302 | float: right; 303 | } 304 | 305 | p.sidebar-title { 306 | font-weight: bold; 307 | } 308 | 309 | /* -- topics ---------------------------------------------------------------- */ 310 | 311 | div.topic { 312 | border: 1px solid #ccc; 313 | padding: 7px 7px 0 7px; 314 | margin: 10px 0 10px 0; 315 | } 316 | 317 | p.topic-title { 318 | font-size: 1.1em; 319 | font-weight: bold; 320 | margin-top: 10px; 321 | } 322 | 323 | /* -- admonitions ----------------------------------------------------------- */ 324 | 325 | div.admonition { 326 | margin-top: 10px; 327 | margin-bottom: 10px; 328 | padding: 7px; 329 | } 330 | 331 | div.admonition dt { 332 | font-weight: bold; 333 | } 334 | 335 | div.admonition dl { 336 | margin-bottom: 0; 337 | } 338 | 339 | p.admonition-title { 340 | margin: 0px 10px 5px 0px; 341 | font-weight: bold; 342 | } 343 | 344 | div.body p.centered { 345 | text-align: center; 346 | margin-top: 25px; 347 | } 348 | 349 | /* -- tables ---------------------------------------------------------------- */ 350 | 351 | table.docutils { 352 | border: 0; 353 | border-collapse: collapse; 354 | } 355 | 356 | table.align-center { 357 | margin-left: auto; 358 | margin-right: auto; 359 | } 360 | 361 | table caption span.caption-number { 362 | font-style: italic; 363 | } 364 | 365 | table caption span.caption-text { 366 | } 367 | 368 | table.docutils td, table.docutils th { 369 | padding: 1px 8px 1px 5px; 370 | border-top: 0; 371 | border-left: 0; 372 | border-right: 0; 373 | border-bottom: 1px solid #aaa; 374 | } 375 | 376 | table.footnote td, table.footnote th { 377 | border: 0 !important; 378 | } 379 | 380 | th { 381 | text-align: left; 382 | padding-right: 5px; 383 | } 384 | 385 | table.citation { 386 | border-left: solid 1px gray; 387 | 
margin-left: 1px; 388 | } 389 | 390 | table.citation td { 391 | border-bottom: none; 392 | } 393 | 394 | /* -- figures --------------------------------------------------------------- */ 395 | 396 | div.figure { 397 | margin: 0.5em; 398 | padding: 0.5em; 399 | } 400 | 401 | div.figure p.caption { 402 | padding: 0.3em; 403 | } 404 | 405 | div.figure p.caption span.caption-number { 406 | font-style: italic; 407 | } 408 | 409 | div.figure p.caption span.caption-text { 410 | } 411 | 412 | /* -- field list styles ----------------------------------------------------- */ 413 | 414 | table.field-list td, table.field-list th { 415 | border: 0 !important; 416 | } 417 | 418 | .field-list ul { 419 | margin: 0; 420 | padding-left: 1em; 421 | } 422 | 423 | .field-list p { 424 | margin: 0; 425 | } 426 | 427 | .field-name { 428 | -moz-hyphens: manual; 429 | -ms-hyphens: manual; 430 | -webkit-hyphens: manual; 431 | hyphens: manual; 432 | } 433 | 434 | /* -- hlist styles ---------------------------------------------------------- */ 435 | 436 | table.hlist td { 437 | vertical-align: top; 438 | } 439 | 440 | 441 | /* -- other body styles ----------------------------------------------------- */ 442 | 443 | ol.arabic { 444 | list-style: decimal; 445 | } 446 | 447 | ol.loweralpha { 448 | list-style: lower-alpha; 449 | } 450 | 451 | ol.upperalpha { 452 | list-style: upper-alpha; 453 | } 454 | 455 | ol.lowerroman { 456 | list-style: lower-roman; 457 | } 458 | 459 | ol.upperroman { 460 | list-style: upper-roman; 461 | } 462 | 463 | dl { 464 | margin-bottom: 15px; 465 | } 466 | 467 | dd p { 468 | margin-top: 0px; 469 | } 470 | 471 | dd ul, dd table { 472 | margin-bottom: 10px; 473 | } 474 | 475 | dd { 476 | margin-top: 3px; 477 | margin-bottom: 10px; 478 | margin-left: 30px; 479 | } 480 | 481 | dt:target, span.highlighted { 482 | background-color: #fbe54e; 483 | } 484 | 485 | rect.highlighted { 486 | fill: #fbe54e; 487 | } 488 | 489 | dl.glossary dt { 490 | font-weight: bold; 491 | font-size: 
1.1em; 492 | } 493 | 494 | .optional { 495 | font-size: 1.3em; 496 | } 497 | 498 | .sig-paren { 499 | font-size: larger; 500 | } 501 | 502 | .versionmodified { 503 | font-style: italic; 504 | } 505 | 506 | .system-message { 507 | background-color: #fda; 508 | padding: 5px; 509 | border: 3px solid red; 510 | } 511 | 512 | .footnote:target { 513 | background-color: #ffa; 514 | } 515 | 516 | .line-block { 517 | display: block; 518 | margin-top: 1em; 519 | margin-bottom: 1em; 520 | } 521 | 522 | .line-block .line-block { 523 | margin-top: 0; 524 | margin-bottom: 0; 525 | margin-left: 1.5em; 526 | } 527 | 528 | .guilabel, .menuselection { 529 | font-family: sans-serif; 530 | } 531 | 532 | .accelerator { 533 | text-decoration: underline; 534 | } 535 | 536 | .classifier { 537 | font-style: oblique; 538 | } 539 | 540 | abbr, acronym { 541 | border-bottom: dotted 1px; 542 | cursor: help; 543 | } 544 | 545 | /* -- code displays --------------------------------------------------------- */ 546 | 547 | pre { 548 | overflow: auto; 549 | overflow-y: hidden; /* fixes display issues on Chrome browsers */ 550 | } 551 | 552 | span.pre { 553 | -moz-hyphens: none; 554 | -ms-hyphens: none; 555 | -webkit-hyphens: none; 556 | hyphens: none; 557 | } 558 | 559 | td.linenos pre { 560 | padding: 5px 0px; 561 | border: 0; 562 | background-color: transparent; 563 | color: #aaa; 564 | } 565 | 566 | table.highlighttable { 567 | margin-left: 0.5em; 568 | } 569 | 570 | table.highlighttable td { 571 | padding: 0 0.5em 0 0.5em; 572 | } 573 | 574 | div.code-block-caption { 575 | padding: 2px 5px; 576 | font-size: small; 577 | } 578 | 579 | div.code-block-caption code { 580 | background-color: transparent; 581 | } 582 | 583 | div.code-block-caption + div > div.highlight > pre { 584 | margin-top: 0; 585 | } 586 | 587 | div.code-block-caption span.caption-number { 588 | padding: 0.1em 0.3em; 589 | font-style: italic; 590 | } 591 | 592 | div.code-block-caption span.caption-text { 593 | } 594 | 595 | 
div.literal-block-wrapper { 596 | padding: 1em 1em 0; 597 | } 598 | 599 | div.literal-block-wrapper div.highlight { 600 | margin: 0; 601 | } 602 | 603 | code.descname { 604 | background-color: transparent; 605 | font-weight: bold; 606 | font-size: 1.2em; 607 | } 608 | 609 | code.descclassname { 610 | background-color: transparent; 611 | } 612 | 613 | code.xref, a code { 614 | background-color: transparent; 615 | font-weight: bold; 616 | } 617 | 618 | h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { 619 | background-color: transparent; 620 | } 621 | 622 | .viewcode-link { 623 | float: right; 624 | } 625 | 626 | .viewcode-back { 627 | float: right; 628 | font-family: sans-serif; 629 | } 630 | 631 | div.viewcode-block:target { 632 | margin: -1px -10px; 633 | padding: 0 10px; 634 | } 635 | 636 | /* -- math display ---------------------------------------------------------- */ 637 | 638 | img.math { 639 | vertical-align: middle; 640 | } 641 | 642 | div.body div.math p { 643 | text-align: center; 644 | } 645 | 646 | span.eqno { 647 | float: right; 648 | } 649 | 650 | span.eqno a.headerlink { 651 | position: relative; 652 | left: 0px; 653 | z-index: 1; 654 | } 655 | 656 | div.math:hover a.headerlink { 657 | visibility: visible; 658 | } 659 | 660 | /* -- printout stylesheet --------------------------------------------------- */ 661 | 662 | @media print { 663 | div.document, 664 | div.documentwrapper, 665 | div.bodywrapper { 666 | margin: 0 !important; 667 | width: 100%; 668 | } 669 | 670 | div.sphinxsidebar, 671 | div.related, 672 | div.footer, 673 | #top-link { 674 | display: none; 675 | } 676 | } -------------------------------------------------------------------------------- /doc/_build/html/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilities for all documentation. 
6 | * 7 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | }; 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s === 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * highlight a given string on a jquery object by wrapping it in 66 | * span elements with the given class name. 
67 | */ 68 | jQuery.fn.highlightText = function(text, className) { 69 | function highlight(node, addItems) { 70 | if (node.nodeType === 3) { 71 | var val = node.nodeValue; 72 | var pos = val.toLowerCase().indexOf(text); 73 | if (pos >= 0 && 74 | !jQuery(node.parentNode).hasClass(className) && 75 | !jQuery(node.parentNode).hasClass("nohighlight")) { 76 | var span; 77 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 78 | if (isInSVG) { 79 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 80 | } else { 81 | span = document.createElement("span"); 82 | span.className = className; 83 | } 84 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 85 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 86 | document.createTextNode(val.substr(pos + text.length)), 87 | node.nextSibling)); 88 | node.nodeValue = val.substr(0, pos); 89 | if (isInSVG) { 90 | var bbox = span.getBBox(); 91 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 92 | rect.x.baseVal.value = bbox.x; 93 | rect.y.baseVal.value = bbox.y; 94 | rect.width.baseVal.value = bbox.width; 95 | rect.height.baseVal.value = bbox.height; 96 | rect.setAttribute('class', className); 97 | var parentOfText = node.parentNode.parentNode; 98 | addItems.push({ 99 | "parent": node.parentNode, 100 | "target": rect}); 101 | } 102 | } 103 | } 104 | else if (!jQuery(node).is("button, select, textarea")) { 105 | jQuery.each(node.childNodes, function() { 106 | highlight(this, addItems); 107 | }); 108 | } 109 | } 110 | var addItems = []; 111 | var result = this.each(function() { 112 | highlight(this, addItems); 113 | }); 114 | for (var i = 0; i < addItems.length; ++i) { 115 | jQuery(addItems[i].parent).before(addItems[i].target); 116 | } 117 | return result; 118 | }; 119 | 120 | /* 121 | * backward compatibility for jQuery.browser 122 | * This will be supported until firefox bug is fixed. 
123 | */ 124 | if (!jQuery.browser) { 125 | jQuery.uaMatch = function(ua) { 126 | ua = ua.toLowerCase(); 127 | 128 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 129 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 130 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 131 | /(msie) ([\w.]+)/.exec(ua) || 132 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || 133 | []; 134 | 135 | return { 136 | browser: match[ 1 ] || "", 137 | version: match[ 2 ] || "0" 138 | }; 139 | }; 140 | jQuery.browser = {}; 141 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 142 | } 143 | 144 | /** 145 | * Small JavaScript module for the documentation. 146 | */ 147 | var Documentation = { 148 | 149 | init : function() { 150 | this.fixFirefoxAnchorBug(); 151 | this.highlightSearchWords(); 152 | this.initIndexTable(); 153 | if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { 154 | this.initOnKeyListeners(); 155 | } 156 | }, 157 | 158 | /** 159 | * i18n support 160 | */ 161 | TRANSLATIONS : {}, 162 | PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, 163 | LOCALE : 'unknown', 164 | 165 | // gettext and ngettext don't access this so that the functions 166 | // can safely bound to a different name (_ = Documentation.gettext) 167 | gettext : function(string) { 168 | var translated = Documentation.TRANSLATIONS[string]; 169 | if (typeof translated === 'undefined') 170 | return string; 171 | return (typeof translated === 'string') ? translated : translated[0]; 172 | }, 173 | 174 | ngettext : function(singular, plural, n) { 175 | var translated = Documentation.TRANSLATIONS[singular]; 176 | if (typeof translated === 'undefined') 177 | return (n == 1) ? 
singular : plural; 178 | return translated[Documentation.PLURALEXPR(n)]; 179 | }, 180 | 181 | addTranslations : function(catalog) { 182 | for (var key in catalog.messages) 183 | this.TRANSLATIONS[key] = catalog.messages[key]; 184 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 185 | this.LOCALE = catalog.locale; 186 | }, 187 | 188 | /** 189 | * add context elements like header anchor links 190 | */ 191 | addContextElements : function() { 192 | $('div[id] > :header:first').each(function() { 193 | $('\u00B6'). 194 | attr('href', '#' + this.id). 195 | attr('title', _('Permalink to this headline')). 196 | appendTo(this); 197 | }); 198 | $('dt[id]').each(function() { 199 | $('\u00B6'). 200 | attr('href', '#' + this.id). 201 | attr('title', _('Permalink to this definition')). 202 | appendTo(this); 203 | }); 204 | }, 205 | 206 | /** 207 | * workaround a firefox stupidity 208 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 209 | */ 210 | fixFirefoxAnchorBug : function() { 211 | if (document.location.hash && $.browser.mozilla) 212 | window.setTimeout(function() { 213 | document.location.href += ''; 214 | }, 10); 215 | }, 216 | 217 | /** 218 | * highlight the search words provided in the url in the text 219 | */ 220 | highlightSearchWords : function() { 221 | var params = $.getQueryParameters(); 222 | var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; 223 | if (terms.length) { 224 | var body = $('div.body'); 225 | if (!body.length) { 226 | body = $('body'); 227 | } 228 | window.setTimeout(function() { 229 | $.each(terms, function() { 230 | body.highlightText(this.toLowerCase(), 'highlighted'); 231 | }); 232 | }, 10); 233 | $('') 235 | .appendTo($('#searchbox')); 236 | } 237 | }, 238 | 239 | /** 240 | * init the domain index toggle buttons 241 | */ 242 | initIndexTable : function() { 243 | var togglers = $('img.toggler').click(function() { 244 | var src = $(this).attr('src'); 245 | var idnum = $(this).attr('id').substr(7); 246 | $('tr.cg-' + idnum).toggle(); 247 | if (src.substr(-9) === 'minus.png') 248 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 249 | else 250 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 251 | }).css('display', ''); 252 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 253 | togglers.click(); 254 | } 255 | }, 256 | 257 | /** 258 | * helper function to hide the search marks again 259 | */ 260 | hideSearchWords : function() { 261 | $('#searchbox .highlight-link').fadeOut(300); 262 | $('span.highlighted').removeClass('highlighted'); 263 | }, 264 | 265 | /** 266 | * make the url absolute 267 | */ 268 | makeURL : function(relativeURL) { 269 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 270 | }, 271 | 272 | /** 273 | * get the current relative url 274 | */ 275 | getCurrentURL : function() { 276 | var path = document.location.pathname; 277 | var parts = path.split(/\//); 278 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 279 | if (this === '..') 280 | parts.pop(); 281 | }); 282 | var url = parts.join('/'); 283 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 284 | }, 285 | 286 | initOnKeyListeners: function() { 287 | $(document).keyup(function(event) { 288 | var activeElementType = document.activeElement.tagName; 289 | // don't navigate when in search box or textarea 290 | 
if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { 291 | switch (event.keyCode) { 292 | case 37: // left 293 | var prevHref = $('link[rel="prev"]').prop('href'); 294 | if (prevHref) { 295 | window.location.href = prevHref; 296 | return false; 297 | } 298 | case 39: // right 299 | var nextHref = $('link[rel="next"]').prop('href'); 300 | if (nextHref) { 301 | window.location.href = nextHref; 302 | return false; 303 | } 304 | } 305 | } 306 | }); 307 | } 308 | }; 309 | 310 | // quick alias for translations 311 | _ = Documentation.gettext; 312 | 313 | $(document).ready(function() { 314 | Documentation.init(); 315 | }); 316 | -------------------------------------------------------------------------------- /doc/_build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '1.0', 4 | LANGUAGE: 'None', 5 | COLLAPSE_INDEX: false, 6 | FILE_SUFFIX: '.html', 7 | HAS_SOURCE: true, 8 | SOURCELINK_SUFFIX: '.txt', 9 | NAVIGATION_WITH_KEYS: false, 10 | }; -------------------------------------------------------------------------------- /doc/_build/html/_static/language_data.js: -------------------------------------------------------------------------------- 1 | /* 2 | * language_data.js 3 | * ~~~~~~~~~~~~~~~~ 4 | * 5 | * This script contains the language-specific data used by searchtools.js, 6 | * namely the list of stopwords, stemmer, scorer and splitter. 7 | * 8 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 9 | * :license: BSD, see LICENSE for details. 
10 | * 11 | */ 12 | 13 | var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"]; 14 | 15 | 16 | /* Non-minified version JS is _stemmer.js if file is provided */ 17 | /** 18 | * Porter Stemmer 19 | */ 20 | var Stemmer = function() { 21 | 22 | var step2list = { 23 | ational: 'ate', 24 | tional: 'tion', 25 | enci: 'ence', 26 | anci: 'ance', 27 | izer: 'ize', 28 | bli: 'ble', 29 | alli: 'al', 30 | entli: 'ent', 31 | eli: 'e', 32 | ousli: 'ous', 33 | ization: 'ize', 34 | ation: 'ate', 35 | ator: 'ate', 36 | alism: 'al', 37 | iveness: 'ive', 38 | fulness: 'ful', 39 | ousness: 'ous', 40 | aliti: 'al', 41 | iviti: 'ive', 42 | biliti: 'ble', 43 | logi: 'log' 44 | }; 45 | 46 | var step3list = { 47 | icate: 'ic', 48 | ative: '', 49 | alize: 'al', 50 | iciti: 'ic', 51 | ical: 'ic', 52 | ful: '', 53 | ness: '' 54 | }; 55 | 56 | var c = "[^aeiou]"; // consonant 57 | var v = "[aeiouy]"; // vowel 58 | var C = c + "[^aeiouy]*"; // consonant sequence 59 | var V = v + "[aeiou]*"; // vowel sequence 60 | 61 | var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 62 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 63 | var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 64 | var s_v = "^(" + C + ")?" 
+ v; // vowel in stem 65 | 66 | this.stemWord = function (w) { 67 | var stem; 68 | var suffix; 69 | var firstch; 70 | var origword = w; 71 | 72 | if (w.length < 3) 73 | return w; 74 | 75 | var re; 76 | var re2; 77 | var re3; 78 | var re4; 79 | 80 | firstch = w.substr(0,1); 81 | if (firstch == "y") 82 | w = firstch.toUpperCase() + w.substr(1); 83 | 84 | // Step 1a 85 | re = /^(.+?)(ss|i)es$/; 86 | re2 = /^(.+?)([^s])s$/; 87 | 88 | if (re.test(w)) 89 | w = w.replace(re,"$1$2"); 90 | else if (re2.test(w)) 91 | w = w.replace(re2,"$1$2"); 92 | 93 | // Step 1b 94 | re = /^(.+?)eed$/; 95 | re2 = /^(.+?)(ed|ing)$/; 96 | if (re.test(w)) { 97 | var fp = re.exec(w); 98 | re = new RegExp(mgr0); 99 | if (re.test(fp[1])) { 100 | re = /.$/; 101 | w = w.replace(re,""); 102 | } 103 | } 104 | else if (re2.test(w)) { 105 | var fp = re2.exec(w); 106 | stem = fp[1]; 107 | re2 = new RegExp(s_v); 108 | if (re2.test(stem)) { 109 | w = stem; 110 | re2 = /(at|bl|iz)$/; 111 | re3 = new RegExp("([^aeiouylsz])\\1$"); 112 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 113 | if (re2.test(w)) 114 | w = w + "e"; 115 | else if (re3.test(w)) { 116 | re = /.$/; 117 | w = w.replace(re,""); 118 | } 119 | else if (re4.test(w)) 120 | w = w + "e"; 121 | } 122 | } 123 | 124 | // Step 1c 125 | re = /^(.+?)y$/; 126 | if (re.test(w)) { 127 | var fp = re.exec(w); 128 | stem = fp[1]; 129 | re = new RegExp(s_v); 130 | if (re.test(stem)) 131 | w = stem + "i"; 132 | } 133 | 134 | // Step 2 135 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; 136 | if (re.test(w)) { 137 | var fp = re.exec(w); 138 | stem = fp[1]; 139 | suffix = fp[2]; 140 | re = new RegExp(mgr0); 141 | if (re.test(stem)) 142 | w = stem + step2list[suffix]; 143 | } 144 | 145 | // Step 3 146 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; 147 | if (re.test(w)) { 148 | var fp = re.exec(w); 149 | stem = fp[1]; 150 | suffix = fp[2]; 151 | re = 
new RegExp(mgr0); 152 | if (re.test(stem)) 153 | w = stem + step3list[suffix]; 154 | } 155 | 156 | // Step 4 157 | re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; 158 | re2 = /^(.+?)(s|t)(ion)$/; 159 | if (re.test(w)) { 160 | var fp = re.exec(w); 161 | stem = fp[1]; 162 | re = new RegExp(mgr1); 163 | if (re.test(stem)) 164 | w = stem; 165 | } 166 | else if (re2.test(w)) { 167 | var fp = re2.exec(w); 168 | stem = fp[1] + fp[2]; 169 | re2 = new RegExp(mgr1); 170 | if (re2.test(stem)) 171 | w = stem; 172 | } 173 | 174 | // Step 5 175 | re = /^(.+?)e$/; 176 | if (re.test(w)) { 177 | var fp = re.exec(w); 178 | stem = fp[1]; 179 | re = new RegExp(mgr1); 180 | re2 = new RegExp(meq1); 181 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 182 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) 183 | w = stem; 184 | } 185 | re = /ll$/; 186 | re2 = new RegExp(mgr1); 187 | if (re.test(w) && re2.test(w)) { 188 | re = /.$/; 189 | w = w.replace(re,""); 190 | } 191 | 192 | // and turn initial Y back to y 193 | if (firstch == "y") 194 | w = firstch.toLowerCase() + w.substr(1); 195 | return w; 196 | } 197 | } 198 | 199 | 200 | 201 | 202 | 203 | var splitChars = (function() { 204 | var result = {}; 205 | var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648, 206 | 1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702, 207 | 2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971, 208 | 2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345, 209 | 3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761, 210 | 3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823, 211 | 4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125, 212 | 8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695, 213 | 11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587, 214 | 43696, 43713, 
64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141]; 215 | var i, j, start, end; 216 | for (i = 0; i < singles.length; i++) { 217 | result[singles[i]] = true; 218 | } 219 | var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709], 220 | [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161], 221 | [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568], 222 | [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807], 223 | [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047], 224 | [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383], 225 | [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450], 226 | [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547], 227 | [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673], 228 | [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820], 229 | [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946], 230 | [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023], 231 | [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173], 232 | [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332], 233 | [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481], 234 | [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718], 235 | [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791], 236 | [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095], 237 | [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205], 238 | [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687], 239 | [4702, 4703], [4750, 
4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968], 240 | [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869], 241 | [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102], 242 | [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271], 243 | [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592], 244 | [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822], 245 | [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167], 246 | [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959], 247 | [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143], 248 | [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318], 249 | [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483], 250 | [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101], 251 | [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567], 252 | [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292], 253 | [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444], 254 | [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783], 255 | [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311], 256 | [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511], 257 | [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774], 258 | [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071], 259 | [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263], 260 | [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519], 261 | [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647], 
262 | [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967], 263 | [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295], 264 | [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274], 265 | [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007], 266 | [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381], 267 | [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]]; 268 | for (i = 0; i < ranges.length; i++) { 269 | start = ranges[i][0]; 270 | end = ranges[i][1]; 271 | for (j = start; j <= end; j++) { 272 | result[j] = true; 273 | } 274 | } 275 | return result; 276 | })(); 277 | 278 | function splitQuery(query) { 279 | var result = []; 280 | var start = -1; 281 | for (var i = 0; i < query.length; i++) { 282 | if (splitChars[query.charCodeAt(i)]) { 283 | if (start !== -1) { 284 | result.push(query.slice(start, i)); 285 | start = -1; 286 | } 287 | } else if (start === -1) { 288 | start = i; 289 | } 290 | } 291 | if (start !== -1) { 292 | result.push(query.slice(start)); 293 | } 294 | return result; 295 | } 296 | 297 | 298 | -------------------------------------------------------------------------------- /doc/_build/html/_static/nature.css: -------------------------------------------------------------------------------- 1 | /* 2 | * nature.css_t 3 | * ~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- nature theme. 6 | * 7 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 
9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: Arial, sans-serif; 18 | font-size: 100%; 19 | background-color: #fff; 20 | color: #555; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.documentwrapper { 26 | float: left; 27 | width: 100%; 28 | } 29 | 30 | div.bodywrapper { 31 | margin: 0 0 0 230px; 32 | } 33 | 34 | hr { 35 | border: 1px solid #B1B4B6; 36 | } 37 | 38 | div.document { 39 | background-color: #eee; 40 | } 41 | 42 | div.body { 43 | background-color: #ffffff; 44 | color: #3E4349; 45 | padding: 0 30px 30px 30px; 46 | font-size: 0.9em; 47 | } 48 | 49 | div.footer { 50 | color: #555; 51 | width: 100%; 52 | padding: 13px 0; 53 | text-align: center; 54 | font-size: 75%; 55 | } 56 | 57 | div.footer a { 58 | color: #444; 59 | text-decoration: underline; 60 | } 61 | 62 | div.related { 63 | background-color: #6BA81E; 64 | line-height: 32px; 65 | color: #fff; 66 | text-shadow: 0px 1px 0 #444; 67 | font-size: 0.9em; 68 | } 69 | 70 | div.related a { 71 | color: #E2F3CC; 72 | } 73 | 74 | div.sphinxsidebar { 75 | font-size: 0.75em; 76 | line-height: 1.5em; 77 | } 78 | 79 | div.sphinxsidebarwrapper{ 80 | padding: 20px 0; 81 | } 82 | 83 | div.sphinxsidebar h3, 84 | div.sphinxsidebar h4 { 85 | font-family: Arial, sans-serif; 86 | color: #222; 87 | font-size: 1.2em; 88 | font-weight: normal; 89 | margin: 0; 90 | padding: 5px 10px; 91 | background-color: #ddd; 92 | text-shadow: 1px 1px 0 white 93 | } 94 | 95 | div.sphinxsidebar h4{ 96 | font-size: 1.1em; 97 | } 98 | 99 | div.sphinxsidebar h3 a { 100 | color: #444; 101 | } 102 | 103 | 104 | div.sphinxsidebar p { 105 | color: #888; 106 | padding: 5px 20px; 107 | } 108 | 109 | div.sphinxsidebar p.topless { 110 | } 111 | 112 | div.sphinxsidebar ul { 113 | margin: 10px 20px; 114 | padding: 0; 115 | color: #000; 116 | } 117 | 118 | div.sphinxsidebar a { 119 | color: #444; 120 | } 121 | 122 | 
div.sphinxsidebar input { 123 | border: 1px solid #ccc; 124 | font-family: sans-serif; 125 | font-size: 1em; 126 | } 127 | 128 | div.sphinxsidebar .searchformwrapper { 129 | margin-left: 20px; 130 | margin-right: 20px; 131 | } 132 | 133 | /* -- body styles ----------------------------------------------------------- */ 134 | 135 | a { 136 | color: #005B81; 137 | text-decoration: none; 138 | } 139 | 140 | a:hover { 141 | color: #E32E00; 142 | text-decoration: underline; 143 | } 144 | 145 | div.body h1, 146 | div.body h2, 147 | div.body h3, 148 | div.body h4, 149 | div.body h5, 150 | div.body h6 { 151 | font-family: Arial, sans-serif; 152 | background-color: #BED4EB; 153 | font-weight: normal; 154 | color: #212224; 155 | margin: 30px 0px 10px 0px; 156 | padding: 5px 0 5px 10px; 157 | text-shadow: 0px 1px 0 white 158 | } 159 | 160 | div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; } 161 | div.body h2 { font-size: 150%; background-color: #C8D5E3; } 162 | div.body h3 { font-size: 120%; background-color: #D8DEE3; } 163 | div.body h4 { font-size: 110%; background-color: #D8DEE3; } 164 | div.body h5 { font-size: 100%; background-color: #D8DEE3; } 165 | div.body h6 { font-size: 100%; background-color: #D8DEE3; } 166 | 167 | a.headerlink { 168 | color: #c60f0f; 169 | font-size: 0.8em; 170 | padding: 0 4px 0 4px; 171 | text-decoration: none; 172 | } 173 | 174 | a.headerlink:hover { 175 | background-color: #c60f0f; 176 | color: white; 177 | } 178 | 179 | div.body p, div.body dd, div.body li { 180 | line-height: 1.5em; 181 | } 182 | 183 | div.admonition p.admonition-title + p { 184 | display: inline; 185 | } 186 | 187 | div.highlight{ 188 | background-color: white; 189 | } 190 | 191 | div.note { 192 | background-color: #eee; 193 | border: 1px solid #ccc; 194 | } 195 | 196 | div.seealso { 197 | background-color: #ffc; 198 | border: 1px solid #ff6; 199 | } 200 | 201 | div.topic { 202 | background-color: #eee; 203 | } 204 | 205 | div.warning { 206 | 
background-color: #ffe4e4; 207 | border: 1px solid #f66; 208 | } 209 | 210 | p.admonition-title { 211 | display: inline; 212 | } 213 | 214 | p.admonition-title:after { 215 | content: ":"; 216 | } 217 | 218 | pre { 219 | padding: 10px; 220 | background-color: White; 221 | color: #222; 222 | line-height: 1.2em; 223 | border: 1px solid #C6C9CB; 224 | font-size: 1.1em; 225 | margin: 1.5em 0 1.5em 0; 226 | -webkit-box-shadow: 1px 1px 1px #d8d8d8; 227 | -moz-box-shadow: 1px 1px 1px #d8d8d8; 228 | } 229 | 230 | code { 231 | background-color: #ecf0f3; 232 | color: #222; 233 | /* padding: 1px 2px; */ 234 | font-size: 1.1em; 235 | font-family: monospace; 236 | } 237 | 238 | .viewcode-back { 239 | font-family: Arial, sans-serif; 240 | } 241 | 242 | div.viewcode-block:target { 243 | background-color: #f4debf; 244 | border-top: 1px solid #ac9; 245 | border-bottom: 1px solid #ac9; 246 | } 247 | 248 | div.code-block-caption { 249 | background-color: #ddd; 250 | color: #222; 251 | border: 1px solid #C6C9CB; 252 | } -------------------------------------------------------------------------------- /doc/_build/html/_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #f8f8f8; } 3 | .highlight .c { color: #8f5902; font-style: italic } /* Comment */ 4 | .highlight .err { color: #a40000; border: 1px solid #ef2929 } /* Error */ 5 | .highlight .g { color: #000000 } /* Generic */ 6 | .highlight .k { color: #204a87; font-weight: bold } /* Keyword */ 7 | .highlight .l { color: #000000 } /* Literal */ 8 | .highlight .n { color: #000000 } /* Name */ 9 | .highlight .o { color: #ce5c00; font-weight: bold } /* Operator */ 10 | .highlight .x { color: #000000 } /* Other */ 11 | .highlight .p { color: #000000; font-weight: bold } /* Punctuation */ 12 | .highlight .ch { color: #8f5902; font-style: italic } /* Comment.Hashbang */ 13 | .highlight .cm { color: #8f5902; 
font-style: italic } /* Comment.Multiline */ 14 | .highlight .cp { color: #8f5902; font-style: italic } /* Comment.Preproc */ 15 | .highlight .cpf { color: #8f5902; font-style: italic } /* Comment.PreprocFile */ 16 | .highlight .c1 { color: #8f5902; font-style: italic } /* Comment.Single */ 17 | .highlight .cs { color: #8f5902; font-style: italic } /* Comment.Special */ 18 | .highlight .gd { color: #a40000 } /* Generic.Deleted */ 19 | .highlight .ge { color: #000000; font-style: italic } /* Generic.Emph */ 20 | .highlight .gr { color: #ef2929 } /* Generic.Error */ 21 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 22 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 23 | .highlight .go { color: #000000; font-style: italic } /* Generic.Output */ 24 | .highlight .gp { color: #8f5902 } /* Generic.Prompt */ 25 | .highlight .gs { color: #000000; font-weight: bold } /* Generic.Strong */ 26 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 27 | .highlight .gt { color: #a40000; font-weight: bold } /* Generic.Traceback */ 28 | .highlight .kc { color: #204a87; font-weight: bold } /* Keyword.Constant */ 29 | .highlight .kd { color: #204a87; font-weight: bold } /* Keyword.Declaration */ 30 | .highlight .kn { color: #204a87; font-weight: bold } /* Keyword.Namespace */ 31 | .highlight .kp { color: #204a87; font-weight: bold } /* Keyword.Pseudo */ 32 | .highlight .kr { color: #204a87; font-weight: bold } /* Keyword.Reserved */ 33 | .highlight .kt { color: #204a87; font-weight: bold } /* Keyword.Type */ 34 | .highlight .ld { color: #000000 } /* Literal.Date */ 35 | .highlight .m { color: #0000cf; font-weight: bold } /* Literal.Number */ 36 | .highlight .s { color: #4e9a06 } /* Literal.String */ 37 | .highlight .na { color: #c4a000 } /* Name.Attribute */ 38 | .highlight .nb { color: #204a87 } /* Name.Builtin */ 39 | .highlight .nc { color: #000000 } /* Name.Class */ 40 | .highlight .no { color: #000000 } /* 
Name.Constant */ 41 | .highlight .nd { color: #5c35cc; font-weight: bold } /* Name.Decorator */ 42 | .highlight .ni { color: #ce5c00 } /* Name.Entity */ 43 | .highlight .ne { color: #cc0000; font-weight: bold } /* Name.Exception */ 44 | .highlight .nf { color: #000000 } /* Name.Function */ 45 | .highlight .nl { color: #f57900 } /* Name.Label */ 46 | .highlight .nn { color: #000000 } /* Name.Namespace */ 47 | .highlight .nx { color: #000000 } /* Name.Other */ 48 | .highlight .py { color: #000000 } /* Name.Property */ 49 | .highlight .nt { color: #204a87; font-weight: bold } /* Name.Tag */ 50 | .highlight .nv { color: #000000 } /* Name.Variable */ 51 | .highlight .ow { color: #204a87; font-weight: bold } /* Operator.Word */ 52 | .highlight .w { color: #f8f8f8; text-decoration: underline } /* Text.Whitespace */ 53 | .highlight .mb { color: #0000cf; font-weight: bold } /* Literal.Number.Bin */ 54 | .highlight .mf { color: #0000cf; font-weight: bold } /* Literal.Number.Float */ 55 | .highlight .mh { color: #0000cf; font-weight: bold } /* Literal.Number.Hex */ 56 | .highlight .mi { color: #0000cf; font-weight: bold } /* Literal.Number.Integer */ 57 | .highlight .mo { color: #0000cf; font-weight: bold } /* Literal.Number.Oct */ 58 | .highlight .sa { color: #4e9a06 } /* Literal.String.Affix */ 59 | .highlight .sb { color: #4e9a06 } /* Literal.String.Backtick */ 60 | .highlight .sc { color: #4e9a06 } /* Literal.String.Char */ 61 | .highlight .dl { color: #4e9a06 } /* Literal.String.Delimiter */ 62 | .highlight .sd { color: #8f5902; font-style: italic } /* Literal.String.Doc */ 63 | .highlight .s2 { color: #4e9a06 } /* Literal.String.Double */ 64 | .highlight .se { color: #4e9a06 } /* Literal.String.Escape */ 65 | .highlight .sh { color: #4e9a06 } /* Literal.String.Heredoc */ 66 | .highlight .si { color: #4e9a06 } /* Literal.String.Interpol */ 67 | .highlight .sx { color: #4e9a06 } /* Literal.String.Other */ 68 | .highlight .sr { color: #4e9a06 } /* Literal.String.Regex */ 
69 | .highlight .s1 { color: #4e9a06 } /* Literal.String.Single */ 70 | .highlight .ss { color: #4e9a06 } /* Literal.String.Symbol */ 71 | .highlight .bp { color: #3465a4 } /* Name.Builtin.Pseudo */ 72 | .highlight .fm { color: #000000 } /* Name.Function.Magic */ 73 | .highlight .vc { color: #000000 } /* Name.Variable.Class */ 74 | .highlight .vg { color: #000000 } /* Name.Variable.Global */ 75 | .highlight .vi { color: #000000 } /* Name.Variable.Instance */ 76 | .highlight .vm { color: #000000 } /* Name.Variable.Magic */ 77 | .highlight .il { color: #0000cf; font-weight: bold } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /doc/_build/html/_static/underscore.js: -------------------------------------------------------------------------------- 1 | // Underscore.js 1.3.1 2 | // (c) 2009-2012 Jeremy Ashkenas, DocumentCloud Inc. 3 | // Underscore is freely distributable under the MIT license. 4 | // Portions of Underscore are inspired or borrowed from Prototype, 5 | // Oliver Steele's Functional, and John Resig's Micro-Templating. 
6 | // For all details and documentation: 7 | // http://documentcloud.github.com/underscore 8 | (function(){function q(a,c,d){if(a===c)return a!==0||1/a==1/c;if(a==null||c==null)return a===c;if(a._chain)a=a._wrapped;if(c._chain)c=c._wrapped;if(a.isEqual&&b.isFunction(a.isEqual))return a.isEqual(c);if(c.isEqual&&b.isFunction(c.isEqual))return c.isEqual(a);var e=l.call(a);if(e!=l.call(c))return false;switch(e){case "[object String]":return a==String(c);case "[object Number]":return a!=+a?c!=+c:a==0?1/a==1/c:a==+c;case "[object Date]":case "[object Boolean]":return+a==+c;case "[object RegExp]":return a.source== 9 | c.source&&a.global==c.global&&a.multiline==c.multiline&&a.ignoreCase==c.ignoreCase}if(typeof a!="object"||typeof c!="object")return false;for(var f=d.length;f--;)if(d[f]==a)return true;d.push(a);var f=0,g=true;if(e=="[object Array]"){if(f=a.length,g=f==c.length)for(;f--;)if(!(g=f in a==f in c&&q(a[f],c[f],d)))break}else{if("constructor"in a!="constructor"in c||a.constructor!=c.constructor)return false;for(var h in a)if(b.has(a,h)&&(f++,!(g=b.has(c,h)&&q(a[h],c[h],d))))break;if(g){for(h in c)if(b.has(c, 10 | h)&&!f--)break;g=!f}}d.pop();return g}var r=this,G=r._,n={},k=Array.prototype,o=Object.prototype,i=k.slice,H=k.unshift,l=o.toString,I=o.hasOwnProperty,w=k.forEach,x=k.map,y=k.reduce,z=k.reduceRight,A=k.filter,B=k.every,C=k.some,p=k.indexOf,D=k.lastIndexOf,o=Array.isArray,J=Object.keys,s=Function.prototype.bind,b=function(a){return new m(a)};if(typeof exports!=="undefined"){if(typeof module!=="undefined"&&module.exports)exports=module.exports=b;exports._=b}else r._=b;b.VERSION="1.3.1";var j=b.each= 11 | b.forEach=function(a,c,d){if(a!=null)if(w&&a.forEach===w)a.forEach(c,d);else if(a.length===+a.length)for(var e=0,f=a.length;e2;a== 12 | null&&(a=[]);if(y&&a.reduce===y)return e&&(c=b.bind(c,e)),f?a.reduce(c,d):a.reduce(c);j(a,function(a,b,i){f?d=c.call(e,d,a,b,i):(d=a,f=true)});if(!f)throw new TypeError("Reduce of empty array with no initial value");return 
d};b.reduceRight=b.foldr=function(a,c,d,e){var f=arguments.length>2;a==null&&(a=[]);if(z&&a.reduceRight===z)return e&&(c=b.bind(c,e)),f?a.reduceRight(c,d):a.reduceRight(c);var g=b.toArray(a).reverse();e&&!f&&(c=b.bind(c,e));return f?b.reduce(g,c,d,e):b.reduce(g,c)};b.find=b.detect= 13 | function(a,c,b){var e;E(a,function(a,g,h){if(c.call(b,a,g,h))return e=a,true});return e};b.filter=b.select=function(a,c,b){var e=[];if(a==null)return e;if(A&&a.filter===A)return a.filter(c,b);j(a,function(a,g,h){c.call(b,a,g,h)&&(e[e.length]=a)});return e};b.reject=function(a,c,b){var e=[];if(a==null)return e;j(a,function(a,g,h){c.call(b,a,g,h)||(e[e.length]=a)});return e};b.every=b.all=function(a,c,b){var e=true;if(a==null)return e;if(B&&a.every===B)return a.every(c,b);j(a,function(a,g,h){if(!(e= 14 | e&&c.call(b,a,g,h)))return n});return e};var E=b.some=b.any=function(a,c,d){c||(c=b.identity);var e=false;if(a==null)return e;if(C&&a.some===C)return a.some(c,d);j(a,function(a,b,h){if(e||(e=c.call(d,a,b,h)))return n});return!!e};b.include=b.contains=function(a,c){var b=false;if(a==null)return b;return p&&a.indexOf===p?a.indexOf(c)!=-1:b=E(a,function(a){return a===c})};b.invoke=function(a,c){var d=i.call(arguments,2);return b.map(a,function(a){return(b.isFunction(c)?c||a:a[c]).apply(a,d)})};b.pluck= 15 | function(a,c){return b.map(a,function(a){return a[c]})};b.max=function(a,c,d){if(!c&&b.isArray(a))return Math.max.apply(Math,a);if(!c&&b.isEmpty(a))return-Infinity;var e={computed:-Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;b>=e.computed&&(e={value:a,computed:b})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);if(!c&&b.isEmpty(a))return Infinity;var e={computed:Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;bd?1:0}),"value")};b.groupBy=function(a,c){var d={},e=b.isFunction(c)?c:function(a){return a[c]};j(a,function(a,b){var c=e(a,b);(d[c]||(d[c]=[])).push(a)});return d};b.sortedIndex=function(a, 17 | 
c,d){d||(d=b.identity);for(var e=0,f=a.length;e>1;d(a[g])=0})})};b.difference=function(a){var c=b.flatten(i.call(arguments,1));return b.filter(a,function(a){return!b.include(c,a)})};b.zip=function(){for(var a=i.call(arguments),c=b.max(b.pluck(a,"length")),d=Array(c),e=0;e=0;d--)b=[a[d].apply(this,b)];return b[0]}}; 24 | b.after=function(a,b){return a<=0?b():function(){if(--a<1)return b.apply(this,arguments)}};b.keys=J||function(a){if(a!==Object(a))throw new TypeError("Invalid object");var c=[],d;for(d in a)b.has(a,d)&&(c[c.length]=d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=b.methods=function(a){var c=[],d;for(d in a)b.isFunction(a[d])&&c.push(d);return c.sort()};b.extend=function(a){j(i.call(arguments,1),function(b){for(var d in b)a[d]=b[d]});return a};b.defaults=function(a){j(i.call(arguments, 25 | 1),function(b){for(var d in b)a[d]==null&&(a[d]=b[d])});return a};b.clone=function(a){return!b.isObject(a)?a:b.isArray(a)?a.slice():b.extend({},a)};b.tap=function(a,b){b(a);return a};b.isEqual=function(a,b){return q(a,b,[])};b.isEmpty=function(a){if(b.isArray(a)||b.isString(a))return a.length===0;for(var c in a)if(b.has(a,c))return false;return true};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=o||function(a){return l.call(a)=="[object Array]"};b.isObject=function(a){return a===Object(a)}; 26 | b.isArguments=function(a){return l.call(a)=="[object Arguments]"};if(!b.isArguments(arguments))b.isArguments=function(a){return!(!a||!b.has(a,"callee"))};b.isFunction=function(a){return l.call(a)=="[object Function]"};b.isString=function(a){return l.call(a)=="[object String]"};b.isNumber=function(a){return l.call(a)=="[object Number]"};b.isNaN=function(a){return a!==a};b.isBoolean=function(a){return a===true||a===false||l.call(a)=="[object Boolean]"};b.isDate=function(a){return l.call(a)=="[object Date]"}; 27 | b.isRegExp=function(a){return l.call(a)=="[object RegExp]"};b.isNull=function(a){return 
a===null};b.isUndefined=function(a){return a===void 0};b.has=function(a,b){return I.call(a,b)};b.noConflict=function(){r._=G;return this};b.identity=function(a){return a};b.times=function(a,b,d){for(var e=0;e/g,">").replace(/"/g,""").replace(/'/g,"'").replace(/\//g,"/")};b.mixin=function(a){j(b.functions(a), 28 | function(c){K(c,b[c]=a[c])})};var L=0;b.uniqueId=function(a){var b=L++;return a?a+b:b};b.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g};var t=/.^/,u=function(a){return a.replace(/\\\\/g,"\\").replace(/\\'/g,"'")};b.template=function(a,c){var d=b.templateSettings,d="var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push('"+a.replace(/\\/g,"\\\\").replace(/'/g,"\\'").replace(d.escape||t,function(a,b){return"',_.escape("+ 29 | u(b)+"),'"}).replace(d.interpolate||t,function(a,b){return"',"+u(b)+",'"}).replace(d.evaluate||t,function(a,b){return"');"+u(b).replace(/[\r\n\t]/g," ")+";__p.push('"}).replace(/\r/g,"\\r").replace(/\n/g,"\\n").replace(/\t/g,"\\t")+"');}return __p.join('');",e=new Function("obj","_",d);return c?e(c,b):function(a){return e.call(this,a,b)}};b.chain=function(a){return b(a).chain()};var m=function(a){this._wrapped=a};b.prototype=m.prototype;var v=function(a,c){return c?b(a).chain():a},K=function(a,c){m.prototype[a]= 30 | function(){var a=i.call(arguments);H.call(a,this._wrapped);return v(c.apply(b,a),this._chain)}};b.mixin(b);j("pop,push,reverse,shift,sort,splice,unshift".split(","),function(a){var b=k[a];m.prototype[a]=function(){var d=this._wrapped;b.apply(d,arguments);var e=d.length;(a=="shift"||a=="splice")&&e===0&&delete d[0];return v(d,this._chain)}});j(["concat","join","slice"],function(a){var b=k[a];m.prototype[a]=function(){return v(b.apply(this._wrapped,arguments),this._chain)}});m.prototype.chain=function(){this._chain= 31 | true;return this};m.prototype.value=function(){return this._wrapped}}).call(this); 32 | 
-------------------------------------------------------------------------------- /doc/_build/html/genindex.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 8 | 9 | 10 | Index — AC_tools 1.0 documentation 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 30 | 31 |
32 |
33 |
34 |
35 | 36 | 37 |

Index

38 | 39 |
40 | 41 |
42 | 43 | 44 |
45 |
46 |
47 | 63 |
64 |
65 | 74 | 78 | 79 | -------------------------------------------------------------------------------- /doc/_build/html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | Welcome to AC_tools’s documentation! — AC_tools 1.0 documentation 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 29 | 30 |
31 |
32 |
33 |
34 | 35 |
36 |

Welcome to AC_tools’s documentation!

37 |
38 |
39 |
40 |
41 |

Indices and tables

42 | 47 |
48 | 49 | 50 |
51 |
52 |
53 | 82 |
83 |
84 | 93 | 97 | 98 | -------------------------------------------------------------------------------- /doc/_build/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tsherwen/AC_tools/eb308d5f547fbe1dfb131c83e9504f5f5c8f6328/doc/_build/html/objects.inv -------------------------------------------------------------------------------- /doc/_build/html/search.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | Search — AC_tools 1.0 documentation 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 24 | 25 | 26 | 27 | 28 | 29 | 38 | 39 |
40 |
41 |
42 |
43 | 44 |

Search

45 |
46 | 47 |

48 | Please activate JavaScript to enable the search 49 | functionality. 50 |

51 |
52 |

53 | From here you can search these documents. Enter your search 54 | words into the box below and click "search". Note that the search 55 | function will automatically search for all of the words. Pages 56 | containing fewer words won't appear in the result list. 57 |

58 |
59 | 60 | 61 | 62 |
63 | 64 |
65 | 66 |
67 | 68 |
69 |
70 |
71 | 75 |
76 |
77 | 86 | 90 | 91 | -------------------------------------------------------------------------------- /doc/_build/html/searchindex.js: -------------------------------------------------------------------------------- 1 | Search.setIndex({docnames:["index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,sphinx:54},filenames:["index.rst"],objects:{},objnames:{},objtypes:{},terms:{index:0,modul:0,page:0,search:0},titles:["Welcome to AC_tools\u2019s documentation!"],titleterms:{ac_tool:0,document:0,indic:0,tabl:0,welcom:0}}) -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
14 | # 15 | import os 16 | import sys 17 | sys.path.insert(0, os.path.abspath('../')) 18 | 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = u'AC_tools' 23 | copyright = u'2019, T.Sherwen' 24 | author = u'T.Sherwen' 25 | 26 | # The short X.Y version 27 | version = u'' 28 | # The full version, including alpha/beta/rc tags 29 | release = u'1.0' 30 | 31 | 32 | # -- General configuration --------------------------------------------------- 33 | 34 | # If your documentation needs a minimal Sphinx version, state it here. 35 | # 36 | # needs_sphinx = '1.0' 37 | html_theme = 'nature' 38 | # Add any Sphinx extension module names here, as strings. They can be 39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 40 | # ones. 41 | extensions = [ 42 | 'sphinx.ext.autodoc', 43 | 'sphinx.ext.intersphinx', 44 | 'sphinx.ext.coverage', 45 | 'sphinx.ext.mathjax', 46 | 'sphinx.ext.ifconfig', 47 | 'sphinx.ext.githubpages', 48 | 'sphinx.ext.napoleon', 49 | ] 50 | 51 | napoleon_google_docstring = False 52 | napoleon_use_param = False 53 | napoleon_use_ivar = True 54 | # Add any paths that contain templates here, relative to this directory. 55 | templates_path = ['_templates'] 56 | 57 | # The suffix(es) of source filenames. 58 | # You can specify multiple suffix as a list of string: 59 | # 60 | # source_suffix = ['.rst', '.md'] 61 | source_suffix = '.rst' 62 | 63 | # The master toctree document. 64 | master_doc = 'index' 65 | 66 | # The language for content autogenerated by Sphinx. Refer to documentation 67 | # for a list of supported languages. 68 | # 69 | # This is also used if you do content translation via gettext catalogs. 70 | # Usually you set "language" from the command line for these cases. 71 | language = None 72 | 73 | # List of patterns, relative to source directory, that match files and 74 | # directories to ignore when looking for source files. 
75 | # This pattern also affects html_static_path and html_extra_path. 76 | exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] 77 | 78 | # The name of the Pygments (syntax highlighting) style to use. 79 | pygments_style = None 80 | 81 | 82 | # -- Options for HTML output ------------------------------------------------- 83 | 84 | # The theme to use for HTML and HTML Help pages. See the documentation for 85 | # a list of builtin themes. 86 | # 87 | #html_theme = 'alabaster' 88 | 89 | # Theme options are theme-specific and customize the look and feel of a theme 90 | # further. For a list of options available for each theme, see the 91 | # documentation. 92 | # 93 | # html_theme_options = {} 94 | 95 | # Add any paths that contain custom static files (such as style sheets) here, 96 | # relative to this directory. They are copied after the builtin static files, 97 | # so a file named "default.css" will overwrite the builtin "default.css". 98 | html_static_path = ['_static'] 99 | 100 | # Custom sidebar templates, must be a dictionary that maps document names 101 | # to template names. 102 | # 103 | # The default sidebars (for documents that don't match any pattern) are 104 | # defined by theme itself. Builtin themes are using these templates by 105 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 106 | # 'searchbox.html']``. 107 | # 108 | # html_sidebars = {} 109 | 110 | 111 | # -- Options for HTMLHelp output --------------------------------------------- 112 | 113 | # Output file base name for HTML help builder. 114 | htmlhelp_basename = 'AC_toolsdoc' 115 | 116 | 117 | # -- Options for LaTeX output ------------------------------------------------ 118 | 119 | latex_elements = { 120 | # The paper size ('letterpaper' or 'a4paper'). 121 | # 122 | # 'papersize': 'letterpaper', 123 | 124 | # The font size ('10pt', '11pt' or '12pt'). 125 | # 126 | # 'pointsize': '10pt', 127 | 128 | # Additional stuff for the LaTeX preamble. 
129 | # 130 | # 'preamble': '', 131 | 132 | # Latex figure (float) alignment 133 | # 134 | # 'figure_align': 'htbp', 135 | } 136 | 137 | # Grouping the document tree into LaTeX files. List of tuples 138 | # (source start file, target name, title, 139 | # author, documentclass [howto, manual, or own class]). 140 | latex_documents = [ 141 | (master_doc, 'AC_tools.tex', u'AC\\_tools Documentation', 142 | u'T.Sherwen', 'manual'), 143 | ] 144 | 145 | 146 | # -- Options for manual page output ------------------------------------------ 147 | 148 | # One entry per manual page. List of tuples 149 | # (source start file, name, description, authors, manual section). 150 | man_pages = [ 151 | (master_doc, 'ac_tools', u'AC_tools Documentation', 152 | [author], 1) 153 | ] 154 | 155 | 156 | # -- Options for Texinfo output ---------------------------------------------- 157 | 158 | # Grouping the document tree into Texinfo files. List of tuples 159 | # (source start file, target name, title, author, 160 | # dir menu entry, description, category) 161 | texinfo_documents = [ 162 | (master_doc, 'AC_tools', u'AC_tools Documentation', 163 | author, 'AC_tools', 'One line description of project.', 164 | 'Miscellaneous'), 165 | ] 166 | 167 | 168 | # -- Options for Epub output ------------------------------------------------- 169 | 170 | # Bibliographic Dublin Core info. 171 | epub_title = project 172 | 173 | # The unique identifier of the text. This can be a ISBN number 174 | # or the project homepage. 175 | # 176 | # epub_identifier = '' 177 | 178 | # A unique identification for the text. 179 | # 180 | # epub_uid = '' 181 | 182 | # A list of files that should not be packed into the epub file. 
183 | epub_exclude_files = ['search.html'] 184 | 185 | 186 | # -- Extension configuration ------------------------------------------------- 187 | 188 | # -- Options for intersphinx extension --------------------------------------- 189 | 190 | # Example configuration for intersphinx: refer to the Python standard library. 191 | intersphinx_mapping = {'https://docs.python.org/': None} 192 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. AC_tools documentation master file, created by 2 | sphinx-quickstart on Tue Feb 26 09:51:25 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to AC_tools's documentation! 7 | ==================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 4 11 | :caption: Contents: 12 | 13 | 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: ac_tools_py3_7_base_TESTv2 2 | channels: 3 | - defaults 4 | dependencies: 5 | - python=3.7 6 | - cartopy 7 | - matplotlib 8 | - netcdf4 9 | - numpy 10 | - pandas 11 | - pytest 12 | - scipy 13 | - pyyaml 14 | - xarray 15 | -------------------------------------------------------------------------------- /examples/plot_global_ozone.py: -------------------------------------------------------------------------------- 1 | import AC_tools as AC 2 | 3 | # Download the example data if it is not already downloaded. 4 | from AC_tools.Scripts import get_data_files 5 | 6 | # Specify the working directory 7 | wd = "../data" 8 | 9 | # Get the GeosChem species data from the wd 10 | my_data = AC.get_GC_output(wd, species='O3') 11 | 12 | 13 | # Get a 2d slice from the 3d array 14 | my_data = my_data[:, :, 0, 0] 15 | 16 | # Turn from part per part to part per billion 17 | my_data = my_data*1E9 18 | 19 | # Create the plot 20 | AC.map_plot(my_data) 21 | 22 | # Save the plot and show it. 
23 | AC.save_plot("my_plot") 24 | AC.show_plot() 25 | -------------------------------------------------------------------------------- /readthedocs.yml: -------------------------------------------------------------------------------- 1 | uild: 2 | image: latest 3 | 4 | python: 5 | version: 3.6 6 | 7 | requirements_file: doc/requirements.txt -------------------------------------------------------------------------------- /scripts/.gitignore: -------------------------------------------------------------------------------- 1 | *.dat -------------------------------------------------------------------------------- /scripts/2D_GEOSChem_slice_subregion_plotter_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Plotter for 2D slices of GEOS-Chem output NetCDFs files. 5 | 6 | NOTES 7 | --- 8 | - This is setup for Cly, but many other options (plot/species) are availible 9 | by just updating passed variables/plotting function called. 10 | """ 11 | 12 | import AC_tools as AC 13 | import numpy as np 14 | import matplotlib.pyplot as plt 15 | 16 | 17 | def main(): 18 | """ 19 | Basic plotter of NetCDF files using AC_tools 20 | """ 21 | # --- Local settings hardwired here... 22 | fam = 'Cly' # Family to plot 23 | # print species in family for reference... 24 | print((AC.GC_var(fam))) 25 | 26 | # --- Get working directory etc from command line (as a dictionary object) 27 | # (1st argument is fil directory with folder, 2nd is filename) 28 | Var_rc = AC.get_default_variable_dict() 29 | # Get details on extracted data (inc. resolution) 30 | Data_rc = AC.get_shared_data_as_dict(Var_rc=Var_rc) 31 | 32 | # --- extract data and units of data for family/species... 33 | arr, units = AC.fam_data_extractor(wd=Var_rc['wd'], fam=fam, 34 | res=Data_rc['res'], rtn_units=True, annual_mean=False) 35 | 36 | # --- Process data (add and extra processing of data here... 
) 37 | # take average over time 38 | print((arr.shape)) 39 | arr = arr.mean(axis=-1) 40 | # Select surface values 41 | print((arr.shape)) 42 | arr = arr[..., 0] 43 | # convert to pptv 44 | arr = arr*1E12 45 | units = 'pptv' 46 | 47 | # --- Plot up data... 48 | print((arr.shape)) 49 | # - Plot a (very) simple plot ... 50 | # AC.map_plot( arr.T, res=Data_rc['res'] ) 51 | # - plot a slightly better plot... 52 | # (loads of options here - just type help(AC.plot_spatial_figure) in ipython) 53 | # set range for data... 54 | fixcb = np.array([0., 100.]) 55 | # number of ticks on colorbar (make sure the fixcb range divides by this) 56 | nticks = 6 57 | interval = (1/3.) # number of lat/lon labels... (x*15 degrees... ) 58 | # set limits of plot 59 | lat_min = 5. 60 | lat_max = 75. 61 | lon_min = -30. 62 | lon_max = 60. 63 | left_cb_pos = 0.85 # set X (fractional) position 64 | axis_titles = True # add labels for lat and lon 65 | # title for plot 66 | title = "Plot of annual average {}".format(fam) 67 | # save as pdf (just set to True) or show? 68 | # figsize = (7,5) # figsize to use? (e.g. square or rectangular plot) 69 | 70 | # call plotter... 71 | AC.plot_spatial_figure(arr, res=Data_rc['res'], units=units, fixcb=fixcb, 72 | lat_min=lat_min, lat_max=lat_max, lon_min=lon_min, lon_max=lon_max, 73 | axis_titles=axis_titles, left_cb_pos=left_cb_pos, 74 | nticks=nticks, interval=interval, title=title, show=False) 75 | 76 | # are the spacings right? - if not just up 77 | bottom = 0.1 78 | top = 0.9 79 | left = 0.1 80 | right = 0.9 81 | fig = plt.gcf() 82 | fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right) 83 | 84 | # show and save as PDF? 
85 | plt.savefig('pete_plot.png') 86 | AC.show_plot() 87 | 88 | 89 | if __name__ == "__main__": 90 | main() 91 | -------------------------------------------------------------------------------- /scripts/AC_tools: -------------------------------------------------------------------------------- 1 | ../../AC_tools -------------------------------------------------------------------------------- /scripts/Basic_GEOSChem_bpch_plotter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # modules 3 | import AC_tools as AC 4 | import numpy as np 5 | import sys 6 | import matplotlib.pyplot as plt 7 | 8 | # Setup, choose species 9 | species = 'O3' # 'CO2' 10 | RMM_species = 16.*3. 11 | res = '4x5' # ( e.g. '4x5', '2x2.5', '0.5x0.666', '0.25x0.3125' ) 12 | unit, scale = AC.tra_unit(species, scale=True) 13 | 14 | # debug/print verbose output? 15 | debug = True 16 | 17 | # Only consider GEOS-Chem chemical troposphere 18 | trop_limit = True 19 | calc_burden = False # True 20 | 21 | try: # chcck if a directory was given ad command line 22 | wd = sys.argv[1] 23 | except: # Otherwise use path below 24 | wd = '' 25 | 26 | # get data as 4D array ( lon, lat, alt, time ) 27 | mixing_ratio = AC.get_GC_output(wd, species=species, category='IJ-AVG-$', 28 | trop_limit=trop_limit) 29 | print((mixing_ratio.shape)) 30 | 31 | # Get data to calculate burden 32 | if calc_burden: 33 | 34 | # Get air mass as numpy array 35 | air_mass = AC.get_GC_output(wd, vars=['BXHGHT_S__AD'], 36 | trop_limit=trop_limit) 37 | # get time in troposphere as fraction from ctm.bpchh diagnostics 38 | time_in_trop = AC.get_GC_output(wd, vars=['TIME_TPS__TIMETROP'], 39 | trop_limit=trop_limit) 40 | # print shapes of array to screen 41 | print([i.shape for i in (mixing_ratio, air_mass, time_in_trop)]) 42 | 43 | # calc the total & mean mass of a speices - select data you want to calc & print 44 | # mass (convert to g) / RMM air 45 | air_moles = (air_mass * 1E3) / 
(.78*(2.*14.)+.22*(2.*16.)) 46 | 47 | # moles * v/v 48 | species_moles = air_moles * mixing_ratio / scale 49 | 50 | # convert to mass 51 | species_mass = species_moles * RMM_species 52 | print((species_mass.shape)) 53 | 54 | # Get Global Burden, converting to Tg and removing stratosphere 55 | print(( 56 | np.sum(np.mean(species_mass[:, :, :38, :]*time_in_trop, axis=3)) / 1E12)) 57 | 58 | # select data you want to plot - set as mean() of time, but simply remove 'mean(axis=3)' & give number to select desired timestamp 59 | mixing_ratio = mixing_ratio[:, :, :, :].mean(axis=3) 60 | 61 | # select surface layer 62 | print((mixing_ratio.shape)) 63 | mixing_ratio = mixing_ratio[:, :, 0] 64 | print((mixing_ratio.shape)) 65 | 66 | # Transpose to X vs. Y coordinate 67 | mixing_ratio = np.transpose(mixing_ratio) 68 | print((mixing_ratio.shape, res)) 69 | 70 | # plot surface 71 | #plt, cb = AC.basic_map_plot( mixing_ratio, species, unit, res=res, debug=debug ) 72 | AC.map_plot(mixing_ratio, title=species, units=unit, 73 | res=res, wd=wd, grid='off', debug=debug) 74 | 75 | # Show plot 76 | plt.show() 77 | -------------------------------------------------------------------------------- /scripts/Basic_planeflight_plotter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # ------------ Planeflight Plotter - tms ------------------------------------- 3 | # -------------- 4 | import glob 5 | import sys 6 | import matplotlib.pyplot as plt 7 | import AC_tools as AC 8 | 9 | # --------- SET PLOTTING HERE ---------------------------------------- 10 | # ------------- 11 | # Plot out data? 12 | # debug=True 13 | 14 | # Where are the model files? And What Species do you want to plot? 15 | # ---- Get inputs from command line/defaults 16 | try: # chcck if a directory was given at command line 17 | wd = sys.argv[1] 18 | except: # Otherwise use path below 19 | wd = '' 20 | 21 | # Which species to plot? 
(must be in list form) 22 | try: # chcck if a directory was given at command line 23 | species_to_plot = sys.argv[2] 24 | except: # Otherwise use path below 25 | species_to_plot = 'Cl2' # 'O3'#'ClNO2'#'Cl' 26 | 27 | # What years, months, days to plot? (must be in list form) 28 | # set day_to_use by adjusting range 29 | years_to_use, months_to_use = ['2005'], ['05', '06', '07'] 30 | days_to_use = ["{0:0>2}".format(i) for i in range(1, 31, 1)] 31 | print((years_to_use, months_to_use, days_to_use)) 32 | 33 | # Which locations? (where 10 = 1000 Hpa, 08 = 800 Hpa etc ... ) (must be in list form) 34 | # e.g. locations=['TX1','LA1'] # must be as a list of strings 35 | # must be as a list of strings 36 | # locations=['WE'] 37 | locations = ['WEY'] 38 | # locations=['CVO'] 39 | # locations=['BEI'] 40 | print(locations) 41 | 42 | # Scaling (e.g. pptv or ppbv ) 43 | units, scale = 'p.p.t.v.', 1E12 44 | #units, scale = 'p.p.b.v.', 1E9 45 | #units, scale = 's$^{-1}$', 1 46 | 47 | # Model version 48 | ver = '3.0' 49 | 50 | # look in the "plane_flight_logs" directory 51 | wd = wd + '/plane_flight_logs/plane.log.*' 52 | #wd = wd+ '/plane.log.*' 53 | print(wd) 54 | print((sorted(glob.glob(wd)))) 55 | 56 | # Asectics 57 | fontsize = 10 58 | 59 | # ----------- START PLOTTING HERE ---------------------------------------- 60 | # ------------- 61 | 62 | # Get species name in TRA_?? 
(planefligth output) form 63 | # if 'TRA' in species_to_plot: 64 | species_to_plot = [AC.what_species_am_i(species_to_plot, ver=ver, invert=True)] 65 | # else: 66 | # species_to_plot=[species_to_plot] 67 | 68 | # setup figure 69 | fig = plt.figure(figsize=(15, 6), dpi=80, facecolor='w', edgecolor='k') 70 | 71 | # Loop sites in site list and plot species 72 | for i, site in enumerate(locations): 73 | 74 | # extract data from planeflight (csv) files 75 | model, names = AC.readfile(sorted(glob.glob(wd)), site, 76 | years_to_use, months_to_use, days_to_use) 77 | 78 | # get species index in list 79 | k = names.index(species_to_plot[0]) 80 | 81 | # plot up extracted data 82 | plt.plot(AC.year_to_since_2006(model), model[:, k]*scale, color=plt.cm.jet(1.*i/len(locations)), 83 | label='{0}'.format(locations[i])) 84 | 85 | # Beatify plot 86 | plt.xlabel('(CVAO days)', fontsize=fontsize) 87 | plt.ylabel('({})'.format(units), fontsize=fontsize) 88 | plt.legend(loc='upper right', fontsize=fontsize) 89 | plt.grid(b=None, which='major', axis='both', alpha=0.3) 90 | plt.rcParams.update({'font.size': fontsize}) 91 | 92 | # Show plt 93 | plt.show() 94 | -------------------------------------------------------------------------------- /scripts/KPP_analyse_vertical_Ox_loss_by_route_BPCH.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Plot Vertical loss of Ox by family. Function can also print budget to csv. 5 | 6 | This is an example script to use AC_tools KPP mechanism parsing/tagging functions. 
The 7 | 8 | python AC_tools/Scripts/analyse_vertical_Ox_loss_by_route_in_KPP_mech.py 9 | 10 | """ 11 | import AC_tools as AC 12 | import numpy as np 13 | import sys 14 | import matplotlib.pyplot as plt 15 | import pandas as pd 16 | from netCDF4 import Dataset 17 | import os 18 | 19 | 20 | def main(wd=None, CODE_wd=None): 21 | """ 22 | Driver for Ox loss analysis via KPP in GEOS-Chem from bpch output 23 | 24 | Notes 25 | ----- 26 | - comment/uncomment functions as required 27 | """ 28 | # Manually let locations of Ox loss here 29 | root = '/users/ts551/scratch/GC/' 30 | CODE_wd = root+'/Code/Code.v11-02_Cl_v3_0/' 31 | wd = root+'rundirs/GC_v11_2d_plus_Clv3/geosfp_4x5_tropchem_Cl.v3_0.1year.2016.tagged/' 32 | Mechanism = 'Tropchem' 33 | # Get all the necessary data as as a dictionary object 34 | Ox_fam_dict = AC.get_Ox_fam_dicts_BPCH(wd=wd, CODE_wd=CODE_wd, 35 | weight_by_molecs=True, 36 | Mechanism=Mechanism) 37 | # Plot vertical odd oxygen (Ox) loss via route (chemical family) 38 | Data_rc = Ox_fam_dict['Data_rc'] 39 | alt_array = Data_rc['alt'] 40 | AC.plot_vertical_fam_loss_by_route(Ox_fam_dict=Ox_fam_dict, 41 | Mechanism=Mechanism, 42 | alt_array=alt_array) 43 | 44 | # Get all the necessary data as as a dictionary object 45 | # (Not weighted by molecules) 46 | Ox_fam_dict = AC.get_Ox_fam_dicts_BPCH(wd=wd, CODE_wd=CODE_wd, 47 | weight_by_molecs=False, 48 | rm_strat=True, 49 | Mechanism=Mechanism) 50 | # Analyse odd oxygen (Ox) loss budget via route (chemical family) 51 | AC.calc_fam_loss_by_route(Ox_fam_dict=Ox_fam_dict, Mechanism=Mechanism) 52 | 53 | 54 | if __name__ == "__main__": 55 | main() 56 | -------------------------------------------------------------------------------- /scripts/KPP_analyse_vertical_Ox_loss_by_route_NetCDF.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Plot Vertical loss of Ox by family. Function can also print budget to csv. 
5 | 6 | This is an example script to use AC_tools KPP mechanism parsing/tagging functions. The 7 | 8 | python AC_tools/Scripts/analyse_vertical_Ox_loss_by_route_in_KPP_mech.py 9 | 10 | """ 11 | import AC_tools as AC 12 | import numpy as np 13 | import sys 14 | import matplotlib.pyplot as plt 15 | import pandas as pd 16 | from netCDF4 import Dataset 17 | import os 18 | 19 | 20 | def main(wd=None, CODE_wd=None, verbose=False, debug=False): 21 | """ 22 | Driver for Ox loss analysis via KPP in GEOS-Chem from NetCDF output 23 | 24 | Notes 25 | ----- 26 | - comment/uncomment functions as required 27 | """ 28 | # - Local variables 29 | fam = 'LOx' 30 | ref_spec = 'O3' 31 | # Manually set locations of model output with the Ox loss diagnostic 32 | root = '/users/ts551/scratch/GC/' 33 | CODE_wd = root + '/Code/Code.BleedingEdge/' 34 | wd = root + '/rundirs/' 35 | wd += 'merra2_4x5_standard.v12.9.1.BASE.Oi.MacDonald2014.tagged/' 36 | wd += '/OutputDir/' 37 | # Mechanism = 'Tropchem' 38 | Mechanism = 'Standard' 39 | # Get locations of model output/core 40 | assert os.path.exists(wd), 'working directory not found @: {}'.format(wd) 41 | CODE_wd = '/{}/KPP/{}/'.format(CODE_wd, Mechanism) 42 | assert os.path.exists(CODE_wd), 'code directory not found @: ' + CODE_wd 43 | 44 | # - Model output from tagged mechanism run 45 | 46 | # Get all the necessary data as as a dictionary object? 47 | # NOTE: this is a previous approach using dictionaries 48 | 49 | # Get a dictionary of information about the model run 50 | # NOTE: Should this approach be retired? Yes. 51 | # full_vert_grid = True 52 | # VarDict = AC.get_default_variable_dict(full_vert_grid=full_vert_grid, 53 | # wd=wd) 54 | # Now Get the StateMet object... for time in troposphere diagnostic 55 | StateMet = AC.get_StateMet_ds(wd=wd) 56 | 57 | # - KPP mechanism 58 | # - Analyse the Ox loss budget's numerical terms 59 | # Get the dictionary of the KPP mechanism. 
60 | Ox_fam_dict = AC.get_Ox_fam_dicts(fam=fam, ref_spec=ref_spec, 61 | Mechanism=Mechanism, 62 | # tag_prefix=tag_prefix, 63 | wd=wd, CODE_wd=CODE_wd, 64 | StateMet=StateMet, 65 | rm_strat=True, 66 | weight_by_molecs=True, 67 | ) 68 | 69 | LatLonAlt_dict = AC.gchemgrid(rtn_dict=True) 70 | alt_array = LatLonAlt_dict['c_km_geos5'] 71 | # Plot vertical odd oxygen (Ox) loss via route (chemical family) 72 | suffix = 'v12.9.1_.png' 73 | AC.plot_vertical_fam_loss_by_route(Ox_fam_dict=Ox_fam_dict, 74 | alt_array=alt_array, 75 | Mechanism=Mechanism, 76 | suffix=suffix) 77 | 78 | # - Analyse the Ox loss budget's numerical terms 79 | # Get the dictionary of the KPP mechanism. 80 | Ox_fam_dict = AC.get_Ox_fam_dicts(fam=fam, ref_spec=ref_spec, 81 | Mechanism=Mechanism, 82 | # tag_prefix=tag_prefix, 83 | wd=wd, CODE_wd=CODE_wd, 84 | StateMet=StateMet, 85 | rm_strat=True, 86 | weight_by_molecs=False, 87 | ) 88 | 89 | # Analyse odd oxygen (Ox) loss budget via route (chemical family) 90 | suffix = 'v12.9.1' 91 | df = AC.calc_fam_loss_by_route(Ox_fam_dict=Ox_fam_dict, 92 | Mechanism=Mechanism, 93 | suffix=suffix) 94 | -------------------------------------------------------------------------------- /scripts/KPP_mk_tagged_GC_Mechanism.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Automatically tag reactions in a GEOS-Chem (KPP) mechanism for a family/string 5 | 6 | Notes 7 | ------- 8 | - This script using AC_tools contains functions to extract data from KPP mechanism files. This utilizes the fact that: 9 | - KPP mechanisms are constructed and information can be extracted from both the produced files (e.g. gckpp_Monitor.F90) and those the mechanism is constructed from (e.g. *.eqn) 10 | - When using P/L tagging, the output P/L information (e.g. 
rxns in family and their stoichiometry in gckpp_Monitor.F90) can be used to add more specific tags/for post-processing 11 | 12 | """ 13 | import numpy as np 14 | import pandas as pd 15 | import xarray as xr 16 | from netCDF4 import Dataset 17 | import AC_tools as AC 18 | import matplotlib.pyplot as plt 19 | import datetime as datetime 20 | import glob 21 | import re 22 | 23 | 24 | def main(folder=None, print_formatted_KPP_file=True, GC_version=None, 25 | verbose=True, mechanism='Standard', debug=False): 26 | """ 27 | Parse combined KPP file (>= v11-2d) to dictionary of pd.DataFrames, then 28 | print these into files than can be pasted into a GEOS-Chem KPP mechanism 29 | 30 | Parameters 31 | ------- 32 | print_formatted_KPP_file (bool): Save the uniformly formated .eqn file 33 | folder (str): folder of GEOS-Chem code directory 34 | mechanism (str): KPP mechanism to create tag files for (e.g. Standard) 35 | GC_version (str): version of GEOS-Chem (e.g. v11-2) 36 | debug (bool): print out extra information for debugging 37 | verbose (bool): print out extra information during processing 38 | 39 | Returns 40 | ------- 41 | (None) 42 | 43 | Notes 44 | ----- 45 | - The script below assumes this workflow: 46 | (1a) Make sure formatting is uniform in the *.eqn file (if not do step 1b) 47 | ( check this by seeing if diff between EXISTING_MECH_???.eqn current .eqn) 48 | (1b) Over write the .eqn in the mechanism directory with the new .eqn file 49 | (2) Compile the via build_mechanism.sh script (or "kpp gckpp.kpp") 50 | (3) If no compile issues, then tag the mechansim 51 | (4) Update the gckpp.kpp file to include the names of the new P/L tags 52 | ( lines outputted in 'gckpp.kpp_extra_lines_for_tagged_mech_?' file ) 53 | (5) Run "make realclean" in the code directory and compile 54 | (7) Run the model with the compilie executable inc. 
P/L tags 55 | (6) P/L ouput can then be analysed by standard approaches 56 | ( AC_tools contains some functions that can automate this too ) 57 | - Please see the GEOS-Chem wiki for more info. on Prod-loss tagging 58 | http://wiki.seas.harvard.edu/geos-chem/index.php/FlexChem 59 | - Please note that you may need to update "MAX_FAMILIES" in kpp (as on the wiki) 60 | """ 61 | import re 62 | # - Local settings 63 | # GEOS-chem version? 64 | if isinstance(GC_version, type(None)): 65 | # TODO - get this online from log files 66 | # (already implemented in AC_Tools) 67 | # GC_version = 'v11-2' 68 | GC_version = 'v12.9.1' 69 | # Add mechanism name to string (and KPP folder) if not in folder string 70 | if not all([i in folder for i in ['KPP', mechanism]]): 71 | folder += 'KPP/{}/'.format(mechanism) 72 | 73 | # - Process input files 74 | # Create a dictionary to store various (e.g. gas-phase, Heterogeneous) 75 | KPP_dicts = {} 76 | # Get the mechanism KPP file 77 | if debug: 78 | print(folder, glob.glob(folder+'/*.eqn')) 79 | filename = glob.glob(folder+'/*.eqn')[0].split('/')[-1] 80 | # Get header lines from *.eqn file 81 | headers = AC.KPP_eqn_file_headers(folder=folder, filename=filename) 82 | # Get species and details on species as a DataFrame 83 | species_df = AC.KPP_eqn_file_species(folder=folder, filename=filename) 84 | # Get dictionaries of all reactions 85 | rxn_dicts = AC.get_dicts_of_KPP_eqn_file_reactions(folder=folder, 86 | filename=filename) 87 | # Process rxns to be in dictionaries of DataFrames 88 | # (with extra diagnostic columns, inc. reactants, products, metadata,...) 89 | rxn_dicts = AC.process_KPP_rxn_dicts2dfs(rxn_dicts=rxn_dicts) 90 | # Update the numbering of DataFrame indexes... 
91 | Gas_dict = rxn_dicts['Gas-phase'] 92 | Het_dict = rxn_dicts['Heterogeneous'] 93 | Hv_dict = rxn_dicts['Photolysis'] 94 | # Update index start points 95 | Het_dict.index = Het_dict.index + Gas_dict.shape[0] 96 | Hv_dict.index = Hv_dict.index + Gas_dict.shape[0] + Het_dict.shape[0] 97 | rxn_dicts['Heterogeneous'] = Het_dict 98 | rxn_dicts['Photolysis'] = Hv_dict 99 | 100 | # - Print out input KPP files with updated formatting (prior to tagging) 101 | # (Uniform formatting required for parsing - this step may not be required) 102 | if print_formatted_KPP_file: 103 | extr_str = 'EXISTING_MECH_{}_{}'.format(mechanism, GC_version) 104 | AC.print_out_dfs2KPP_eqn_file(headers=headers, species_df=species_df, 105 | rxn_dicts=rxn_dicts, extr_str=extr_str, 106 | ) 107 | 108 | # - Get outputted KPP files and process these... 109 | # Get outputted KPP mechanism 110 | KPP_mech = AC.get_dict_of_KPP_mech(wd=folder, GC_version=GC_version) 111 | 112 | # ---------------------- Tagging of KPP Mechanism 113 | # Initialise dictionary to store tags used for reactions 114 | tagged_rxns = {} 115 | current_tag = 'T000' 116 | tag_prefix = 'T' 117 | AttrsVar = 'search_str' 118 | # Use a counter to track number of reactions tagged (NOTE: not tags) 119 | counter = 0 120 | 121 | # - Tag LOx reactions 122 | # Get tagged LOx family reactions (as a dictionary) 123 | fam = 'LOx' 124 | df_fam = AC.get_reactants_and_products4tagged_fam(folder=folder, 125 | KPP_output_mech=KPP_mech, 126 | fam=fam) 127 | # Loop reaction indexes for LOx family and add tags 128 | for n_key_, key_ in enumerate(rxn_dicts.keys()): 129 | df_tmp = rxn_dicts[key_].copy() 130 | # Get indices of rxnes in tagged family 131 | rxns_in_mech = sorted(df_tmp[df_tmp.index.isin(df_fam.index)].index) 132 | print(len(rxns_in_mech)) 133 | for idx in rxns_in_mech: 134 | # Update the counter (NOTE: counter starts from 1) 135 | counter += 1 136 | # retrive the reaction string 137 | rxn_str = df_tmp.loc[idx]['rxn_str'] 138 | # Get a new tag 
and add to the reaction string 139 | current_tag = AC.get_next_KPP_PL_tag(current_tag, 140 | tag_prefix=tag_prefix) 141 | rxn_str += ' + ' + current_tag 142 | df_tmp.loc[idx, 'rxn_str'] = rxn_str 143 | # For # rxn tagged - save the reaction, its tag and its family 144 | tmp_dict = {'tag': current_tag, AttrsVar: fam, 'rxn_str': rxn_str} 145 | tagged_rxns[counter] = tmp_dict 146 | # Now update the DataFrame in the rxn_dicts dictionary 147 | rxn_dicts[key_] = df_tmp 148 | 149 | # - Add tags for other species too? 150 | # Number of reactions already tagged? 151 | counter = max(tagged_rxns.keys()) 152 | current_tag = '{}{}'.format(tag_prefix, counter) 153 | search_strs = 'BrSAL', 'CH3Br', 'CH3Cl', 'CH2Cl2', 'CHCl3', '0.150IBr', 154 | search_strs += 'HOBr', 'ClNO2', 155 | # Add tags for halogen families 156 | rxn_dicts, tagged_rxns = AC.add_tags4strs2mech(rxn_dicts, counter=counter, 157 | search_strs=search_strs, 158 | tagged_rxns=tagged_rxns, 159 | # debug=debug 160 | ) 161 | counter = max(tagged_rxns.keys()) 162 | current_tag = '{}{}'.format(tag_prefix, counter) 163 | 164 | # -------------------------------- 165 | # --- A couple of additional examples of functionality are pasted below 166 | # - Add tags for when a species is a reactant 167 | # Number of reactions already tagged? 168 | # counter = max(tagged_rxns.keys()) 169 | # Add tags for OH reactivity 170 | # search_strs = [ 'OH ', ] 171 | # rxn_dicts, tagged_rxns = AC.add_tags4strs2mech(rxn_dicts, 172 | # counter=counter, 173 | # search_reactants=True, 174 | # search_strs=search_strs, 175 | # tagged_rxns=tagged_rxns, ) 176 | # counter = max(tagged_rxns.keys()) 177 | 178 | # - Add tags for when a species is a product 179 | # Number of reactions already tagged? 
180 | # counter = max(tagged_rxns.keys()) 181 | # Add tags for OH reactivity 182 | # search_strs = [ 'ACET ', ] 183 | # rxn_dicts, tagged_rxns = AC.add_tags4strs2mech(rxn_dicts, 184 | # counter=counter, 185 | # search_products=True, 186 | # search_strs=search_strs, 187 | # tagged_rxns=tagged_rxns, ) 188 | # counter = max(tagged_rxns.keys()) 189 | # -------------------------------- 190 | 191 | counter = max(tagged_rxns.keys()) 192 | current_tag = '{}{}'.format(tag_prefix, counter) 193 | search_strs = [ 194 | 'CH4', 'C2H6', 'C3H8', 'ACET', 'MOH', 'PRPE', 'ALK4', 'CH2O', 'CH2O', 195 | 'ALD2', 'ISOP' 196 | ] 197 | search_strs = [i+' ' for i in search_strs] 198 | # Add tags for halogen families 199 | rxn_dicts, tagged_rxns = AC.add_tags4strs2mech(rxn_dicts, counter=counter, 200 | search_strs=search_strs, 201 | tagged_rxns=tagged_rxns, 202 | search_reactants=True, ) 203 | counter = max(tagged_rxns.keys()) 204 | current_tag = '{}{}'.format(tag_prefix, counter) 205 | 206 | # - Add the species to the species_df 207 | # Number of reactions tagged 208 | alltags = [tagged_rxns[i]['tag'] for i in list(tagged_rxns.keys())] 209 | tags = list(sorted(set(alltags))) 210 | ptr_str = '# of rxns tagged = {} (of which unique = {})' 211 | if verbose: 212 | print(ptr_str.format(len(alltags), len(tags))) 213 | # Make a DataFrame from the dictionary of *all* tagged rxns 214 | df_tags = pd.DataFrame(tagged_rxns).T 215 | # Now make a DataFrame with details per tag (listing fams for tags) 216 | df_spec_tmp = pd.DataFrame([[False]]*len(tags), columns=['inactive']) 217 | df_spec_tmp.index = tags 218 | for tag in df_spec_tmp.index: 219 | infams = df_tags.loc[df_tags['tag'] == tag][AttrsVar].values.tolist() 220 | Description = 'Prod. 
tag description: {}'.format(', '.join(infams)) 221 | print(tag, infams, Description) 222 | df_spec_tmp.loc[df_spec_tmp.index == tag, 'Description'] = Description 223 | df_spec_tmp.sort_values('Description') 224 | # Add to existing DataFrame 225 | species_df = pd.concat([species_df, df_spec_tmp]) 226 | 227 | # - Print out updated KPP .eqn file (with tags) 228 | extr_str = 'TAGGED_MECH_{}_{}'.format(mechanism, GC_version) 229 | AC.print_out_dfs2KPP_eqn_file(headers=headers, species_df=species_df, 230 | rxn_dicts=rxn_dicts, 231 | extr_str=extr_str) 232 | 233 | # - Save out the tags and the reactions tagged 234 | # (to use for post-processing of tagged output) 235 | extr_str = '{}_{}'.format(mechanism, GC_version) 236 | savetitle = 'Tagged_reactions_in_{}.csv'.format(extr_str) 237 | df_tags.to_csv(savetitle) 238 | 239 | # - Save out lines that need to be added to the gckpp.kpp file 240 | AC.print_out_lines_for_gckpp_file(tags=tags, extr_str=extr_str) 241 | 242 | # - Save out the lines to be pasted into the GC species database yaml file 243 | range = np.arange(1, int(current_tag[1:])) 244 | tags = ['P{}{:0>3}'.format(tag_prefix, i) for i in range] 245 | tags += ['{}{:0>3}'.format(tag_prefix, i) for i in range] 246 | AC.prt_lines4species_database_yml(tags, extr_str=extr_str) 247 | 248 | 249 | if __name__ == "__main__": 250 | main() 251 | -------------------------------------------------------------------------------- /scripts/SMVGEAR_process_prod_loss_tags.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | import AC_tools as AC 4 | """ 5 | Example processing of SMVGEAR prod/loss tags for GEOS-Chem diganotic (ND65). A variety 6 | of functions for working with KPP/SMVGEAR tags are in AC_tools 7 | (funcs4GESOSC/variables). 8 | 9 | NOTES 10 | --- 11 | - This code is for working with smvgear diagnostics. 
for KPP P/L see 12 | Tagged_GC_KPP_Mechanism4family.py 13 | - details on the GEOS-Chem diagnostic are in the GEOS-Chem manual 14 | (http://acmg.seas.harvard.edu/geos/doc/man/chapter_13.html) 15 | """ 16 | 17 | # --- Master debug setting 18 | DEBUG = True 19 | 20 | 21 | def main(trop_limit=True, res='4x5', debug=False): 22 | """ 23 | Get prod loss output for a family and print this to screen 24 | """ 25 | # --- Get family from Command line (and other vars) 26 | wd = sys.argv[1] 27 | spec = sys.argv[2] 28 | # version? 29 | ver = AC.iGEOSChem_ver(wd) 30 | 31 | # --- Get all tags for this family (through dictionary route) 32 | # ( e.g. 'PIOx', 'LIOx', 'P_Iy', 'L_Iy' ) 33 | nums, rxns, tags, Coe = AC.prod_loss_4_spec(wd, spec, ver=ver) 34 | # beatify reaction strings 35 | rxnstr_l = [''.join(i[4:]) for i in rxns] 36 | # one consider one tag per reaction and tagged reactions 37 | try: 38 | tags = [i[0] for i in tags] # just consider first tag 39 | except: 40 | print('WARNING! - attempting to process just tagged reactions') 41 | detail_zip = list(zip(rxnstr_l, list(zip(nums, tags)))) 42 | untagged = [n for n, i in enumerate(tags) if (len(i) < 1)] 43 | print(('Untagged reactions: ', [detail_zip[i] for i in untagged])) 44 | tags = [i for n, i in enumerate(tags) if (n not in untagged)] 45 | tags = [i[0] for i in tags] # just consider first tag 46 | # tags.pop( tags.index('LR71') ) # rm tag for ClOO loss... 47 | 48 | # --- Extract prod loss for these tracers 49 | # get prod loss IDs 50 | PDs = [AC.PLO3_to_PD(i, ver=ver, wd=wd, fp=True) for i in tags] 51 | # extract en mass 52 | fam_loss = AC.get_GC_output(wd, vars=['PORL_L_S__'+i for i in PDs], 53 | trop_limit=trop_limit, r_list=True) 54 | # print [ ( i.shape, i.sum() ) for i in fam_loss ] 55 | # Get reference species for family ( e.g. 
so output is in X g of Y ) 56 | ref_spec = AC.get_ref_spec(spec) 57 | # get shared variable arrrays 58 | s_area = get_surface_area(res=res)[..., 0] # m2 land map 59 | # convert to mass terms ( in g X ) 60 | fam_loss = convert_molec_cm3_s_2_g_X_s_BPCH(ars=fam_loss, 61 | ref_spec=ref_spec, wd=wd, conbine_ars=False, 62 | rm_strat=True, month_eq=True) 63 | print([i.shape for i in fam_loss]) 64 | 65 | # sum and convert to Gg 66 | p_l = [i.sum() / 1E9 for i in fam_loss] 67 | 68 | # --- print output as: reaction, magnitude, percent of family 69 | pcent = [np.sum(i)/np.sum(p_l)*100 for i in p_l] 70 | d = dict(list(zip(tags, list(zip(rxnstr_l, p_l, pcent))))) 71 | df = pd.DataFrame(d).T 72 | df.columns = ['rxn', 'Gg X', '% of total'] 73 | # sort 74 | df = df.sort_values(['% of total'], ascending=False) 75 | print(df) 76 | 77 | 78 | if __name__ == "__main__": 79 | main(debug=DEBUG) 80 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- 1 | from . import get_data_files 2 | from . import pf2NetCDF 3 | from . import bpch2netCDF 4 | from . 
import Tagged_GC_KPP_Mechanism4family 5 | -------------------------------------------------------------------------------- /scripts/animate_array.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | from matplotlib import animation 4 | import matplotlib.pyplot as plt 5 | import time 6 | import numpy as np 7 | import gc 8 | import datetime 9 | import AC_tools as AC 10 | """ 11 | Animate a NetCDF array to give a video of 2D (surface) 12 | """ 13 | 14 | # --- Settings for calling main as via scripting 15 | specs = ['O3', 'NO2', 'PAN', 'ALD2'] 16 | pcent = True 17 | ClearFlo_unit = True 18 | # verbose and debug settings for script main call 19 | debug = True 20 | 21 | 22 | def main(spec='NO', pcent=False, fixcb=None, limit_by_dates=False, 23 | extend='neither', ClearFlo_unit=False, verbose=True, debug=False): 24 | """ 25 | Extract data array from given location and make Animation 26 | """ 27 | 28 | # Get data in array ( time, lat, lon ) and dates (datetime.datetime ) 29 | arr, dates = get_data_dates(spec=spec, limit_by_dates=limit_by_dates, 30 | debug=debug) 31 | if debug: 32 | print([(i[:5], i.shape) for i in (arr, dates)]) 33 | 34 | # Get titles and other varibles for run (e.g. res, lons and lats for GC config. ) 35 | lat, lon, units, fname, res, title, scale = get_run_info(spec=spec, 36 | pcent=pcent, ClearFlo_unit=ClearFlo_unit) 37 | arr = arr*scale 38 | 39 | # Setup figure and axis 40 | fig, ax = setup_figure_and_axis() 41 | 42 | # Set manual limit colormap ( colorbar )? 43 | # fixcb = np.array([ 0, 100]) 44 | # extend ='both' 45 | # extend ='max' 46 | 47 | # Setup first frame for re-use ( inc. 
basemap ) and get key variables 48 | cmap, specplt, lvls, cnorm, m, fixcb, fixcb_buffered = setup_plot2animate( 49 | arr, fig=fig, ax=ax, lat=lat, lon=lon, 50 | units=units, res=res, fixcb=fixcb, debug=debug) 51 | 52 | # setup figure ascetics 53 | setup_figure_ascetics(dates, cmap=cmap, cnorm=cnorm, 54 | units=units, fig=fig, title=title, extend=extend, 55 | arr=arr, fixcb=fixcb, fixcb_buffered=fixcb_buffered, debug=debug) 56 | 57 | # animate the array and save as 58 | animate_array(arr, dates, specplt, lvls=lvls, cnorm=cnorm, 59 | cmap=cmap, debug=debug, fig=fig, m=m, lon=lon, lat=lat, 60 | spec=spec, fname=fname) 61 | 62 | 63 | def extract_data_dates(spec='O3', file=None, dates_variable='time', 64 | fill_invalid_with_mean=True, limit_by_dates=False, 65 | sdate=datetime.datetime(2005, 0o1, 0o1), ver='1.7', 66 | edate=datetime.datetime(2005, 0o1, 0o7), debug=False): 67 | """ Extracts dates and data from a given location """ 68 | 69 | from pandas import DataFrame 70 | import numpy as np 71 | from netCDF4 import Dataset 72 | import datetime 73 | 74 | # # <= Kludge: convert to tracer name used in NetCDF for extraction 75 | from AC.variables import what_species_am_i 76 | pspec = what_species_am_i(input=spec, ver=ver, invert=True, debug=debug) 77 | 78 | with Dataset(file, 'r') as rootgrp: 79 | 80 | if debug: 81 | print([i for i in rootgrp.variables]) 82 | 83 | # Return data as an array 84 | # arr = np.ma.array( rootgrp.variables[ pspec ] ) 85 | arr = np.array(rootgrp.variables[pspec]) 86 | print((rootgrp.variables[pspec])) 87 | print((np.array(rootgrp.variables[pspec]))) 88 | 89 | # get dates 90 | dates = np.ma.array(rootgrp.variables[dates_variable]) 91 | 92 | if debug: 93 | print([(type(i), i.shape) for i in (arr, dates)]) 94 | print([(i.min(), i.max(), i.mean()) for i in (arr, dates)]) 95 | 96 | # Mask with invalid values ( fill values in array) 97 | arr = np.ma.masked_invalid(arr) 98 | if debug: 99 | print([(i.min(), i.max(), i.mean()) for i in [arr]]) 100 | 101 | # 
Make sure dates are as datetime and in a numpy array 102 | dates = [datetime.datetime.fromtimestamp(i) for i in dates] 103 | dates = np.array(dates) 104 | 105 | if debug: 106 | print([i.shape for i in (arr, dates)]) 107 | 108 | print((edate, sdate, dates[0])) 109 | print([type(i) for i in (edate, sdate, dates[0])]) 110 | 111 | print((dates > sdate)) 112 | print((dates < edate)) 113 | 114 | # Limit to given dates ( e.g. 1st month 2005) 115 | if limit_by_dates: 116 | dates = dates[np.where(dates >= sdate)] 117 | print([i.shape for i in (arr, dates)]) 118 | dates = dates[np.where(dates < edate)] 119 | print([i.shape for i in (arr, dates)]) 120 | # Kludge, remove 1st dimension added by method. <= improve this. 121 | arr = arr[np.where(dates >= sdate), ...][0, ...] 122 | print([i.shape for i in (arr, dates)]) 123 | arr = arr[np.where(dates < edate), ...][0, ...] 124 | print([i.shape for i in (arr, dates)]) 125 | 126 | return arr, dates 127 | 128 | 129 | def get_data_dates(spec='O3', dates_variable='time', 130 | fill_invalid_with_mean=True, limit_by_dates=False, 131 | sdate=datetime.datetime(2005, 0o1, 0o1), pcent=False, 132 | edate=datetime.datetime(2005, 0o1, 0o7), debug=False): 133 | """ Set dirs/files to use. 
134 | Extracts dates and data from a given location 135 | NOTE: 136 | (A) If more than 1 file given then a difference, in actual or 137 | percent (if pcent==True) terms is returned 138 | 139 | """ 140 | import numpy as np 141 | from AC.core import get_dir 142 | 143 | # Set Directory ( comment out if using different earth0 user ) 144 | wd = get_dir('npwd') 145 | 146 | # Set files to use 147 | files = [ 148 | 'pf_iGEOSChem_1.7_v10_G5_EU.EOH_run.x50SUMMER_ClearFlo.EMEPx4.RETROx4.no.EOH.emiss.OH_output_3D.nc', 149 | 'pf_iGEOSChem_1.7_v10_G5_EU.EOH_run.x50SUMMER_ClearFlo.EMEPx4.RETROx4.OH_output.ALD2_3D.nc' 150 | ] 151 | files = [wd+i for i in files] 152 | print(files) 153 | 154 | # Extract data, by looping file list 155 | data_l, dates_l = [], [] 156 | for file in files: 157 | data_, dates_ = extract_data_dates(spec=spec, file=file, 158 | dates_variable=dates_variable, limit_by_dates=limit_by_dates, 159 | fill_invalid_with_mean=fill_invalid_with_mean, debug=debug) 160 | dates_l += [dates_] 161 | data_l += [data_] 162 | 163 | # If more than one data set - get difference 164 | if len(files) > 1: 165 | # Get change in concentration 166 | # get % change in conc 167 | if pcent: 168 | arr = (data_l[1]-data_l[0]) / data_l[0] * 100 169 | else: 170 | arr = data_l[1]-data_l[0] 171 | dates = dates_l[0] # assume date arrays are the same 172 | else: 173 | dates, arr = dates_l[0], data_l[0] 174 | 175 | # Kludge for testing ( just use 1st few times ) 176 | dates, arr = dates[:10], arr[:10, ...] 177 | 178 | # The corner point is a NaN for 0.25 output. <= improve this. 179 | # Set this ( not visible on the plot window ) to mean to allow for save 180 | if fill_invalid_with_mean: 181 | np.ma.set_fill_value(arr, arr.mean()) 182 | arr = arr.filled() 183 | 184 | return arr, dates 185 | 186 | 187 | def get_run_info(spec='O3', res='0.25x0.3125', region='EU', fname='', 188 | scale=1, pcent=False, ClearFlo_unit=False): 189 | """ get descriptive variables for run period ( e.g. 
res ) """ 190 | from AC.core import get_latlonalt4res 191 | from AC.variables import tra_unit, latex_spec_name 192 | 193 | # Set variables (e.g. res) or automation of variable setting here 194 | # res = get_run_descriptors() 195 | # Kludge set res manually 196 | res = '0.5x0.666' 197 | 198 | # Get lat and lon of GC grid for given resolution 199 | lon, lat, NIU = get_latlonalt4res(res=res) 200 | 201 | # Set units based on species name + get scaling 202 | if pcent: 203 | units = '%' 204 | else: 205 | units, scale = tra_unit(spec, ClearFlo_unit=ClearFlo_unit, scale=True) 206 | 207 | # Set filename from run detail 208 | fname += '{}_{}_{}_{}_{}.mp4'.format(region, res, 209 | spec, units, time.strftime("%y_%m_%d_%H_%M")) 210 | 211 | # setup plot title 212 | title = 'Surface {} / {}'.format(latex_spec_name(spec), units) 213 | 214 | return lat, lon, units, fname, res, title, scale 215 | 216 | 217 | def setup_figure_and_axis(): 218 | """ Initialise figure and axis """ 219 | 220 | # Setup plot 221 | fig, ax = plt.subplots(figsize=[16, 9]) 222 | ax.set_aspect('equal') 223 | ax.autoscale_view(False) 224 | 225 | return fig, ax 226 | 227 | 228 | def setup_figure_ascetics(dates, f_size=10, title=None, cmap=None, 229 | units=None, cnorm=None, fig=None, format='%.0f', 230 | extend='neither', arr=None, fixcb=None, 231 | fixcb_buffered=None, debug=False): 232 | """ Add colorbar, logos and titles to figure """ 233 | 234 | # if on Univeristy of York/NCAS servers, add logos 235 | import platform 236 | # earth0 = 'Linux-3.0.101-0.47.52-default-x86_64-with-SuSE-11-x86_64' 237 | earth0 = 'Linux-3.0.101-0.47.71-default-x86_64-with-SuSE-11-x86_64' 238 | if platform.platform() == earth0: 239 | from plotting_special import add_logos_NCAS_york_bottom 240 | 241 | # add title and logos for NCAS/NERC 242 | fig = add_logos_NCAS_york_bottom(fig) 243 | fig.suptitle(title, fontsize=f_size*2, x=.55, y=.95) 244 | 245 | # Add colorbar 246 | from plotting_special import mk_cb 247 | mk_cb(fig, 
units=units, left=0.925, bottom=0.2, width=0.015, height=0.6, 248 | cmap=cmap, vmin=fixcb_buffered[0], vmax=fixcb_buffered[1], 249 | f_size=f_size, format=format, norm=cnorm, extend=extend,) 250 | 251 | # Add text showing model time on figure 252 | plt.figtext(x=0.5, y=0.875, s=dates[0], fontsize=f_size) 253 | 254 | 255 | def setup_plot2animate(arr, fig=None, ax=None, lat=None, lon=None, 256 | units=None, contour=False, res=None, f_size=20, min_change=0.5, 257 | period=1, adjust_window=3, alpha=0.85, npoints=50, 258 | everyother=1, interval=1, resolution='f', drawcountries=True, 259 | cnorm=None, lvls=None, fixcb=None, positive=None, 260 | nticks=10, cb_sigfig=2, debug=False): 261 | 262 | from plotting_special import get_basemap, get_colormap, \ 263 | get_human_readable_gradations 264 | 265 | # --- settings 266 | plt.ioff() # turn off interactive plotting 267 | global specplt 268 | 269 | print((3, 'detail on output: ', [[np.ma.min(i), np.ma.max(i), 270 | np.ma.mean(i), type(i), i.shape] for i in [arr]])) 271 | 272 | # Setup basemap 273 | m = get_basemap(lat=lat, lon=lon, resolution=resolution, res=res, 274 | everyother=everyother, interval=interval, f_size=f_size, 275 | drawcountries=drawcountries) 276 | 277 | # adjust window size 278 | m.ax = ax 279 | plt.xlim(lon[0+adjust_window], lon[-1-adjust_window]) 280 | plt.ylim(lat[0+adjust_window], lat[-1-adjust_window]) 281 | 282 | # --- Setup grid and Pcolor/contour 283 | x, y = np.meshgrid(lon, lat) 284 | 285 | # Make sure array is a masked numpy array 286 | arr = np.ma.array(arr) 287 | 288 | # Get min and max value in whole array 289 | fixcb = [arr.min(), arr.max()] 290 | 291 | # Set readable levels for cb, then use these to dictate cmap 292 | lvls = get_human_readable_gradations(vmax=fixcb[1], 293 | vmin=fixcb[0], nticks=nticks, 294 | cb_sigfig=cb_sigfig) 295 | 296 | # Setup colour or pcolor plot 297 | if contour: 298 | # updated to use readable levels, this needs testing 299 | # lvls = np.linspace(minc, 
maxc+min_change, npoints ) 300 | cnorm = mpl.colors.Normalize(vmin=fixcb[0], vmax=fixcb[1]) 301 | specplt = m.contourf(x, y, arr[0, :, :], lvls, cmap=cmap, 302 | latlon=True, norm=cnorm, antialiased=True) 303 | 304 | fname = 'contour_'+fname 305 | 306 | else: 307 | # Setup Colormap 308 | cmap, fixcb_buffered = get_colormap(np.array(fixcb), 309 | nticks=nticks, fixcb=fixcb, buffer_cmap_upper=True) 310 | 311 | specplt = m.pcolor(lon, lat, arr[0, :, :], cmap=cmap) 312 | # , antialiased=True ) 313 | 314 | if debug: 315 | print([len(i) for i in (lat, lon, x, y)]) 316 | print((arr.shape, type(arr))) 317 | print((arr.shape, type(arr))) 318 | 319 | return cmap, specplt, lvls, cnorm, m, fixcb, fixcb_buffered 320 | 321 | 322 | def animate_array(arr, dates, specplt, spec='O3', min_change=0.5, 323 | period=1, adjust_window=3, alpha=0.85, npoints=50, wd='./', 324 | lvls=None, cnorm=None, cmap=None, contour=False, 325 | fig=None, m=None, lon=None, lat=None, fname=None, debug=False): 326 | """ Animates array with specplt as first frame""" 327 | 328 | # clean memory 329 | gc.collect() 330 | 331 | # Function to loop frame and save to animation 332 | def plotgc(i): 333 | global specplt 334 | print((i, 'for {} @ model time of {} @ real time of {}'.format(spec, 335 | dates[i], time.strftime('%Y %m %d %H:%M:%S')))) 336 | if contour: 337 | # Remove previous plot data, but retain basemap.... 338 | for c in specplt.collections: 339 | c.remove() 340 | # Fill basemap 341 | specplt = m.contourf(x, y, arr[i, :, :], lvls, cmap=cmap, 342 | latlon=True, norm=cnorm, alpha=alpha, antialiased=True) 343 | else: 344 | # Remove previous plot data, but retain basemap.... 
345 | specplt.remove() 346 | # Fill basemap/ plot 347 | specplt = m.pcolor(lon, lat, arr[i, :, :], cmap=cmap) 348 | 349 | # Clean memory 350 | gc.collect() 351 | 352 | # Update date 353 | fig.texts[-1].set_text(dates[i]) 354 | return specplt 355 | 356 | # Animate 357 | ani = animation.FuncAnimation(fig, plotgc, 358 | frames=np.arange(len(dates)-2), blit=True) 359 | 360 | # Save 361 | ani.save(wd + fname, 'ffmpeg', fps=24, extra_args=['-vcodec', 362 | 'libx264', '-pix_fmt', 'yuv420p']) 363 | print(('Video saved & Closed as/at: ', fname)) 364 | 365 | # Clean memory 366 | # gc.collect() 367 | # plt.clf() 368 | # del specplt 369 | 370 | 371 | if __name__ == "__main__": 372 | for spec in specs: 373 | main(spec=spec, pcent=pcent, ClearFlo_unit=ClearFlo_unit, 374 | debug=debug) 375 | -------------------------------------------------------------------------------- /scripts/bpch2netCDF.py: -------------------------------------------------------------------------------- 1 | ../AC_tools/bpch2netCDF.py -------------------------------------------------------------------------------- /scripts/check_met.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | Check GEOS-Chem met fields for zero in arrays. 4 | 5 | NOTES: 6 | - credit: mje, adapted by tms 7 | 8 | """ 9 | # Import modules 10 | from netCDF4 import Dataset 11 | import numpy as np 12 | import glob as glob 13 | import matplotlib.pyplot as plt 14 | 15 | # ---- Setup 16 | # 17 | # For what years and months? ("*" for all ) 18 | years = '2013' 19 | months = '11' 20 | # on what grid? 21 | # grid = 'GEOS_0.25x0.3125_ch' # GEOS_0.25x0.3125_eu 22 | grid = 'GEOS_2x2.5' 23 | # where is the data? 24 | #data_root = '/work/data/GEOS' 25 | data_root = '/shared/earthfs/GEOS' 26 | # meterology 27 | met = 'GEOS_FP' 28 | # add extension? 29 | ext = '*' # '*.I3*' # '*.nc*' # I3 files or all? 30 | # directory to check? 
31 | dir = '/'.join((data_root, grid, met, years, months, ext)) 32 | # get files 33 | files = glob.glob(dir) 34 | files.sort() 35 | # print detail to screen? 36 | prt_detail = False 37 | 38 | # --- loop files and test for zero values 39 | print(files) 40 | counter = 0 41 | for file in files: 42 | # Debug? - print file accessed... 43 | # print file 44 | # Open NetCDF as dataset ("d" ) 45 | with Dataset(file) as d: 46 | # loop keys 47 | for key in list(d.variables.keys()): 48 | # check on a per field basis 49 | field = d[key][:] 50 | # If multi-dimensional 51 | if (len(field.shape) > 1): 52 | # print zero fields 53 | if (field.min() == 0.): 54 | # print to screen 55 | print((file, key, field.shape)) 56 | # Print detail? 57 | if prt_detail: 58 | for j in np.arange(0, 8): 59 | for k in np.arange(0, 72): 60 | if (field[j, k, :, :].min() == 0.): 61 | print((key, j, k, file)) 62 | -------------------------------------------------------------------------------- /scripts/find_replace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #~/.scripts/git-sub 3 | #Author: Khaja Minhajuddin 4 | #script which does a global search and replace in the git repository 5 | #it takes two arguments 6 | #e.g. 
git sub OLD NEW 7 | 8 | old=$1 9 | new=$2 10 | 11 | for file in $(git grep $old | cut -d':' -f 1 | uniq) 12 | do 13 | echo "replacing '$old' with '$new' in '$file'" 14 | sed -i -e "s/$old/$new/g" $file 15 | done -------------------------------------------------------------------------------- /scripts/get_data_files.py: -------------------------------------------------------------------------------- 1 | """ 2 | Download the example/reference data files for AC_tools from an external source 3 | """ 4 | 5 | import os 6 | import logging 7 | # temporarily restore urllib2 (as default urllib not working) 8 | #import urllib 9 | #import urllib2 10 | try: 11 | from urllib.request import urlopen 12 | from urllib.error import HTTPError 13 | # except ModuleNotFoundError: 14 | except ImportError: 15 | from urllib.request import urlopen 16 | from urllib.error import HTTPError 17 | 18 | # Set up logging only if running as a script 19 | if __name__ == '__main__': 20 | FORMAT = "%(levelname)8s - %(message)s @---> %(filename)s:%(lineno)s %(funcName)s()" 21 | logging.basicConfig(filename='AC_tools.log', filemode='w', level=logging.DEBUG, 22 | format=FORMAT) 23 | logging.getLogger().setLevel(logging.DEBUG) 24 | 25 | # Location of files on York visible apache server (atmosviz1) 26 | data_dir = "../data" 27 | data_url = "https://webfiles.york.ac.uk/WACL/SHERWEN_TOMAS/AC_tools/" 28 | 29 | # List of files to fetch 30 | file_list = [ 31 | "HEMCO_Diagnostics.nc", 32 | "ctm.nc", 33 | "diaginfo.dat", 34 | "test.bpch", 35 | "test.log", 36 | "tracerinfo.dat", 37 | "LM/LANDMAP_LWI_ctm_4x5/ctm.nc", 38 | "LM/LANDMAP_LWI_ctm_025x03125/ctm.nc", 39 | "LM/LANDMAP_LWI_ctm_025x03125_CH/ctm.nc", 40 | "LM/LANDMAP_LWI_ctm_025x03125_WA/ctm.nc", 41 | "LM/LANDMAP_LWI_ctm_05x0666/ctm.nc", 42 | "LM/LANDMAP_LWI_ctm_2x25/ctm.nc", 43 | "LM/LANDMAP_LWI_ctm_0125x0125/ctm.nc", 44 | "GEOS_ChemSpecies_fullchem_v0.1.0.csv" 45 | ] 46 | 47 | 48 | def main(): 49 | """ 50 | Driver to download data/reference files for AC_tools 
51 | """ 52 | if not os.path.exists(data_dir): 53 | os.makedirs(data_dir) 54 | 55 | for _file in file_list: 56 | new_filename = os.path.join(data_dir, _file) 57 | file_url = data_url + _file 58 | 59 | # If file does not exist donwload the file 60 | if not os.path.isfile(new_filename): 61 | logging.debug(new_filename + " not found. Downloading now.") 62 | download_file(new_filename, file_url) 63 | 64 | else: 65 | # If file exists make sure it is the correct size 66 | url_size = int(urlopen(file_url).info()['Content-Length']) 67 | file_size = int(os.stat(new_filename).st_size) 68 | if not url_size == file_size: 69 | logging.warning("{fn} appears to be the wrong size\ 70 | {size1} vs {size2}".format( 71 | fn=new_filename, size1=url_size, size2=file_size)) 72 | logging.warning("Redownloading now") 73 | download_file(new_filename, file_url) 74 | 75 | 76 | def download_file(new_filename, file_url): 77 | """ 78 | Download a file from a given URL 79 | """ 80 | if not os.path.exists(os.path.dirname(new_filename)): 81 | try: 82 | os.makedirs(os.path.dirname(new_filename)) 83 | except: 84 | logging.error( 85 | "Could not create folder for {file}".format(file=new_filename)) 86 | 87 | try: 88 | new_file = open(new_filename, 'wb') 89 | logging.debug("downloading from {url}".format(url=file_url)) 90 | prt_str = "Downloading file ({}), which may take some time." 
91 | print(prt_str.format(new_filename)) 92 | file_data = urlopen(file_url).read() 93 | new_file.write(file_data) 94 | print("Download complete.") 95 | logging.debug(new_filename+" downloaded.") 96 | new_file.close() 97 | except HTTPError as error_code: 98 | if error_code.code == 404: 99 | logging.error("{The following was not found on the server:") 100 | logging.error("{url}".format(url=file_url)) 101 | else: 102 | logging.error("Failed to get {url} with HTTP error {error_code}" 103 | .format(url=file_url, error_code=error_code)) 104 | except: 105 | logging.error("Failed to download {url}".format(url=file_url)) 106 | 107 | 108 | # Run whether as script or as import 109 | main() 110 | -------------------------------------------------------------------------------- /scripts/mk_pf_MUTD_2D_ROW_EU_grid_surface.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # --- Packages 3 | import numpy as np 4 | from time import gmtime, strftime 5 | import time 6 | import glob 7 | import AC_tools as AC 8 | import sys 9 | import pandas as pd 10 | 11 | 12 | def main(filename=None, LAT_var='LAT', LON_var='LON', 13 | PRESS_var='PRESS', loc_var='TYPE', Username='Tomas Sherwen', 14 | slist=None, Extra_spacings=True, 15 | freq='H', start_year=2015, end_year=2017, debug=False): 16 | """ 17 | This programme makes the planeflight*.dat files required to output for 18 | an entire grid (e.g. Europe.) 19 | 20 | Parameters 21 | ------- 22 | time_str (str): format of time columne in input .csv file 23 | 24 | Returns 25 | ------- 26 | 27 | Notes 28 | ----- 29 | - This programme can be used to produce files to output data for ship 30 | and aricraft campaigns 31 | """ 32 | # --- Local settings 33 | res = '0.25x0.3125' # res='0.5x0.666' 34 | tpwd = AC.get_dir('tpwd') 35 | #start_year, end_year = 2006,2007 36 | # location of files? 
(from 1st argument of command line) 37 | if isinstance(filename, type(None)): 38 | filename = sys.argv[1] 39 | else: 40 | filename = 'EU_GRID_{}.dat'.format(res) 41 | 42 | # set species list to output if not provided 43 | if isinstance(slist, type(None)): 44 | # Which (halogen) code version is being used? 45 | # ver = '1.6' # Iodine simulation in v9-2 46 | # ver = '2.0' # Iodine + Bromine simulation 47 | ver = '3.0' # Cl-Br-I simulation 48 | # Get Variables to output (e.g. tracers, species, met values ) 49 | slist = AC.pf_var('slist', ver=ver, fill_var_with_zeroes=True) 50 | 51 | # --- Read in site Detail 52 | location = AC.readin_gaw_sites(filename, all=True) 53 | numbers, locs, lats, lons, pres = [location[:, i] for i in range(5)] 54 | lats, lons, pres = [np.float64(i) for i in (lats, lons, pres)] 55 | locs = np.array(locs) 56 | print((lats[0:4])) 57 | 58 | # --- Set Variables 59 | # slist = pf_var( 'slist_v9_2_NREA_red_NOy', ver=ver )#'slist_v9_2_NREA_red' ) 60 | #slist = pf_var( 'slist_ClearFlo', ver=ver ) 61 | slist = AC.pf_var('slist_PEN_WEY_LEI', ver=ver) 62 | # kludge additional of ethanol 63 | slist = slist + ['TRA_86'] 64 | nvar = len(slist) 65 | # dates 66 | dates = pd.date_range(datetime.datetime(start_year, 1, 1), 67 | datetime.datetime(end_year, 12, 31, 23), freq='H') 68 | # dictionary of variables 69 | d = { 70 | 'datetime': dates, 'LAT': [LAT[n]]*nvar, 'LON': [LON[n]]*nvar, 71 | 'TYPE': [TYPE[n]]*nvar, 'PRESS': [PRESS[n]]*nvar} 72 | df = pd.DataFrame(d) 73 | 74 | # --- Print out files 75 | AC.prt_PlaneFlight_files(df=df, slist=slist, Extra_spacings=Extra_spacings) 76 | 77 | 78 | if __name__ == "__main__": 79 | main() 80 | -------------------------------------------------------------------------------- /scripts/mk_planeflight_input_file_for_campaign_locs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # --- Packages 3 | import numpy as np 4 | from time import gmtime, strftime 5 | import 
time 6 | import glob 7 | import AC_tools as AC 8 | import sys 9 | import pandas as pd 10 | 11 | 12 | def main(filename=None, slist=None, time_str='%H:%M', date_str='%d%m%Y', 13 | LAT_var='LAT', LON_var='LON', PRESS_var='PRESS', loc_var='TYPE', 14 | Username='Tomas Sherwen', GC_ver='v12.0.0', GC_ver_above_v12=True): 15 | """ 16 | This programme makes the planeflight*.dat files required to output for 17 | specific locations and times in the model (e.g. along a ship cruise or 18 | airborne campaign track). 19 | 20 | Parameters 21 | ------- 22 | time_str (str): format of time columne in input .csv file 23 | 24 | Returns 25 | ------- 26 | 27 | Notes 28 | ----- 29 | - This programme can be used to produce files to output data for ship 30 | and aricraft campaigns 31 | """ 32 | # --- Local settings 33 | # location of files? (from 1st argument of command line) 34 | if isinstance(filename, type(None)): 35 | filename = sys.argv[1] 36 | # set species list to output if not provided 37 | if isinstance(slist, type(None)): 38 | # Which (halogen) code version is being used? 39 | # ver = '1.6' # Iodine simulation in v9-2 40 | # ver = '2.0' # Iodine + Bromine simulation 41 | ver = '3.0' # Cl-Br-I simulation 42 | # Get Variables to output (e.g. tracers, species, met values ) 43 | slist = AC.pf_var('slist', ver=ver, fill_var_with_zeroes=True) 44 | 45 | # --- Read in a .csv file of aircraft/cruise data etc... 
46 | # (must contrain DATE, TIME, LAT, LON, PRESS, (hPa), TYPE (code ) ) 47 | # (the tag for output (referred to as TYPE in GC) can be upto 4 chars ) 48 | LOCS_df = pd.read_csv(filename) 49 | # or multiple input files 50 | # mk_Campaign_csv_from_multiple_csv_files() 51 | # vars_ = ['LAT', 'LON', 'PRESS', 'TYPE' ] 52 | # LAT, LON, PRESS, TYPE = [ LOCS_df[i].values for i in vars_ ] 53 | # nvar = len( slist ) 54 | df = LOCS_df 55 | 56 | # --- Print out files 57 | if (GC_ver == 'v12.0.0') or GC_ver_above_v12: 58 | AC.prt_PlaneFlight_files_v12_plus(df=df, slist=slist, Extra_spacings=Extra_spacings, 59 | LON_var=LON_var, LAT_var=LAT_var, PRESS_var=PRESS_var, loc_var=loc_var, 60 | Username=Username) 61 | else: 62 | AC.prt_PlaneFlight_files(df=df, slist=slist, Extra_spacings=Extra_spacings, 63 | LON_var=LON_var, LAT_var=LAT_var, PRESS_var=PRESS_var, loc_var=loc_var, 64 | Username=Username) 65 | 66 | 67 | if __name__ == "__main__": 68 | main() 69 | -------------------------------------------------------------------------------- /scripts/mk_planeflight_input_file_for_point_locs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # --- Packages 3 | import numpy as np 4 | from time import gmtime, strftime 5 | import datetime as datetime 6 | import time 7 | import glob 8 | import pandas as pd 9 | import sys 10 | import AC_tools as AC 11 | 12 | 13 | def main(filename=None, LAT_var='LAT', LON_var='LON', 14 | PRESS_var='PRESS', loc_var='TYPE', Username='Tomas Sherwen', 15 | slist=None, Extra_spacings=False, freq='H', start_year=2014, 16 | end_year=2016, GC_ver_above_v12=True, GC_ver='v12.0.0', 17 | debug=False): 18 | """ 19 | Mk planeflight input files for GEOS-Chem ND40 diagnostic from a csv file 20 | contains sites of interest (TYPE) and their descriptors (LON, LAT, PRESS) 21 | 22 | Parameters 23 | ------- 24 | freq (str): output frequency of diagnostic ('H' or 'D') 25 | start_year,end_year (int): start and end year to 
output. 26 | wd (str): the working (code) directory to search for files in 27 | loc_var (str): name for (e.g. plane name), could be more than one. 28 | LAT_var, LON_var, PRESS_var (str): name for pressure(HPa),lat and lon in df 29 | Username (str): name of the programme's user 30 | Extra_spacings (bool): add extra spacing? (needed for large amounts of 31 | output, like nested grids) 32 | Notes 33 | ----- 34 | - command line call with the following: 35 | "python mk_planeflight_input_file_for_point_locs.py " 36 | ( default file provided (Planeflight_point_sites.csv) to edit) 37 | - This programme can be used to produce files to output data for 38 | observational sites (e.g. goverment air quality sites) 39 | - This programme makes the planeflight*.dat files required to output 40 | for specific locations and times in the model. 41 | - The deafult settting is for hourly output. 42 | """ 43 | print(('filename:{}'.format(filename))) 44 | # --- Local settings 45 | # file of locations? (from 1st argument of command line) 46 | if isinstance(filename, type(None)): 47 | if __name__ == "__main__": 48 | filename = sys.argv[1] 49 | else: 50 | filename = 'Planeflight_point_sites.csv' 51 | 52 | # set species list to output if not provided 53 | if isinstance(slist, type(None)): 54 | # Which (halogen) code version is being used? 55 | # ver = '1.6' # Iodine simulation in v9-2 56 | # ver = '2.0' # Iodine + Bromine simulation 57 | ver = '3.0' # Cl-Br-I simulation 58 | # Get Variables to output (e.g. 
tracers, species, met values ) 59 | slist = AC.pf_var('slist', ver=ver, fill_var_with_zeroes=True) 60 | 61 | # --- Read in site Detail 62 | # ( must contain LAT, LON, PRESS, TYPE (name of sites) ) 63 | 64 | LOCS_df = pd.read_csv(filename) 65 | vars_ = ['LAT', 'LON', 'PRESS', 'TYPE'] 66 | LAT, LON, PRESS, TYPE = [LOCS_df[i].values for i in vars_] 67 | 68 | # --- Make DataFrame of locations 69 | dates = pd.date_range(datetime.datetime(start_year, 1, 1), 70 | datetime.datetime(end_year, 12, 31, 23), freq='H') 71 | # for each location make a DataFrame, then conbime 72 | dfs = [] 73 | for n, type_ in enumerate(TYPE): 74 | # dictionary of data 75 | nvar = len(dates) 76 | d = { 77 | 'datetime': dates, 'LAT': [LAT[n]]*nvar, 'LON': [LON[n]]*nvar, 78 | 'TYPE': [TYPE[n]]*nvar, 'PRESS': [PRESS[n]]*nvar} 79 | dfs += [pd.DataFrame(d, index=np.arange(nvar)+(n*1E6))] 80 | # combine all TYPE (sites) and sort by date 81 | df = pd.concat(dfs).sort_values('datetime', ascending=True) 82 | 83 | # --- Print out files 84 | if (GC_ver == 'v12.0.0') or GC_ver_above_v12: 85 | AC.prt_PlaneFlight_files_v12_plus( 86 | df=df, slist=slist, Extra_spacings=Extra_spacings) 87 | else: 88 | AC.prt_PlaneFlight_files( 89 | df=df, slist=slist, Extra_spacings=Extra_spacings) 90 | 91 | 92 | if __name__ == "__main__": 93 | main() 94 | -------------------------------------------------------------------------------- /scripts/netCDF_splitter2var_2D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Split off 2D variable from file with other variables 5 | 6 | Notes 7 | ---- 8 | - based on software carpentary example. 9 | http://damienirving.github.io/capstone-oceanography/03-data-provenance.html 10 | """ 11 | # Modules to import 12 | from netCDF4 import Dataset 13 | import numpy as np 14 | import pylab as pl 15 | import calendar 16 | # add extra's for copied function... 
17 | import os 18 | import sys 19 | import argparse 20 | import datetime 21 | 22 | # --- verbose and debug settings for script main call 23 | VERBOSE = False 24 | DEBUG = False 25 | 26 | 27 | def main(filename=None, VarName='OLSON', verbose=False, debug=False): 28 | """ 29 | Driver to split off variables 30 | """ 31 | # Get the file name and location 32 | wd, fn = get_file_loc_and_name() 33 | # name output file if name not given 34 | if isinstance(filename, type(None)): 35 | filename = wd.split('/')[-2] 36 | if debug: 37 | print((wd, fn, filename)) 38 | inFile = wd+'/'+fn 39 | 40 | # Set output name 41 | outfile_name = inFile+'.out' 42 | 43 | # Read input data 44 | VarData, input_DATA = read_data(inFile, VarName=VarName) 45 | 46 | # Set values? 47 | # print type(VarData) 48 | # print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData] 49 | # VarData[VarData>1] = 1 50 | # print [ (i.shape, i.mean(), i.min(), i.max()) for i in VarData] 51 | 52 | # --- Write the output file 53 | outfile = Dataset(outfile_name, 'w', format='NETCDF4') 54 | set_global_atts(input_DATA, outfile) 55 | copy_dimensions(input_DATA, outfile) 56 | copy_variables(input_DATA, outfile, VarName=VarName) 57 | # overwite data 58 | outfile[VarName][:] = VarData 59 | # Close file 60 | outfile.close() 61 | 62 | 63 | def get_file_loc_and_name(): 64 | """ Get file location and name """ 65 | 66 | # Use command line grab function 67 | import sys 68 | 69 | # Get arguments from command line 70 | wd = sys.argv[1] 71 | fn = sys.argv[2] 72 | 73 | return wd, fn 74 | 75 | 76 | def copy_dimensions(infile, outfile): 77 | """ 78 | Copy the dimensions of the infile to the outfile 79 | """ 80 | for dimName, dimData in iter(list(infile.dimensions.items())): 81 | outfile.createDimension(dimName, len(dimData)) 82 | 83 | 84 | def copy_variables(infile, outfile, VarName='OLSON'): 85 | """ 86 | Create variables corresponding to the file dimensions 87 | by copying from infile 88 | """ 89 | # Get vars 90 | var_list = 
['lon', 'lat', 'time'] 91 | # Also consider LANDMAP value 92 | var_list += [VarName] 93 | # Now loop 94 | for var_name in var_list: 95 | varin = infile.variables[var_name] 96 | outVar = outfile.createVariable(var_name, varin.datatype, 97 | varin.dimensions, 98 | ) 99 | outVar[:] = varin[:] 100 | 101 | var_atts = {} 102 | for att in varin.ncattrs(): 103 | if not att == '_FillValue': 104 | var_atts[att] = eval('varin.'+att) 105 | outVar.setncatts(var_atts) 106 | 107 | 108 | def read_data(ifile, VarName='OLSON'): 109 | """ 110 | Read data from ifile corresponding to the VarName 111 | """ 112 | input_DATA = Dataset(ifile) 113 | VarData = input_DATA.variables[VarName][:] 114 | 115 | return VarData, input_DATA 116 | 117 | 118 | def set_global_atts(infile, outfile): 119 | """Set the global attributes for outfile. 120 | 121 | Note that the global attributes are simply copied from infile. 122 | """ 123 | 124 | global_atts = {} 125 | for att in infile.ncattrs(): 126 | global_atts[att] = eval('infile.'+att) 127 | 128 | # set attributes 129 | outfile.setncatts(global_atts) 130 | 131 | 132 | if __name__ == "__main__": 133 | main(verbose=VERBOSE, debug=DEBUG) 134 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from setuptools import setup, find_packages 3 | import os 4 | 5 | VERSION = '0.1.1' 6 | DISTNAME = 'AC_tools' 7 | DESCRIPTION = "Atmospheric Chemistry (AC) tools" 8 | AUTHOR = 'Tomas Sherwen, Ben Newsome' 9 | AUTHOR_EMAIL = 'tomas.sherwen@york.ac.uk' 10 | URL = 'https://github.com/tsherwen/AC_tools' 11 | LICENSE = 'MIT' 12 | PYTHON_REQUIRES = '>=3.5' 13 | 14 | on_rtd = os.environ.get('READTHEDOCS') == 'True' 15 | if on_rtd: 16 | INSTALL_REQUIRES = [] 17 | else: 18 | INSTALL_REQUIRES = [ 19 | 'cartopy', 20 | 'matplotlib', 21 | 'netcdf4', 22 | 'numpy', 23 | 'pandas', 24 | 'pytest', 25 | 'scipy', 26 | 'xarray', 27 | 
'pyyaml', 28 | ] 29 | 30 | CLASSIFIERS = [ 31 | 'Development Status :: 4 - Beta', 32 | 'License :: OSI Approved :: MIT License', 33 | 'Operating System :: OS Independent', 34 | 'Intended Audience :: Science/Research', 35 | 'Programming Language :: Python', 36 | 'Programming Language :: Python :: 3', 37 | 'Programming Language :: Python :: 3.5', 38 | 'Programming Language :: Python :: 3.6', 39 | 'Topic :: Scientific/Engineering', 40 | ] 41 | 42 | 43 | def readme(): 44 | with open('README.rst') as f: 45 | return f.read() 46 | 47 | 48 | setup(name=DISTNAME, 49 | version=VERSION, 50 | license=LICENSE, 51 | author=AUTHOR, 52 | author_email=AUTHOR_EMAIL, 53 | classifiers=CLASSIFIERS, 54 | description=DESCRIPTION, 55 | long_description=readme(), 56 | python_requires=PYTHON_REQUIRES, 57 | install_requires=INSTALL_REQUIRES, 58 | url=URL, 59 | packages=find_packages()) 60 | --------------------------------------------------------------------------------