├── .gitignore
├── README.md
├── ancillary
│   ├── 2016
│   │   ├── smk_merge_dates_201501.txt
│   │   ├── smk_merge_dates_201502.txt
│   │   ├── smk_merge_dates_201503.txt
│   │   ├── smk_merge_dates_201504.txt
│   │   ├── smk_merge_dates_201504.txt.easter
│   │   ├── smk_merge_dates_201504.txt.no_easter
│   │   ├── smk_merge_dates_201505.txt
│   │   ├── smk_merge_dates_201506.txt
│   │   ├── smk_merge_dates_201507.txt
│   │   ├── smk_merge_dates_201508.txt
│   │   ├── smk_merge_dates_201509.txt
│   │   ├── smk_merge_dates_201510.txt
│   │   ├── smk_merge_dates_201511.txt
│   │   ├── smk_merge_dates_201512.txt
│   │   ├── smk_merge_dates_201512.txt.old
│   │   ├── smk_merge_dates_201601.txt
│   │   ├── smk_merge_dates_201602.txt
│   │   ├── smk_merge_dates_201603.txt
│   │   ├── smk_merge_dates_201604.txt
│   │   ├── smk_merge_dates_201605.txt
│   │   ├── smk_merge_dates_201606.txt
│   │   ├── smk_merge_dates_201607.txt
│   │   ├── smk_merge_dates_201608.txt
│   │   ├── smk_merge_dates_201609.txt
│   │   ├── smk_merge_dates_201610.txt
│   │   ├── smk_merge_dates_201611.txt
│   │   ├── smk_merge_dates_201612.txt
│   │   └── smk_merge_dates_201612.txt.no26decholiday
│   ├── USA_LAND.txt
│   ├── create_merge_dates_ann.py
│   └── griddesc.txt
├── bin
│   └── emisqa
├── docs
│   └── usage.md
├── emisqa
│   ├── __init__.py
│   ├── camx
│   │   ├── __init__.py
│   │   └── read_uam.py
│   ├── chem_mechs.py
│   ├── cmaq
│   │   ├── __init__.py
│   │   └── read_ncf.py
│   ├── csv
│   │   ├── __init__.py
│   │   └── read_csv.py
│   ├── data_file.py
│   ├── dataout
│   │   ├── __init__.py
│   │   └── data_out.py
│   ├── dateloop
│   │   ├── __init__.py
│   │   └── inday.py
│   ├── default_paths.py
│   ├── formulas.py
│   ├── helpers.py
│   ├── inline
│   │   ├── __init__.py
│   │   └── stack_group.py
│   ├── run_parse.py
│   ├── run_select.py
│   ├── runtypes
│   │   ├── __init__.py
│   │   ├── add_files.py
│   │   ├── avgdv.py
│   │   ├── dump_dv.py
│   │   ├── hour_dump.py
│   │   ├── hourly_domain.py
│   │   ├── mm_domain.py
│   │   ├── pe.py
│   │   ├── raw_diff.py
│   │   ├── raw_dump.py
│   │   ├── single_domain.py
│   │   ├── sumdv.py
│   │   └── sumhour.py
│   └── species_array.py
└── setup.py

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

.pypirc
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
emisqa
Python-based air emissions QA and reporting tool for CMAQ and CAMx model-ready emissions inputs
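The module layout above suggests readers for CMAQ IOAPI NetCDF files (cmaq/read_ncf.py), CAMx UAM binaries (camx/read_uam.py), inline point-source stack groups, and CSV reports. As a minimal sketch of the kind of check such a tool automates (not emisqa's actual API; the file path and species name below are hypothetical):

```python
# Hedged sketch: open an IOAPI-format CMAQ emissions file with netCDF4 and
# report a per-species domain total. Path and variable name are hypothetical.
from netCDF4 import Dataset

ds = Dataset("emis_mole_all_20160105_cmaq_cb6.ncf")
no2 = ds.variables["NO2"][:]   # IOAPI dims: (TSTEP, LAY, ROW, COL)
print("NO2, hour 0, all layers/cells:", float(no2[0].sum()))
ds.close()
```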
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201501.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150101, 20160105, 20160101, 20160105, 20160101, 20160107, 20160101, 20150101
20150102, 20160105, 20160102, 20160105, 20160102, 20160108, 20160102, 20150102
20150103, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20150103
20150104, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20150104
20150105, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20150105
20150106, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20150106
20150107, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20150107
20150108, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20150108
20150109, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20150109
20150110, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20150110
20150111, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20150111
20150112, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20150112
20150113, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20150113
20150114, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20150114
20150115, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20150115
20150116, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20150116
20150117, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20150117
20150118, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20150118
20150119, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20150119
20150120, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20150120
20150121, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20150121
20150122, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20150122
20150123, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20150123
20150124, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20150124
20150125, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20150125
20150126, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20150126
20150127, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20150127
20150128, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20150128
20150129, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20150129
20150130, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20150130
20150131, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20150131
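Read from the data, each monthly file maps every day of the 2015 episode year to the representative 2016 modeling day whose SMOKE output should be reused, one column per temporal-aggregation scheme: aveday reuses a single average day for the whole month, mwdss a representative Monday/weekday/Saturday/Sunday, and week the matching weekday of one representative week. The _N/_Y suffixes appear to be the holiday-off/holiday-on variants (compare week_N and week_Y for 20150101 above), and the all column passes the date through unchanged. A small reader for this format (a hypothetical helper, not code from the emisqa package):

```python
# Hypothetical helper (not from the emisqa package): parse one monthly
# merge-dates file into {day: {scheme: representative_day}}.
import csv

def load_merge_dates(path):
    with open(path) as f:
        reader = csv.reader(f, skipinitialspace=True)
        header = next(reader)          # Date, aveday_N, ..., week_Y, all
        return {row[0]: dict(zip(header[1:], row[1:])) for row in reader if row}

dates = load_merge_dates("ancillary/2016/smk_merge_dates_201501.txt")
print(dates["20150101"]["week_N"])     # -> 20160107
```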
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201502.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150201, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20150201
20150202, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20150202
20150203, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20150203
20150204, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20150204
20150205, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20150205
20150206, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20150206
20150207, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20150207
20150208, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20150208
20150209, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20150209
20150210, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20150210
20150211, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20150211
20150212, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20150212
20150213, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20150213
20150214, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20150214
20150215, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20150215
20150216, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20150216
20150217, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20150217
20150218, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20150218
20150219, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20150219
20150220, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20150220
20150221, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20150221
20150222, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20150222
20150223, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20150223
20150224, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20150224
20150225, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20150225
20150226, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20150226
20150227, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20150227
20150228, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20150228
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201503.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150301, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20150301
20150302, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20150302
20150303, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20150303
20150304, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20150304
20150305, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20150305
20150306, 20160308, 20160308, 20160308, 20160308, 20160311, 20160311, 20150306
20150307, 20160308, 20160308, 20160312, 20160312, 20160312, 20160312, 20150307
20150308, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20150308
20150309, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20150309
20150310, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20150310
20150311, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20150311
20150312, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20150312
20150313, 20160308, 20160308, 20160308, 20160308, 20160311, 20160311, 20150313
20150314, 20160308, 20160308, 20160312, 20160312, 20160312, 20160312, 20150314
20150315, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20150315
20150316, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20150316
20150317, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20150317
20150318, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20150318
20150319, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20150319
20150320, 20160308, 20160308, 20160308, 20160308, 20160311, 20160311, 20150320
20150321, 20160308, 20160308, 20160312, 20160312, 20160312, 20160312, 20150321
20150322, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20150322
20150323, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20150323
20150324, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20150324
20150325, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20150325
20150326, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20150326
20150327, 20160308, 20160325, 20160308, 20160325, 20160311, 20160325, 20150327
20150328, 20160308, 20160326, 20160312, 20160326, 20160312, 20160326, 20150328
20150329, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20150329
20150330, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20150330
20150331, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20150331
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201504.txt:
--------------------------------------------------------------------------------
smk_merge_dates_201504.txt.no_easter
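On a real checkout, smk_merge_dates_201504.txt is a symlink to the .no_easter variant, so open() follows it transparently; in this flattened listing it appears as a one-line file holding the target name. A loader working from such a dump can resolve the redirect itself (illustrative only, not emisqa code):

```python
# Illustrative redirect resolution for a flattened dump (not emisqa code).
import os

def resolve_merge_file(path):
    """Follow a one-line 'filename' redirect like smk_merge_dates_201504.txt."""
    with open(path) as f:
        first = f.readline().strip()
        rest = f.read().strip()
    if not rest and first.startswith("smk_merge_dates_"):
        return os.path.join(os.path.dirname(path), first)
    return path
```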
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201504.txt.easter:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150401, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150401
20150402, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150402
20150403, 20160405, 20160403, 20160405, 20160403, 20160408, 20160403, 20150403
20150404, 20160405, 20160404, 20160409, 20160404, 20160409, 20160404, 20150404
20150405, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150405
20150406, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150406
20150407, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150407
20150408, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150408
20150409, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150409
20150410, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20150410
20150411, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20150411
20150412, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150412
20150413, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150413
20150414, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150414
20150415, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150415
20150416, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150416
20150417, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20150417
20150418, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20150418
20150419, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150419
20150420, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150420
20150421, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150421
20150422, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150422
20150423, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150423
20150424, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20150424
20150425, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20150425
20150426, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150426
20150427, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150427
20150428, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150428
20150429, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150429
20150430, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150430
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201504.txt.no_easter:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150401, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150401
20150402, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150402
20150403, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20150403
20150404, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20150404
20150405, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150405
20150406, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150406
20150407, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150407
20150408, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150408
20150409, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150409
20150410, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20150410
20150411, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20150411
20150412, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150412
20150413, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150413
20150414, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150414
20150415, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150415
20150416, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150416
20150417, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20150417
20150418, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20150418
20150419, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150419
20150420, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150420
20150421, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150421
20150422, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150422
20150423, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150423
20150424, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20150424
20150425, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20150425
20150426, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20150426
20150427, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20150427
20150428, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20150428
20150429, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20150429
20150430, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20150430
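The .easter and .no_easter variants should differ only on the days the Easter holiday remaps. A quick diff, reusing the load_merge_dates helper sketched earlier, makes the affected dates explicit:

```python
# Expect only 20150403 (Good Friday) and 20150404 to differ between the two
# variants; every other day maps identically.
base = "ancillary/2016/smk_merge_dates_201504.txt"
easter = load_merge_dates(base + ".easter")
no_easter = load_merge_dates(base + ".no_easter")
for day in sorted(easter):
    if easter[day] != no_easter[day]:
        print(day, no_easter[day]["week_Y"], "->", easter[day]["week_Y"])
```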
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201505.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150501, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20150501
20150502, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20150502
20150503, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20150503
20150504, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20150504
20150505, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20150505
20150506, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20150506
20150507, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20150507
20150508, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20150508
20150509, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20150509
20150510, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20150510
20150511, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20150511
20150512, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20150512
20150513, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20150513
20150514, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20150514
20150515, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20150515
20150516, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20150516
20150517, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20150517
20150518, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20150518
20150519, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20150519
20150520, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20150520
20150521, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20150521
20150522, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20150522
20150523, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20150523
20150524, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20150524
20150525, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20150525
20150526, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20150526
20150527, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20150527
20150528, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20150528
20150529, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20150529
20150530, 20160503, 20160530, 20160507, 20160530, 20160507, 20160530, 20150530
20150531, 20160503, 20160531, 20160508, 20160531, 20160508, 20160531, 20150531
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201506.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150601, 20160607, 20160607, 20160606, 20160606, 20160606, 20160606, 20150601
20150602, 20160607, 20160607, 20160607, 20160607, 20160607, 20160607, 20150602
20150603, 20160607, 20160607, 20160607, 20160607, 20160608, 20160608, 20150603
20150604, 20160607, 20160607, 20160607, 20160607, 20160609, 20160609, 20150604
20150605, 20160607, 20160607, 20160607, 20160607, 20160610, 20160610, 20150605
20150606, 20160607, 20160607, 20160611, 20160611, 20160611, 20160611, 20150606
20150607, 20160607, 20160607, 20160612, 20160612, 20160612, 20160612, 20150607
20150608, 20160607, 20160607, 20160606, 20160606, 20160606, 20160606, 20150608
20150609, 20160607, 20160607, 20160607, 20160607, 20160607, 20160607, 20150609
20150610, 20160607, 20160607, 20160607, 20160607, 20160608, 20160608, 20150610
20150611, 20160607, 20160607, 20160607, 20160607, 20160609, 20160609, 20150611
20150612, 20160607, 20160607, 20160607, 20160607, 20160610, 20160610, 20150612
20150613, 20160607, 20160607, 20160611, 20160611, 20160611, 20160611, 20150613
20150614, 20160607, 20160607, 20160612, 20160612, 20160612, 20160612, 20150614
20150615, 20160607, 20160607, 20160606, 20160606, 20160606, 20160606, 20150615
20150616, 20160607, 20160607, 20160607, 20160607, 20160607, 20160607, 20150616
20150617, 20160607, 20160607, 20160607, 20160607, 20160608, 20160608, 20150617
20150618, 20160607, 20160607, 20160607, 20160607, 20160609, 20160609, 20150618
20150619, 20160607, 20160607, 20160607, 20160607, 20160610, 20160610, 20150619
20150620, 20160607, 20160607, 20160611, 20160611, 20160611, 20160611, 20150620
20150621, 20160607, 20160607, 20160612, 20160612, 20160612, 20160612, 20150621
20150622, 20160607, 20160607, 20160606, 20160606, 20160606, 20160606, 20150622
20150623, 20160607, 20160607, 20160607, 20160607, 20160607, 20160607, 20150623
20150624, 20160607, 20160607, 20160607, 20160607, 20160608, 20160608, 20150624
20150625, 20160607, 20160607, 20160607, 20160607, 20160609, 20160609, 20150625
20150626, 20160607, 20160607, 20160607, 20160607, 20160610, 20160610, 20150626
20150627, 20160607, 20160607, 20160611, 20160611, 20160611, 20160611, 20150627
20150628, 20160607, 20160607, 20160612, 20160612, 20160612, 20160612, 20150628
20150629, 20160607, 20160607, 20160606, 20160606, 20160606, 20160606, 20150629
20150630, 20160607, 20160607, 20160607, 20160607, 20160607, 20160607, 20150630
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201507.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150701, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20150701
20150702, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20150702
20150703, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20150703
20150704, 20160712, 20160704, 20160716, 20160704, 20160716, 20160704, 20150704
20150705, 20160712, 20160705, 20160717, 20160705, 20160717, 20160705, 20150705
20150706, 20160712, 20160712, 20160711, 20160711, 20160711, 20160711, 20150706
20150707, 20160712, 20160712, 20160712, 20160712, 20160712, 20160712, 20150707
20150708, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20150708
20150709, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20150709
20150710, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20150710
20150711, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20150711
20150712, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20150712
20150713, 20160712, 20160712, 20160711, 20160711, 20160711, 20160711, 20150713
20150714, 20160712, 20160712, 20160712, 20160712, 20160712, 20160712, 20150714
20150715, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20150715
20150716, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20150716
20150717, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20150717
20150718, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20150718
20150719, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20150719
20150720, 20160712, 20160712, 20160711, 20160711, 20160711, 20160711, 20150720
20150721, 20160712, 20160712, 20160712, 20160712, 20160712, 20160712, 20150721
20150722, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20150722
20150723, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20150723
20150724, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20150724
20150725, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20150725
20150726, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20150726
20150727, 20160712, 20160712, 20160711, 20160711, 20160711, 20160711, 20150727
20150728, 20160712, 20160712, 20160712, 20160712, 20160712, 20160712, 20150728
20150729, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20150729
20150730, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20150730
20150731, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20150731
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201508.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150801, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20150801
20150802, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20150802
20150803, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20150803
20150804, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20150804
20150805, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20150805
20150806, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20150806
20150807, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20150807
20150808, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20150808
20150809, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20150809
20150810, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20150810
20150811, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20150811
20150812, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20150812
20150813, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20150813
20150814, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20150814
20150815, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20150815
20150816, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20150816
20150817, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20150817
20150818, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20150818
20150819, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20150819
20150820, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20150820
20150821, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20150821
20150822, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20150822
20150823, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20150823
20150824, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20150824
20150825, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20150825
20150826, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20150826
20150827, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20150827
20150828, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20150828
20150829, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20150829
20150830, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20150830
20150831, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20150831
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201509.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20150901, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20150901
20150902, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20150902
20150903, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20150903
20150904, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20150904
20150905, 20160913, 20160905, 20160917, 20160905, 20160917, 20160905, 20150905
20150906, 20160913, 20160906, 20160918, 20160906, 20160918, 20160906, 20150906
20150907, 20160913, 20160913, 20160912, 20160912, 20160912, 20160912, 20150907
20150908, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20150908
20150909, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20150909
20150910, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20150910
20150911, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20150911
20150912, 20160913, 20160913, 20160917, 20160917, 20160917, 20160917, 20150912
20150913, 20160913, 20160913, 20160918, 20160918, 20160918, 20160918, 20150913
20150914, 20160913, 20160913, 20160912, 20160912, 20160912, 20160912, 20150914
20150915, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20150915
20150916, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20150916
20150917, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20150917
20150918, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20150918
20150919, 20160913, 20160913, 20160917, 20160917, 20160917, 20160917, 20150919
20150920, 20160913, 20160913, 20160918, 20160918, 20160918, 20160918, 20150920
20150921, 20160913, 20160913, 20160912, 20160912, 20160912, 20160912, 20150921
20150922, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20150922
20150923, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20150923
20150924, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20150924
20150925, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20150925
20150926, 20160913, 20160913, 20160917, 20160917, 20160917, 20160917, 20150926
20150927, 20160913, 20160913, 20160918, 20160918, 20160918, 20160918, 20150927
20150928, 20160913, 20160913, 20160912, 20160912, 20160912, 20160912, 20150928
20150929, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20150929
20150930, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20150930
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201510.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20151001, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20151001
20151002, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20151002
20151003, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20151003
20151004, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20151004
20151005, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20151005
20151006, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20151006
20151007, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20151007
20151008, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20151008
20151009, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20151009
20151010, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20151010
20151011, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20151011
20151012, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20151012
20151013, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20151013
20151014, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20151014
20151015, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20151015
20151016, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20151016
20151017, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20151017
20151018, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20151018
20151019, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20151019
20151020, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20151020
20151021, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20151021
20151022, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20151022
20151023, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20151023
20151024, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20151024
20151025, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20151025
20151026, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20151026
20151027, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20151027
20151028, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20151028
20151029, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20151029
20151030, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20151030
20151031, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20151031
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201511.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20151101, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20151101
20151102, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20151102
20151103, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20151103
20151104, 20161108, 20161108, 20161108, 20161108, 20161109, 20161109, 20151104
20151105, 20161108, 20161108, 20161108, 20161108, 20161110, 20161110, 20151105
20151106, 20161108, 20161108, 20161108, 20161108, 20161111, 20161111, 20151106
20151107, 20161108, 20161108, 20161112, 20161112, 20161112, 20161112, 20151107
20151108, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20151108
20151109, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20151109
20151110, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20151110
20151111, 20161108, 20161108, 20161108, 20161108, 20161109, 20161109, 20151111
20151112, 20161108, 20161108, 20161108, 20161108, 20161110, 20161110, 20151112
20151113, 20161108, 20161108, 20161108, 20161108, 20161111, 20161111, 20151113
20151114, 20161108, 20161108, 20161112, 20161112, 20161112, 20161112, 20151114
20151115, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20151115
20151116, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20151116
20151117, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20151117
20151118, 20161108, 20161108, 20161108, 20161108, 20161109, 20161109, 20151118
20151119, 20161108, 20161108, 20161108, 20161108, 20161110, 20161110, 20151119
20151120, 20161108, 20161108, 20161108, 20161108, 20161111, 20161111, 20151120
20151121, 20161108, 20161108, 20161112, 20161112, 20161112, 20161112, 20151121
20151122, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20151122
20151123, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20151123
20151124, 20161108, 20161124, 20161108, 20161124, 20161108, 20161124, 20151124
20151125, 20161108, 20161125, 20161108, 20161125, 20161109, 20161125, 20151125
20151126, 20161108, 20161126, 20161108, 20161126, 20161110, 20161126, 20151126
20151127, 20161108, 20161108, 20161108, 20161108, 20161111, 20161111, 20151127
20151128, 20161108, 20161108, 20161112, 20161112, 20161112, 20161112, 20151128
20151129, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20151129
20151130, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20151130
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201512.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20151201, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151201
20151202, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151202
20151203, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151203
20151204, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151204
20151205, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20151205
20151206, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20151206
20151207, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20151207
20151208, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151208
20151209, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151209
20151210, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151210
20151211, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151211
20151212, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20151212
20151213, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20151213
20151214, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20151214
20151215, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151215
20151216, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151216
20151217, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151217
20151218, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151218
20151219, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20151219
20151220, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20151220
20151221, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20151221
20151222, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151222
20151223, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151223
20151224, 20161206, 20161224, 20161206, 20161224, 20161208, 20161224, 20151224
20151225, 20161206, 20161225, 20161206, 20161225, 20161209, 20161225, 20151225
20151226, 20161206, 20161226, 20161210, 20161226, 20161210, 20161226, 20151226
20151227, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20151227
20151228, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20151228
20151229, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151229
20151230, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151230
20151231, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151231
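One invariant worth asserting across all of these files is that week_N lands on the same day of the week as the input date (20150101 and its week_N day 20160107 are both Thursdays); the holiday-aware week_Y column intentionally breaks this on holidays. A hedged sanity-check sketch, again reusing the load_merge_dates helper:

```python
# Assumed invariant (verified by spot checks, not guaranteed by the repo):
# week_N preserves day-of-week. The 201504.txt pointer file parses to an
# empty mapping and drops out of the loop naturally.
from datetime import datetime
import glob

for path in sorted(glob.glob("ancillary/2016/smk_merge_dates_2015*.txt")):
    for day, cols in load_merge_dates(path).items():
        src = datetime.strptime(day, "%Y%m%d")
        rep = datetime.strptime(cols["week_N"], "%Y%m%d")
        assert src.weekday() == rep.weekday(), (path, day, cols["week_N"])
```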
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201512.txt.old:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20151201, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151201
20151202, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151202
20151203, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20151203
20151204, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20151204
20151205, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20151205
20151206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151206
20151207, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151207
20151208, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151208
20151209, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151209
20151210, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20151210
20151211, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20151211
20151212, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20151212
20151213, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151213
20151214, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151214
20151215, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151215
20151216, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151216
20151217, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20151217
20151218, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20151218
20151219, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20151219
20151220, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151220
20151221, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151221
20151222, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151222
20151223, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151223
20151224, 20161206, 20161224, 20161210, 20161224, 20161210, 20161224, 20151224
20151225, 20161206, 20161225, 20161211, 20161225, 20161211, 20161225, 20151225
20151226, 20161206, 20161226, 20161205, 20161226, 20161205, 20161226, 20151226
20151227, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20151227
20151228, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20151228
20151229, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20151229
20151230, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20151230
20151231, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20151231
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201601.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20160101, 20160105, 20160101, 20160105, 20160101, 20160108, 20160101, 20160101
20160102, 20160105, 20160102, 20160109, 20160102, 20160109, 20160102, 20160102
20160103, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20160103
20160104, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20160104
20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105
20160106, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20160106
20160107, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20160107
20160108, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20160108
20160109, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20160109
20160110, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20160110
20160111, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20160111
20160112, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20160112
20160113, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20160113
20160114, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20160114
20160115, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20160115
20160116, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20160116
20160117, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20160117
20160118, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20160118
20160119, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20160119
20160120, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20160120
20160121, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20160121
20160122, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20160122
20160123, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20160123
20160124, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20160124
20160125, 20160105, 20160105, 20160104, 20160104, 20160104, 20160104, 20160125
20160126, 20160105, 20160105, 20160105, 20160105, 20160105, 20160105, 20160126
20160127, 20160105, 20160105, 20160105, 20160105, 20160106, 20160106, 20160127
20160128, 20160105, 20160105, 20160105, 20160105, 20160107, 20160107, 20160128
20160129, 20160105, 20160105, 20160105, 20160105, 20160108, 20160108, 20160129
20160130, 20160105, 20160105, 20160109, 20160109, 20160109, 20160109, 20160130
20160131, 20160105, 20160105, 20160110, 20160110, 20160110, 20160110, 20160131
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201602.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20160201, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20160201
20160202, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20160202
20160203, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20160203
20160204, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20160204
20160205, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20160205
20160206, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20160206
20160207, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20160207
20160208, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20160208
20160209, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20160209
20160210, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20160210
20160211, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20160211
20160212, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20160212
20160213, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20160213
20160214, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20160214
20160215, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20160215
20160216, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20160216
20160217, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20160217
20160218, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20160218
20160219, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20160219
20160220, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20160220
20160221, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20160221
20160222, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20160222
20160223, 20160202, 20160202, 20160209, 20160209, 20160209, 20160209, 20160223
20160224, 20160202, 20160202, 20160209, 20160209, 20160210, 20160210, 20160224
20160225, 20160202, 20160202, 20160209, 20160209, 20160211, 20160211, 20160225
20160226, 20160202, 20160202, 20160209, 20160209, 20160212, 20160212, 20160226
20160227, 20160202, 20160202, 20160213, 20160213, 20160213, 20160213, 20160227
20160228, 20160202, 20160202, 20160214, 20160214, 20160214, 20160214, 20160228
20160229, 20160202, 20160202, 20160208, 20160208, 20160208, 20160208, 20160229
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201603.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20160301, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20160301
20160302, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20160302
20160303, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20160303
20160304, 20160308, 20160308, 20160308, 20160308, 20160311, 20160311, 20160304
20160305, 20160308, 20160308, 20160312, 20160312, 20160312, 20160312, 20160305
20160306, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20160306
20160307, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20160307
20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308
20160309, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20160309
20160310, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20160310
20160311, 20160308, 20160308, 20160308, 20160308, 20160311, 20160311, 20160311
20160312, 20160308, 20160308, 20160312, 20160312, 20160312, 20160312, 20160312
20160313, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20160313
20160314, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20160314
20160315, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20160315
20160316, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20160316
20160317, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20160317
20160318, 20160308, 20160308, 20160308, 20160308, 20160311, 20160311, 20160318
20160319, 20160308, 20160308, 20160312, 20160312, 20160312, 20160312, 20160319
20160320, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20160320
20160321, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20160321
20160322, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20160322
20160323, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20160323
20160324, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20160324
20160325, 20160308, 20160325, 20160308, 20160325, 20160311, 20160325, 20160325
20160326, 20160308, 20160326, 20160312, 20160326, 20160312, 20160326, 20160326
20160327, 20160308, 20160308, 20160313, 20160313, 20160313, 20160313, 20160327
20160328, 20160308, 20160308, 20160307, 20160307, 20160307, 20160307, 20160328
20160329, 20160308, 20160308, 20160308, 20160308, 20160308, 20160308, 20160329
20160330, 20160308, 20160308, 20160308, 20160308, 20160309, 20160309, 20160330
20160331, 20160308, 20160308, 20160308, 20160308, 20160310, 20160310, 20160331
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201604.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20160401, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20160401
20160402, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20160402
20160403, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20160403
20160404, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20160404
20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405
20160406, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20160406
20160407, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20160407
20160408, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20160408
20160409, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20160409
20160410, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20160410
20160411, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20160411
20160412, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20160412
20160413, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20160413
20160414, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20160414
20160415, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20160415
20160416, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20160416
20160417, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20160417
20160418, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20160418
20160419, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20160419
20160420, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20160420
20160421, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20160421
20160422, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20160422
20160423, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20160423
20160424, 20160405, 20160405, 20160410, 20160410, 20160410, 20160410, 20160424
20160425, 20160405, 20160405, 20160404, 20160404, 20160404, 20160404, 20160425
20160426, 20160405, 20160405, 20160405, 20160405, 20160405, 20160405, 20160426
20160427, 20160405, 20160405, 20160405, 20160405, 20160406, 20160406, 20160427
20160428, 20160405, 20160405, 20160405, 20160405, 20160407, 20160407, 20160428
20160429, 20160405, 20160405, 20160405, 20160405, 20160408, 20160408, 20160429
20160430, 20160405, 20160405, 20160409, 20160409, 20160409, 20160409, 20160430
--------------------------------------------------------------------------------
/ancillary/2016/smk_merge_dates_201605.txt:
--------------------------------------------------------------------------------
Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all
20160501, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20160501
20160502, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20160502
20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503
20160504, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20160504
20160505, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20160505
20160506, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20160506
20160507, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20160507
20160508, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20160508
20160509, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20160509
20160510, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20160510
20160511, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20160511
20160512, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20160512
20160513, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20160513
20160514, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20160514
20160515, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20160515
20160516, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20160516
20160517, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20160517
20160518, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20160518
20160519, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20160519
20160520, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20160520
20160521, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20160521
20160522, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20160522
20160523, 20160503, 20160503, 20160502, 20160502, 20160502, 20160502, 20160523
20160524, 20160503, 20160503, 20160503, 20160503, 20160503, 20160503, 20160524
20160525, 20160503, 20160503, 20160503, 20160503, 20160504, 20160504, 20160525
20160526, 20160503, 20160503, 20160503, 20160503, 20160505, 20160505, 20160526
20160527, 20160503, 20160503, 20160503, 20160503, 20160506, 20160506, 20160527
20160528, 20160503, 20160503, 20160507, 20160507, 20160507, 20160507, 20160528
20160529, 20160503, 20160503, 20160508, 20160508, 20160508, 20160508, 20160529
20160530, 20160503, 20160530, 20160502, 20160530, 20160502, 20160530, 20160530
20160531, 20160503, 20160531, 20160503, 20160531, 20160503, 20160531, 20160531
20160607, 20160607, 20160607, 20160610, 20160610, 20160617 19 | 20160618, 20160607, 20160607, 20160611, 20160611, 20160611, 20160611, 20160618 20 | 20160619, 20160607, 20160607, 20160612, 20160612, 20160612, 20160612, 20160619 21 | 20160620, 20160607, 20160607, 20160606, 20160606, 20160606, 20160606, 20160620 22 | 20160621, 20160607, 20160607, 20160607, 20160607, 20160607, 20160607, 20160621 23 | 20160622, 20160607, 20160607, 20160607, 20160607, 20160608, 20160608, 20160622 24 | 20160623, 20160607, 20160607, 20160607, 20160607, 20160609, 20160609, 20160623 25 | 20160624, 20160607, 20160607, 20160607, 20160607, 20160610, 20160610, 20160624 26 | 20160625, 20160607, 20160607, 20160611, 20160611, 20160611, 20160611, 20160625 27 | 20160626, 20160607, 20160607, 20160612, 20160612, 20160612, 20160612, 20160626 28 | 20160627, 20160607, 20160607, 20160606, 20160606, 20160606, 20160606, 20160627 29 | 20160628, 20160607, 20160607, 20160607, 20160607, 20160607, 20160607, 20160628 30 | 20160629, 20160607, 20160607, 20160607, 20160607, 20160608, 20160608, 20160629 31 | 20160630, 20160607, 20160607, 20160607, 20160607, 20160609, 20160609, 20160630 32 | -------------------------------------------------------------------------------- /ancillary/2016/smk_merge_dates_201607.txt: -------------------------------------------------------------------------------- 1 | Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all 2 | 20160701, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20160701 3 | 20160702, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20160702 4 | 20160703, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20160703 5 | 20160704, 20160712, 20160704, 20160711, 20160704, 20160711, 20160704, 20160704 6 | 20160705, 20160712, 20160705, 20160712, 20160705, 20160712, 20160705, 20160705 7 | 20160706, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20160706 8 | 20160707, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20160707 9 | 20160708, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20160708 10 | 20160709, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20160709 11 | 20160710, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20160710 12 | 20160711, 20160712, 20160712, 20160711, 20160711, 20160711, 20160711, 20160711 13 | 20160712, 20160712, 20160712, 20160712, 20160712, 20160712, 20160712, 20160712 14 | 20160713, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20160713 15 | 20160714, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20160714 16 | 20160715, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20160715 17 | 20160716, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20160716 18 | 20160717, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20160717 19 | 20160718, 20160712, 20160712, 20160711, 20160711, 20160711, 20160711, 20160718 20 | 20160719, 20160712, 20160712, 20160712, 20160712, 20160712, 20160712, 20160719 21 | 20160720, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20160720 22 | 20160721, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20160721 23 | 20160722, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20160722 24 | 20160723, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20160723 25 | 20160724, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20160724 26 | 20160725, 20160712, 20160712, 20160711, 20160711, 20160711, 20160711, 20160725 27 | 20160726, 20160712, 
20160712, 20160712, 20160712, 20160712, 20160712, 20160726 28 | 20160727, 20160712, 20160712, 20160712, 20160712, 20160713, 20160713, 20160727 29 | 20160728, 20160712, 20160712, 20160712, 20160712, 20160714, 20160714, 20160728 30 | 20160729, 20160712, 20160712, 20160712, 20160712, 20160715, 20160715, 20160729 31 | 20160730, 20160712, 20160712, 20160716, 20160716, 20160716, 20160716, 20160730 32 | 20160731, 20160712, 20160712, 20160717, 20160717, 20160717, 20160717, 20160731 33 | -------------------------------------------------------------------------------- /ancillary/2016/smk_merge_dates_201608.txt: -------------------------------------------------------------------------------- 1 | Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all 2 | 20160801, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20160801 3 | 20160802, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20160802 4 | 20160803, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20160803 5 | 20160804, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20160804 6 | 20160805, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20160805 7 | 20160806, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20160806 8 | 20160807, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20160807 9 | 20160808, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20160808 10 | 20160809, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20160809 11 | 20160810, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20160810 12 | 20160811, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20160811 13 | 20160812, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20160812 14 | 20160813, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20160813 15 | 20160814, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20160814 16 | 20160815, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20160815 17 | 20160816, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20160816 18 | 20160817, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20160817 19 | 20160818, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20160818 20 | 20160819, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20160819 21 | 20160820, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20160820 22 | 20160821, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20160821 23 | 20160822, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20160822 24 | 20160823, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20160823 25 | 20160824, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20160824 26 | 20160825, 20160802, 20160802, 20160809, 20160809, 20160811, 20160811, 20160825 27 | 20160826, 20160802, 20160802, 20160809, 20160809, 20160812, 20160812, 20160826 28 | 20160827, 20160802, 20160802, 20160813, 20160813, 20160813, 20160813, 20160827 29 | 20160828, 20160802, 20160802, 20160814, 20160814, 20160814, 20160814, 20160828 30 | 20160829, 20160802, 20160802, 20160808, 20160808, 20160808, 20160808, 20160829 31 | 20160830, 20160802, 20160802, 20160809, 20160809, 20160809, 20160809, 20160830 32 | 20160831, 20160802, 20160802, 20160809, 20160809, 20160810, 20160810, 20160831 33 | -------------------------------------------------------------------------------- /ancillary/2016/smk_merge_dates_201609.txt: 
-------------------------------------------------------------------------------- 1 | Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all 2 | 20160901, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20160901 3 | 20160902, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20160902 4 | 20160903, 20160913, 20160913, 20160917, 20160917, 20160917, 20160917, 20160903 5 | 20160904, 20160913, 20160913, 20160918, 20160918, 20160918, 20160918, 20160904 6 | 20160905, 20160913, 20160905, 20160912, 20160905, 20160912, 20160905, 20160905 7 | 20160906, 20160913, 20160906, 20160913, 20160906, 20160913, 20160906, 20160906 8 | 20160907, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20160907 9 | 20160908, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20160908 10 | 20160909, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20160909 11 | 20160910, 20160913, 20160913, 20160917, 20160917, 20160917, 20160917, 20160910 12 | 20160911, 20160913, 20160913, 20160918, 20160918, 20160918, 20160918, 20160911 13 | 20160912, 20160913, 20160913, 20160912, 20160912, 20160912, 20160912, 20160912 14 | 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913 15 | 20160914, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20160914 16 | 20160915, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20160915 17 | 20160916, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20160916 18 | 20160917, 20160913, 20160913, 20160917, 20160917, 20160917, 20160917, 20160917 19 | 20160918, 20160913, 20160913, 20160918, 20160918, 20160918, 20160918, 20160918 20 | 20160919, 20160913, 20160913, 20160912, 20160912, 20160912, 20160912, 20160919 21 | 20160920, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20160920 22 | 20160921, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20160921 23 | 20160922, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20160922 24 | 20160923, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20160923 25 | 20160924, 20160913, 20160913, 20160917, 20160917, 20160917, 20160917, 20160924 26 | 20160925, 20160913, 20160913, 20160918, 20160918, 20160918, 20160918, 20160925 27 | 20160926, 20160913, 20160913, 20160912, 20160912, 20160912, 20160912, 20160926 28 | 20160927, 20160913, 20160913, 20160913, 20160913, 20160913, 20160913, 20160927 29 | 20160928, 20160913, 20160913, 20160913, 20160913, 20160914, 20160914, 20160928 30 | 20160929, 20160913, 20160913, 20160913, 20160913, 20160915, 20160915, 20160929 31 | 20160930, 20160913, 20160913, 20160913, 20160913, 20160916, 20160916, 20160930 32 | -------------------------------------------------------------------------------- /ancillary/2016/smk_merge_dates_201610.txt: -------------------------------------------------------------------------------- 1 | Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all 2 | 20161001, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20161001 3 | 20161002, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20161002 4 | 20161003, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20161003 5 | 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004 6 | 20161005, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20161005 7 | 20161006, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20161006 8 | 20161007, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20161007 9 | 20161008, 20161004, 20161004, 
20161008, 20161008, 20161008, 20161008, 20161008 10 | 20161009, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20161009 11 | 20161010, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20161010 12 | 20161011, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20161011 13 | 20161012, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20161012 14 | 20161013, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20161013 15 | 20161014, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20161014 16 | 20161015, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20161015 17 | 20161016, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20161016 18 | 20161017, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20161017 19 | 20161018, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20161018 20 | 20161019, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20161019 21 | 20161020, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20161020 22 | 20161021, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20161021 23 | 20161022, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20161022 24 | 20161023, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20161023 25 | 20161024, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20161024 26 | 20161025, 20161004, 20161004, 20161004, 20161004, 20161004, 20161004, 20161025 27 | 20161026, 20161004, 20161004, 20161004, 20161004, 20161005, 20161005, 20161026 28 | 20161027, 20161004, 20161004, 20161004, 20161004, 20161006, 20161006, 20161027 29 | 20161028, 20161004, 20161004, 20161004, 20161004, 20161007, 20161007, 20161028 30 | 20161029, 20161004, 20161004, 20161008, 20161008, 20161008, 20161008, 20161029 31 | 20161030, 20161004, 20161004, 20161009, 20161009, 20161009, 20161009, 20161030 32 | 20161031, 20161004, 20161004, 20161003, 20161003, 20161003, 20161003, 20161031 33 | -------------------------------------------------------------------------------- /ancillary/2016/smk_merge_dates_201611.txt: -------------------------------------------------------------------------------- 1 | Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all 2 | 20161101, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20161101 3 | 20161102, 20161108, 20161108, 20161108, 20161108, 20161109, 20161109, 20161102 4 | 20161103, 20161108, 20161108, 20161108, 20161108, 20161110, 20161110, 20161103 5 | 20161104, 20161108, 20161108, 20161108, 20161108, 20161111, 20161111, 20161104 6 | 20161105, 20161108, 20161108, 20161112, 20161112, 20161112, 20161112, 20161105 7 | 20161106, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20161106 8 | 20161107, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20161107 9 | 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108 10 | 20161109, 20161108, 20161108, 20161108, 20161108, 20161109, 20161109, 20161109 11 | 20161110, 20161108, 20161108, 20161108, 20161108, 20161110, 20161110, 20161110 12 | 20161111, 20161108, 20161108, 20161108, 20161108, 20161111, 20161111, 20161111 13 | 20161112, 20161108, 20161108, 20161112, 20161112, 20161112, 20161112, 20161112 14 | 20161113, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20161113 15 | 20161114, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20161114 16 | 20161115, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20161115 17 | 20161116, 20161108, 20161108, 
20161108, 20161108, 20161109, 20161109, 20161116 18 | 20161117, 20161108, 20161108, 20161108, 20161108, 20161110, 20161110, 20161117 19 | 20161118, 20161108, 20161108, 20161108, 20161108, 20161111, 20161111, 20161118 20 | 20161119, 20161108, 20161108, 20161112, 20161112, 20161112, 20161112, 20161119 21 | 20161120, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20161120 22 | 20161121, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20161121 23 | 20161122, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20161122 24 | 20161123, 20161108, 20161108, 20161108, 20161108, 20161109, 20161109, 20161123 25 | 20161124, 20161108, 20161124, 20161108, 20161124, 20161110, 20161124, 20161124 26 | 20161125, 20161108, 20161125, 20161108, 20161125, 20161111, 20161125, 20161125 27 | 20161126, 20161108, 20161126, 20161112, 20161126, 20161112, 20161126, 20161126 28 | 20161127, 20161108, 20161108, 20161113, 20161113, 20161113, 20161113, 20161127 29 | 20161128, 20161108, 20161108, 20161107, 20161107, 20161107, 20161107, 20161128 30 | 20161129, 20161108, 20161108, 20161108, 20161108, 20161108, 20161108, 20161129 31 | 20161130, 20161108, 20161108, 20161108, 20161108, 20161109, 20161109, 20161130 32 | -------------------------------------------------------------------------------- /ancillary/2016/smk_merge_dates_201612.txt: -------------------------------------------------------------------------------- 1 | Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all 2 | 20161201, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161201 3 | 20161202, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161202 4 | 20161203, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161203 5 | 20161204, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20161204 6 | 20161205, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20161205 7 | 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206 8 | 20161207, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161207 9 | 20161208, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161208 10 | 20161209, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161209 11 | 20161210, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161210 12 | 20161211, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20161211 13 | 20161212, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20161212 14 | 20161213, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161213 15 | 20161214, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161214 16 | 20161215, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161215 17 | 20161216, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161216 18 | 20161217, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161217 19 | 20161218, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20161218 20 | 20161219, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20161219 21 | 20161220, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161220 22 | 20161221, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161221 23 | 20161222, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161222 24 | 20161223, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161223 25 | 20161224, 20161206, 20161224, 20161210, 20161224, 20161210, 20161224, 20161224 26 | 20161225, 20161206, 20161225, 
20161211, 20161225, 20161211, 20161225, 20161225 27 | 20161226, 20161206, 20161226, 20161205, 20161226, 20161205, 20161226, 20161226 28 | 20161227, 20161206, 20161227, 20161206, 20161227, 20161206, 20161227, 20161227 29 | 20161228, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161228 30 | 20161229, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161229 31 | 20161230, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161230 32 | 20161231, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161231 33 | -------------------------------------------------------------------------------- /ancillary/2016/smk_merge_dates_201612.txt.no26decholiday: -------------------------------------------------------------------------------- 1 | Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all 2 | 20161201, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161201 3 | 20161202, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161202 4 | 20161203, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161203 5 | 20161204, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20161204 6 | 20161205, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20161205 7 | 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206 8 | 20161207, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161207 9 | 20161208, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161208 10 | 20161209, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161209 11 | 20161210, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161210 12 | 20161211, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20161211 13 | 20161212, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20161212 14 | 20161213, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161213 15 | 20161214, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161214 16 | 20161215, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161215 17 | 20161216, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161216 18 | 20161217, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161217 19 | 20161218, 20161206, 20161206, 20161211, 20161211, 20161211, 20161211, 20161218 20 | 20161219, 20161206, 20161206, 20161205, 20161205, 20161205, 20161205, 20161219 21 | 20161220, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161220 22 | 20161221, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161221 23 | 20161222, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161222 24 | 20161223, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161223 25 | 20161224, 20161206, 20161224, 20161210, 20161224, 20161210, 20161224, 20161224 26 | 20161225, 20161206, 20161225, 20161211, 20161225, 20161211, 20161225, 20161225 27 | 20161226, 20161206, 20161226, 20161205, 20161226, 20161205, 20161226, 20161226 28 | 20161227, 20161206, 20161206, 20161206, 20161206, 20161206, 20161206, 20161227 29 | 20161228, 20161206, 20161206, 20161206, 20161206, 20161207, 20161207, 20161228 30 | 20161229, 20161206, 20161206, 20161206, 20161206, 20161208, 20161208, 20161229 31 | 20161230, 20161206, 20161206, 20161206, 20161206, 20161209, 20161209, 20161230 32 | 20161231, 20161206, 20161206, 20161210, 20161210, 20161210, 20161210, 20161231 33 | -------------------------------------------------------------------------------- 
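[Editor's note] The smk_merge_dates_YYYYMM.txt tables above map each calendar day to its representative SMOKE run date under several temporal-averaging schemes visible in the data: aveday (a single representative day per month), mwdss (separate Monday, Tuesday-through-Friday, Saturday, and Sunday representatives), week (one representative week per month), and all (every day run individually). The _N/_Y suffixes mark whether holidays keep their own run date (_Y) or fall back to the representative day (_N); compare 20160704 under week_N (20160711) versus week_Y (20160704). A minimal lookup sketch, assuming the ancillary/2016 layout shown above (the path, function name, and column name here are illustrative, not part of the package):

import csv

def rep_date(merge_file, day, mode='week_Y'):
    # Return the representative run date (YYYYMMDD string) for a calendar
    # day (YYYYMMDD string) under the given averaging-mode column.
    with open(merge_file) as f:
        reader = csv.reader(f)
        header = [col.strip() for col in next(reader)]
        idx = header.index(mode)
        for row in reader:
            if row[0].strip() == day:
                return row[idx].strip()
    raise KeyError('%s not found in %s' % (day, merge_file))

# e.g. rep_date('ancillary/2016/smk_merge_dates_201606.txt', '20160615', 'week_Y')
# returns '20160608', matching the table above.

The script that generates these tables, create_merge_dates_ann.py, follows.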
/ancillary/create_merge_dates_ann.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | # Purpose: Creates all annual merge dates files including ramp-ups 4 | # Usage: Change variables for run days and year. 5 | # Type ./create_merge_dates.py <year> 6 | # 7 | # By James Beidler (Beidler.James@epa.gov) 2/09/10 8 | # Changed holidays method from holidays file to calculated 26 Mar 2014 9 | # Requires Python 2 (uses print statements and xrange) 10 | 11 | import calendar, sys 12 | from datetime import date, timedelta 13 | 14 | # Run information 15 | 16 | # Path to output files 17 | outPath = "." 18 | 19 | ### End Variable Section. You should not have to change anything past this point. 20 | 21 | # Pass an error if the command line is incorrect 22 | if len(sys.argv) != 2 or len(sys.argv[1]) != 4: 23 | print "You must pass a single argument of a four digit base year. " 24 | print "./create_merge_dates.py <year>" 25 | sys.exit() 26 | 27 | year = sys.argv[1] 28 | year = int(year) 29 | 30 | def checkHol(repDate, calDate, holidays): 31 | # Check if a date is a holiday 32 | if calDate in holidays: 33 | return calDate 34 | else: 35 | return repDate 36 | 37 | def goodFriday(year): 38 | # Use Butcher's algorithm to calculate Easter, then back up two days to Good Friday 39 | a = year % 19 40 | b = year // 100 41 | c = year % 100 42 | d = ((19 * a) + b - (b // 4) - ((b - ((b + 8) // 25) + 1) // 3) + 15) % 30 43 | e = (32 + (2 * (b % 4)) + (2 * (c // 4)) - d - (c % 4)) % 7 44 | f = d + e - (7 * ((a + (11 * d) + (22 * e)) // 451)) + 114 45 | month = f // 31 46 | day = (f % 31) + 1 47 | 48 | easter = date(year, month, day) 49 | gf = easter - timedelta(2) 50 | 51 | return gf 52 | 53 | def getXDay(year, month, xOccur, dayOfWeek): 54 | ''' 55 | Get the Xth occurrence of a specific weekday in a month 56 | e.g.
4th Monday in April 57 | Works for Memorial Day, Labor Day 58 | 59 | xOccur = 1, 2, 3, 4, 5 (where 5 is understood as the last occurrence, whether that is the 4th or 5th occurrence) 60 | dayOfWeek = 0, 1, 2, 3, 4, 5, 6 (where 0 is Monday and 6 is Sunday, matching calendar.monthcalendar's default first weekday) 61 | ''' 62 | monArray = calendar.monthcalendar(year, month) 63 | 64 | # Convert occurrence to a Python index number 65 | if xOccur == 5: 66 | wom = -1 67 | else: 68 | wom = xOccur - 1 69 | 70 | # Move up an index if that weekday doesn't exist in the first list in the array 71 | if wom >= 0 and monArray[0][dayOfWeek] == 0: 72 | wom += 1 73 | 74 | day = monArray[wom][dayOfWeek] 75 | 76 | # If the weekday does not exist in the last list, then move back an index 77 | if wom == -1 and day == 0: 78 | day = monArray[-2][dayOfWeek] 79 | 80 | return date(year, month, day) 81 | 82 | def getHolidays(year): 83 | # Fixed date holidays: New Year's, 4th of July, Christmas Eve, Christmas Day 84 | fix = [date(year - 1, 12, 24), date(year - 1, 12, 25), date(year, 1, 1), \ 85 | date(year, 7, 4), date(year, 12, 24), date(year, 12, 25)] 86 | 87 | # If the 4th of July falls on a weekend, then add the day before or day after 88 | if date(year, 7, 4).weekday() == 5: 89 | fix.append(date(year, 7, 3)) 90 | elif date(year, 7, 4).weekday() == 6: 91 | fix.append(date(year, 7, 5)) 92 | 93 | gd = goodFriday(year) # Good Friday 94 | mem = getXDay(year, 5, 5, 0) # Memorial Day 95 | lab = getXDay(year, 9, 1, 0) # Labor Day 96 | thx = getXDay(year, 11, 4, 3) # Thanksgiving 97 | thxa = thx + timedelta(1) # Day after Thanksgiving 98 | 99 | hd = fix + [gd, mem, lab, thx, thxa] 100 | 101 | # Add the day after each holiday 102 | holidays = hd[:] 103 | for hDay in hd: 104 | holidays.append(hDay + timedelta(1)) 105 | 106 | return holidays 107 | 108 | def getAveDay(year, month, holidays): 109 | # Find the average day for the month 110 | # This uses a *slightly* different method than the getRepWeek 111 | # for some inexplicable reason 112 | for wk in calendar.monthcalendar(year, month): 113 | if 0 in wk: 114 | continue 115 | 116 | dWeek = [date(year, month, day) for day in wk] 117 | 118 | if dWeek[1] in holidays: 119 | continue 120 | else: 121 | break 122 | # hWk = False 123 | # for day in dWeek: 124 | # if day in holidays: 125 | # hWk = True 126 | # 127 | # # Return the rep_week if there were no holidays 128 | # if hWk == False: 129 | # break 130 | 131 | return dWeek[1] 132 | 133 | def getRepWeek(year, month, holidays): 134 | # Find representative week in the base year 135 | # This is the first full week in the month without holidays 136 | # For some reason if the first Monday is the first day of the month 137 | # then you wouldn't use that week as a rep week...?
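# [Editor's note] Worked example: calendar.monthcalendar() returns Monday-first
# weeks padded with zeros, e.g. calendar.monthcalendar(2016, 6) begins
# [[0, 0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11, 12], ...]. The first sublist
# contains zeros, so the first full candidate week is 6-12 June; it holds no
# holidays, which matches the 20160606-20160612 representative days in
# smk_merge_dates_201606.txt above.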
138 | for wk in calendar.monthcalendar(year, month): 139 | if 0 in wk or wk[0] == 1: 140 | continue 141 | 142 | rep_week = [date(year, month, day) for day in wk] 143 | 144 | hWk = False 145 | for day in rep_week: 146 | if day in holidays: 147 | hWk = True 148 | 149 | # Return the rep_week if there were no holidays 150 | if hWk == False: 151 | break 152 | 153 | return rep_week 154 | 155 | # Generate the holidays 156 | holidays = getHolidays(year) 157 | 158 | # Loop for each month, starting with the ramp-up 159 | for month in xrange(13): 160 | if month == 0: 161 | # Use settings for ramp-up 162 | run_year = year - 1 163 | month = 12 164 | outFileName = 'smk_merge_dates_%s12.txt' %run_year 165 | else: 166 | run_year = year 167 | outFileName = 'smk_merge_dates_%s%0.2d.txt' %(run_year, month) 168 | 169 | outFile = open('%s/%s' %(outPath, outFileName), 'w') 170 | 171 | # Get the representative Tuesday 172 | ave_rep = getAveDay(year, month, holidays) 173 | # Get the representative week 174 | rep_week = getRepWeek(year, month, holidays) 175 | 176 | # Write header 177 | outFile.write(' Date, aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all\n') 178 | 179 | # Loop for each day in the month of the calendar year 180 | for day in range(1, calendar.monthrange(run_year, month)[1] + 1): 181 | dDate = date(run_year, month, day) 182 | aDate = date(year, month, day) 183 | day_of_week = calendar.weekday(year, month, day) # Find the day of the week in the calendar year 184 | wk_day = rep_week[day_of_week] 185 | mw_day = rep_week[day_of_week] # Default representative day 186 | if day_of_week in range(1,5): 187 | mw_day = rep_week[1] # Overridden for Tues-Fri 188 | 189 | runDate = dDate 190 | avedayN = ave_rep 191 | avedayY = checkHol(ave_rep, aDate, holidays) 192 | mwdssN = mw_day 193 | mwdssY = checkHol(mw_day, aDate, holidays) 194 | weekN = wk_day 195 | weekY = checkHol(wk_day, aDate, holidays) 196 | 197 | day_list = (runDate, avedayN, avedayY, mwdssN, mwdssY, weekN, weekY, runDate) 198 | outFile.write('%s\n' %', '.join([day.strftime('%Y%m%d') for day in day_list])) 199 | 200 | -------------------------------------------------------------------------------- /ancillary/griddesc.txt: -------------------------------------------------------------------------------- 1 | ! coords --line: name; type, P-alpha, P-beta, P-gamma, xcent, ycent 2 | 'LAM_40N97W' 3 | 2, 33.0D0, 45.D0,-97.D0,-97.D0, 40.D0 4 | ' ' ! end coords. grids: name; xorig,yorig,xcell,ycell,ncols,nrows,nthik 5 | '36US1_148X112' ! abbrev. = 36US1 6 | 'LAM_40N97W', -2736.D3, -2088.D3, 36.D3, 36.D3, 148, 112, 1 7 | '12US1_459X299', ! abbrev.=12US1, common name = Continental 12km grid 8 | 'LAM_40N97W', -2556000.000, -1728000.000, 12.D3, 12.D3, 459, 299, 1 9 | '12US2', ! 12km continental, slightly smaller than 12US1 10 | 'LAM_40N97W', -2412000.0, -1620000.0, 12000.0, 12000.0, 396, 246, 1 11 | 'US12KM_444X336', ! 12km national US 12 | 'LAM_40N97W', -2736.D3, -2088.D3, 12.D3, 12.D3, 444, 336, 1 13 | ' ' ! end grids.
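! [Editor's note] This follows the I/O API GRIDDESC convention: coordinate type 2 is
! Lambert conformal conic, so 'LAM_40N97W' uses standard parallels 33N and 45N, central
! meridian 97W, and projection center (97W, 40N). Each grid line lists the SW-corner
! offsets xorig/yorig and cell sizes xcell/ycell in meters (e.g. -2736.D3 = -2,736,000 m),
! followed by ncols, nrows, nthik. These trailing comment lines sit after the end-grids
! marker, where GRIDDESC readers stop parsing.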
14 | -------------------------------------------------------------------------------- /bin/emisqa: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | import emisqa 8 | from emisqa.formulas import calc_form 9 | from emisqa.helpers import clean_temp 10 | from emisqa.dataout.data_out import * 11 | from emisqa.run_parse import * 12 | 13 | # Get the command line options in an object 14 | opts = emisqa.run_parse.RunOpts() 15 | 16 | # Get the output dictionary of species arrays 17 | out_dict = emisqa.run_select.runQA(opts) 18 | 19 | # Calculate any output species from formulas 20 | out_dict = calc_form(out_dict, opts.formK, opts.formNK, opts.ignore_spec, opts.verbosity) 21 | 22 | # Output the dictionary into a csv or ncf file 23 | outfile = DataOut(opts.outfile_name, opts.out_type, opts.gsdate, opts.verbosity) 24 | outfile.write_outfile(out_dict, opts.grid, opts.region, opts.tons, opts.units, opts.srgfile) 25 | 26 | # Clean up temporary zip files 27 | clean_temp(opts.zip_dict) 28 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /emisqa/__init__.py: -------------------------------------------------------------------------------- 1 | __doc__ = r""" 2 | PYQA 3 | """ 4 | 5 | __all__ = ['camx','cmaq','csv','dataout','dateloop','formulas','helpers','inline','run_parse','runtypes','species_array','default_paths','data_file','run_select'] 6 | 7 | import emisqa.default_paths 8 | from emisqa.helpers import * 9 | from emisqa.run_parse import * 10 | 11 | import emisqa.camx 12 | import emisqa.cmaq 13 | import emisqa.dataout 14 | import emisqa.dateloop 15 | import emisqa.formulas 16 | import emisqa.inline 17 | import emisqa.species_array 18 | import emisqa.data_file 19 | import emisqa.run_select 20 | import emisqa.runtypes 21 | import emisqa.chem_mechs 22 | import emisqa.csv 23 | -------------------------------------------------------------------------------- /emisqa/camx/__init__.py: -------------------------------------------------------------------------------- 1 | all = ['read_uam',] 2 | 3 | import emisqa.camx.read_uam 4 | -------------------------------------------------------------------------------- /emisqa/camx/read_uam.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import gzip, random 3 | import os.path 4 | from emisqa.default_paths import tmp_dir 5 | from emisqa.helpers import data_blocks 6 | 7 | class CAMxFile(object): 8 | def __init__(self, infile, verbosity, ptsr, zip_dict): 9 | self.verbosity = verbosity 10 | self.infile_name = infile 11 | self.camx = self.open_uam(zip_dict) 12 | # Defining binary file formats 13 | # format = [('dataname','[number of words][endian-ness][datatype]'), ...]
14 | # ex: [('sep','>i'),('note','60>i')] - the first word in the binary file will be called "sep", it is a big-endian 32-bit integer; 15 | # the next sixty words will be called "note", they are big-endian 32-bit integers which will be converted into ASCII text 16 | # File header format definition 17 | self.h_dtype = [('sep','>i'),('name','10>i'),('note','60>i'),('one','>i'),('spec','>i'),('sdate','>i'),('stime','>f'),('edate','>i'),('etime','>f')] # 77 * 4 = 308 18 | # Grid information format definition 19 | self.g_dtype = [('sep','2>i'),('x-utm','>f'), ('y-utm','>f'), ('zone-utm','>i'), ('xorig','>f'), ('yorig','>f'), ('xcell','>f'), ('ycell','>f'),\ 20 | ('cols','>i'), ('rows','>i'), ('z-cells','>i'), ('cell-break','>i'), ('cell-top','>i'), ('height-surf','>f'), ('h-break','>f'), ('h-cell','>f')] # 17 * 4 = 68 21 | # Total number of words in file header (77) + grid information (17) + segment information (8) 22 | # This will help determine the total header offset 23 | self.camx_headlen = 77 + 17 + 8 24 | self._read_head() 25 | if ptsr: 26 | self._read_stackdata() 27 | 28 | def open_uam(self, zip_dict): 29 | ''' 30 | Opens UAM file for reading. 31 | ''' 32 | if self.verbosity: 33 | print('Opening file for reading: %s' %self.infile_name) 34 | try: 35 | file_in = open(self.infile_name, 'rb') 36 | except IOError: 37 | print('WARNING: %s not available for access. Attempting to open zipped version.' %self.infile_name) 38 | if self.verbosity: 39 | print('Opening file for reading: %s.gz' %self.infile_name) 40 | # Try to read a zipped version of the input file 41 | try: 42 | zip_in = gzip.open(self.infile_name+'.gz','rb') 43 | except IOError: 44 | raise IOError('%s.gz not available for access.' %self.infile_name) 45 | else: 46 | with zip_in: 47 | if self.infile_name in zip_dict: 48 | # Check if the file has already been unzipped 49 | print('Previously unzipped version found.') 50 | file_in = open(zip_dict[self.infile_name], 'rb') 51 | else: 52 | tmp_filename = os.path.join(tmp_dir, 'pyqa-tmp-%s.uam' %str(random.randint(100, 9999999))) 53 | tmp_file = open(tmp_filename, 'wb') 54 | for data in data_blocks(zip_in): 55 | tmp_file.write(data) 56 | tmp_file.close() 57 | zip_dict[self.infile_name] = tmp_filename # Store the unzipped temporary file name in the zip dictionary 58 | file_in = open(tmp_filename, 'rb') 59 | return file_in 60 | 61 | def _read_head(self): 62 | # Get Name, species number, and date/time information 63 | head = np.fromfile(self.camx, np.dtype(self.h_dtype), count = 1) 64 | self.specnum = head[0]['spec'] 65 | # self.tstep = ((head[0]['edate'] - head[0]['sdate']) * 24) + (int(head[0]['etime'] - head[0]['stime']) + 1) 66 | self.tstep = ((head[0]['edate'] - head[0]['sdate']) * 24) + (int(head[0]['etime'] - head[0]['stime'])) 67 | self.sdate = str(head[0]['sdate']) 68 | if self.sdate.startswith('9'): 69 | self.sdate = '19'+ self.sdate 70 | else: 71 | self.sdate = '20' + self.sdate 72 | grid = np.fromfile(self.camx, np.dtype(self.g_dtype), count = 1) 73 | self.ncols = grid[0]['cols'] 74 | self.nrows = grid[0]['rows'] 75 | self.nlays = 1 76 | self.cell = grid[0]['xcell'] 77 | self.xorig = grid[0]['xorig'] 78 | self.yorig = grid[0]['yorig'] 79 | # Get species list 80 | s_dtype = [('sep','8>i')] # 8 * 4 = 32 # Segment information, can skip 81 | s_dtype += [('spec%s' %x,'10>i') for x in range(self.specnum)] # specnum * 10 * 4 82 | species = np.fromfile(self.camx, np.dtype(s_dtype), count = 1) 83 | self.species_list = [self.int_to_str(species[0][x]).strip() for x in
range(1,len(species[0]))] 84 | self.camx_headlen += len(self.species_list) * 10 85 | 86 | def int_to_str(self, int_list): 87 | """ 88 | Get characters from stored integers 89 | Output as a string 90 | """ 91 | str_out='' 92 | for i in int_list: 93 | try: 94 | str_out += chr(int(int((int((int((i-32)/256)-32)/256)-32)/256))) 95 | except ValueError: 96 | print('Warning: Could not convert binary data to string: %s' %i) 97 | return str_out 98 | 99 | def _read_stackdata(self): 100 | # Get the stack data for converting from stack to grid 101 | self.stack_list = [] 102 | self.camx.seek(self.camx_headlen*4) 103 | stack_head = np.fromfile(self.camx, np.dtype([('sep1','2>i'),('seg','>i'),('npmax','>i'),('sep2','2>i')]), count = 1) 104 | self.camx_headlen += 6 105 | self.stacks = stack_head[0]['npmax'] 106 | stack_data = np.fromfile(self.camx, np.dtype([('x','>f'),('y','>f'),('col','>i'),('row','>i'),('h','>f'),('dia','>f')]), count = self.stacks) 107 | self.camx_headlen += 6 * self.stacks 108 | for stack in range(self.stacks): 109 | self.stack_list.append({'col': int(abs((stack_data[stack]['x'] - self.xorig)/self.cell)), 110 | 'row': int(abs((stack_data[stack]['y'] - self.yorig)/self.cell))}) 111 | 112 | def get_species(self, species_name, ptsr): 113 | if ptsr: 114 | species = self.get_ptsrspecies(species_name) 115 | else: 116 | species = self.get_emisspecies(species_name) 117 | return species 118 | 119 | def get_emisspecies(self, species_name): 120 | """ 121 | Get the species array in TSTEP,LAYER,ROW,COL format 122 | """ 123 | try: 124 | spec_idx = self.species_list.index(species_name) 125 | except ValueError: 126 | raise ValueError('Species %s not available in %s' %(species_name, self.infile_name)) 127 | species = np.zeros([self.tstep, self.nlays, self.nrows, self.ncols], 'f') 128 | for hour in range(self.tstep): 129 | # Location of species data = len of file header + ((time steps + 1) * length of time step header) + ((species number + 1) * length of species header) + (species number * cols * rows * layers) \ 130 | # + (time step * (total number of species * (length of species header + (cols * rows * layers)))) 131 | spec_loc = self.camx_headlen + ((hour + 1) * 6) + ((spec_idx + 1) * 13) + (spec_idx * self.ncols * self.nrows * self.nlays) + ( hour * (self.specnum * (13 + (self.ncols * self.nrows * self.nlays))) ) 132 | # self.camx.seek((spec_loc - 10) * 4) 133 | # specHead = np.fromfile(self.camx, np.dtype([('spec','10>i')]), count = 1) 134 | # specName = self.int_to_str(specHead[0]['spec']).strip() 135 | self.camx.seek(spec_loc * 4) 136 | data = np.fromfile(self.camx, np.dtype(('>f',(self.nrows,self.ncols))), count = self.nlays) # (ncols*nrows*4) or datasizebytes 137 | species[hour][:] = data 138 | return species 139 | 140 | def get_ptsrspecies(self, species_name): 141 | """ 142 | Get the species array in TSTEP,LAYER,ROW,COL format from a ptsr file 143 | """ 144 | try: 145 | spec_idx = self.species_list.index(species_name) 146 | except ValueError: 147 | raise ValueError('Species %s not available in %s' %(species_name, self.infile_name)) 148 | self.tstep = 24 149 | species_in = np.zeros([self.tstep, 1, self.stacks], 'f') 150 | species = np.zeros([self.tstep, self.nlays, self.nrows, self.ncols], 'f') 151 | for hour in range(self.tstep): 152 | # Location of species data = len of file header + ((time steps + 1) * length of time step header) + ((species number + 1) * length of species header) + (species number * cols * rows * layers) \ 153 | # + (time step * (total number of species * (length of
species header + (cols * rows * layers)))) 154 | spec_loc = self.camx_headlen + ((hour + 1) * 12) + ((hour + 1) * 5 * self.stacks) + ((spec_idx + 1) * 13) + (spec_idx * self.stacks) + ( hour * (self.specnum * (13 + (self.stacks))) ) 155 | 156 | # self.camx.seek((spec_loc - 10) * 4) 157 | # specHead = np.fromfile(self.camx, np.dtype([('spec','10>i')]), count = 1) 158 | # print int_to_str(specHead[0]['spec']) 159 | self.camx.seek(spec_loc * 4) 160 | data = np.fromfile(self.camx, np.dtype(('>f',(self.stacks,))), count = 1) # (ncols*nrows*4) or datasizebytes 161 | species_in[hour][:] = data 162 | for stack in range(self.stacks): 163 | col = self.stack_list[stack]['col'] 164 | row = self.stack_list[stack]['row'] 165 | if row not in list(range(species.shape[2])) or col not in list(range(species.shape[3])): 166 | # print('stack: %s at col: %s row: %s outside of bounds' %(stack + 1, col + 1, row + 1)) 167 | continue 168 | try: 169 | species[:,0,row,col] += species_in[:,0,stack] 170 | except IndexError: 171 | raise IndexError('Inline to grid problem at: ROW %s COL %s STACK %s' %(row+1,col+1,stack+1)) 172 | return species 173 | 174 | -------------------------------------------------------------------------------- /emisqa/chem_mechs.py: -------------------------------------------------------------------------------- 1 | molecDct = dict() 2 | 3 | molecDct['cmaq_cb05'] = {"CO": 28, "NH3": 17, "NO": 46, "NO2": 46, "SO2": 64, \ 4 | "SULF": 98, "PEC": 1, "POC": 1, "PMFINE": 1, "PNO3": 1, \ 5 | "PSO4": 1, "PMC": 1,"ALD2": 46.8, "ALDX": 38.6, "CH4": 16.0,\ 6 | "ETH": 33.1, "ETHA": 30.1,"ETOH": 45.6, "FORM": 29.9, \ 7 | "IOLE": 55.3, "ISOP": 68.1, "MEOH": 32.0,"NVOL": 15.9, \ 8 | "OLE": 32.3, "PAR": 16.9, "TERP": 134.4, "TOL": 97.7, \ 9 | "UNK": 352.4, "UNR": 25.9, "XYL": 109.0, "HGIIGAS": 200.59,\ 10 | "HGNRVA":200.59, "PHGI": 1, "DIESEL_PMEC": 1, \ 11 | "DIESEL_PMFINE": 1, "DIESEL_PMNO3":1, "DIESEL_PMOC": 1, \ 12 | "DIESEL_PMSO4": 1, "DIESEL_PMC": 1, "FORM_PRIMARY":30.026,\ 13 | "ALD2_PRIMARY": 44.0526, "ACROLEIN": 56.0633, \ 14 | "BUTADIENE13":54.0904, "BENZENE": 78.1118, "VOC_INV": 1.0, \ 15 | "NAPHTHALENE": 128.1705, "CL2": 70.91,"CHROMHEX_C": 1, \ 16 | "CHROMHEX_F": 1, "CHROMTRI_C": 1, "CHROMTRI_F": 1,\ 17 | "NICKEL_C": 1, "NICKEL_F": 1, "BERYLLIUM_C": 1, \ 18 | "BERYLLIUM_F": 1,"CADMIUM_C": 1, "CADMIUM_F": 1, "LEAD_C":1,\ 19 | "LEAD_F": 1, "MANGANESE_C":1, "MANGANESE_F": 1, \ 20 | "OXYL": 106.165, "PXYL": 106.165, "MXYL": 106.165,\ 21 | "TOLU": 92.1384, "CL4_ETHE": 165.83, "TRIETHYLAMINE": 101.19,\ 22 | "HEXAMETHY_DIIS": 168.2, "CHCL3": 119.3776, "CL_ETHE": 62.5,\ 23 | "CL4_ETHANE1122": 167.85, "ETOX": 44.0526, "QUINOLINE": 129.16,\ 24 | "ACRYLONITRILE": 53.06, "CL2_C2_12": 98.9592, \ 25 | "BR2_C2_12": 187.86,"HYDRAZINE": 32.05, "CARBONTET": 153.8227,\ 26 | "DICHLOROPROPENE": 110.97,"PROPDICHLORIDE": 112.9, \ 27 | "MAL_ANHYDRIDE": 98.06, "DICHLOROBENZENE":147.0002, \ 28 | "TOL_DIIS": 174.1561, "CL2_ME": 84.93, "CL3_ETHE": 131.3883,\ 29 | "HCL": 36.46, "HONO": 46, "NOX": 46, "PM2_5": 1, "PM10": 1, "HFLUX": 1,\ 30 | "PEC_72": 1, "PMFINE_72": 1, "POC_72": 1, "PMC_72": 1, "OTHER": 1,\ 31 | "NAPHTH_72": 128.1705, "NH3_FERT": 17, "PAL": 1, "PCA": 1, "PCL": 1, \ 32 | "PFE": 1, "PH2O": 1, "PK": 1, "PMG": 1, "PMN": 1, "PMOTHR": 1, \ 33 | "PNA": 1, "PNCOM": 1, "PNH4": 1, "PSI": 1, "PTI": 1, 34 | "ARSENIC_C": 1, "ARSENIC_F": 1, \ 35 | "PAH_000E0": 379.00, "PAH_101E2": 268.00, "PAH_114E1": 256.00, 36 | "PAH_176E2": 302.00, "PAH_176E3": 244.00, "PAH_176E4": 248.00, 37 | "PAH_176E5": 228.00, "PAH_192E3": 278.00, 
"PAH_880E5": 196.00} 38 | 39 | molecDct['cmaq_cb05_soa'] = molecDct['cmaq_cb05'] 40 | 41 | molecDct['cmaq_cb05v2_soa'] = {"CO": 28, "NH3": 17, "NO": 46, "NO2": 46, "SO2": 64, \ 42 | "SULF": 98, "PEC": 1, "POC": 1, "PMFINE": 1, "PNO3": 1, \ 43 | "PSO4": 1, "PMC": 1,"ALD2": 43.8, "ALDX": 37.58, "CH4": 16.04,\ 44 | "ETH": 28.05, "ETHA": 30.07,"ETOH": 44.73, "FORM": 30.026, \ 45 | "IOLE": 54.55, "ISOP": 68.12, "MEOH": 32.04,"NVOL": 1.00, \ 46 | "OLE": 33.09, "PAR": 17.15, "TERP": 136.24, "TOL": 97.98, \ 47 | "UNK": 352.4, "UNR": 26.29, "XYL": 109.71, "HGIIGAS": 200.59,\ 48 | "HGNRVA":200.59, "PHGI": 1, "DIESEL_PMEC": 1, \ 49 | "DIESEL_PMFINE": 1, "DIESEL_PMNO3":1, "DIESEL_PMOC": 1, \ 50 | "DIESEL_PMSO4": 1, "DIESEL_PMC": 1, "FORM_PRIMARY":30.026,\ 51 | "ALD2_PRIMARY": 44.0526, "ACROLEIN": 56.0633, \ 52 | "BUTADIENE13":54.0904, "BENZENE": 78.1118, "VOC_INV": 1.0, \ 53 | "NAPHTHALENE": 128.1705, "CL2": 70.91,"CHROMHEX_C": 1, \ 54 | "CHROMHEX_F": 1, "CHROMTRI_C": 1, "CHROMTRI_F": 1,\ 55 | "NICKEL_C": 1, "NICKEL_F": 1, "BERYLLIUM_C": 1, \ 56 | "BERYLLIUM_F": 1,"CADMIUM_C": 1, "CADMIUM_F": 1, "LEAD_C":1,\ 57 | "LEAD_F": 1, "MANGANESE_C":1, "MANGANESE_F": 1, \ 58 | "OXYL": 106.165, "PXYL": 106.165, "MXYL": 106.165,\ 59 | "TOLU": 92.1384, "CL4_ETHE": 165.83, "TRIETHYLAMINE": 101.19,\ 60 | "HEXAMETHY_DIIS": 168.2, "CHCL3": 119.3776, "CL_ETHE": 62.5,\ 61 | "CL4_ETHANE1122": 167.85, "ETOX": 44.0526, "QUINOLINE": 129.16,\ 62 | "ACRYLONITRILE": 53.06, "CL2_C2_12": 98.9592, \ 63 | "BR2_C2_12": 187.86,"HYDRAZINE": 32.05, "CARBONTET": 153.8227,\ 64 | "DICHLOROPROPENE": 110.97,"PROPDICHLORIDE": 112.9, \ 65 | "MAL_ANHYDRIDE": 98.06, "DICHLOROBENZENE":147.0002, \ 66 | "TOL_DIIS": 174.1561, "CL2_ME": 84.93, "CL3_ETHE": 131.3883,\ 67 | "HCL": 36.46, "HONO": 46, "NOX": 46, "PM2_5": 1, "PM10": 1, "HFLUX": 1,\ 68 | "PEC_72": 1, "PMFINE_72": 1, "POC_72": 1, "PMC_72": 1, "OTHER": 1,\ 69 | "NAPHTH_72": 128.1705, "NH3_FERT": 17, "PAL": 1, "PCA": 1, "PCL": 1, \ 70 | "PFE": 1, "PH2O": 1, "PK": 1, "PMG": 1, "PMN": 1, "PMOTHR": 1, \ 71 | "PNA": 1, "PNCOM": 1, "PNH4": 1, "PSI": 1, "PTI": 1, 72 | "ARSENIC_C": 1, "ARSENIC_F": 1, \ 73 | "PAH_000E0": 379.00, "PAH_101E2": 268.00, "PAH_114E1": 256.00, 74 | "PAH_176E2": 302.00, "PAH_176E3": 244.00, "PAH_176E4": 248.00, 75 | "PAH_176E5": 228.00, "PAH_192E3": 278.00, "PAH_880E5": 196.00} 76 | 77 | molecDct['cmaq_cb05v2_mplite'] = molecDct['cmaq_cb05v2_soa'] 78 | 79 | molecDct['cmaq_cb05_tx'] = {"CO": 28, "NH3": 17, "NO": 46, "NO2": 46, "SO2": 64, 80 | "SULF": 98, "PEC": 1, "POC": 1, "PMFINE": 1, "PNO3": 1, 81 | "PSO4": 1, "PMC": 1,"ALD2": 46.8, "ALDX": 38.6, "CH4": 16.0, 82 | "ETH": 33.1, "ETHA": 30.1,"ETOH": 45.6, "FORM": 29.9, 83 | "IOLE": 55.3, "ISOP": 68.1, "MEOH": 32.0,"NVOL": 15.9, 84 | "OLE": 32.3, "PAR": 16.9, "TERP": 134.4, "TOL": 97.7, 85 | "UNK": 352.4, "UNR": 25.9, "XYL": 109.0, "HGIIGAS": 200.59, 86 | "HGNRVA":200.59, "PHGI": 1, "DIESEL_PEC": 1, 87 | "DIESEL_PMFINE": 1, "DIESEL_PNO3":1, "DIESEL_POC": 1, 88 | "DIESEL_PSO4": 1, "DIESEL_PMC": 1, "FORM_PRIMARY":30.026, 89 | "ALD2_PRIMARY": 44.0526, "ACROLEIN": 56.0633, 90 | "BUTADIENE13":54.0904, "BENZENE": 78.1118, "VOC_INV": 46.0, 91 | "NAPHTHALENE": 128.1705, "CL2": 70.91,"CHROMHEX_C": 1, 92 | "CHROMHEX_F": 1, "CHROMTRI_C": 1, "CHROMTRI_F": 1, 93 | "NICKEL_C": 1, "NICKEL_F": 1, "BERYLLIUM_C": 1, 94 | "BERYLLIUM_F": 1,"CADMIUM_C": 1, "CADMIUM_F": 1, "LEAD_C":1, 95 | "LEAD_F": 1, "MANGANESE_C":1, "MANGANESE_F": 1, 96 | "OXYL": 106.165, "PXYL": 106.165, "MXYL": 106.165, 97 | "TOLU": 92.1384, "CL4_ETHE": 165.83, 
"TRIETHYLAMINE": 101.19, 98 | "HEXAMETHY_DIIS": 168.2, "CHCL3": 119.3776, "CL_ETHE": 62.5, 99 | "CL4_ETHANE1122": 167.85, "ETOX": 44.0526, "QUINOLINE": 129.16, 100 | "ACRYLONITRILE": 53.06, "CL2_C2_12": 98.9592, 101 | "BR2_C2_12": 187.86,"HYDRAZINE": 32.05, "CARBONTET": 153.8227, 102 | "DICHLOROPROPENE": 110.97,"PROPDICHLORIDE": 112.9, 103 | "MAL_ANHYDRIDE": 98.06, "DICHLOROBENZENE":147.0002, 104 | "TOL_DIIS": 174.1561, "CL2_ME": 84.93, "CL3_ETHE": 131.3883, 105 | "HCL": 36.46, "HONO": 46, "NOX": 46, "PM2_5": 1, "PM10": 1, "HFLUX": 1, 106 | "PEC_72": 1, "PMFINE_72": 1, "POC_72": 1, "PMC_72": 1, "OTHER": 1, 107 | "NAPHTH_72": 128.1705, "NH3_FERT": 17} 108 | 109 | molecDct['cmaq_saprc07T'] = {"CO": 28, "NH3": 17, "NO": 46, "NO2": 46, "SO2": 64, 110 | "SULF": 98, "PEC": 1, "POC": 1, "PMFINE": 1, "PNO3": 1, 111 | "PSO4": 1, "PMC": 1,"ALD2": 46.8, "ALDX": 38.6, "CH4": 16.0, 112 | "ETH": 33.1, "ETHA": 30.1,"ETOH": 45.6, "FORM": 29.9, 113 | "IOLE": 55.3, "ISOP": 68.1, "MEOH": 32.0,"NVOL": 15.9, 114 | "OLE": 32.3, "PAR": 16.9, "TERP": 134.4, "TOL": 97.7, 115 | "UNK": 352.4, "UNR": 25.9, "XYL": 109.0, "HGIIGAS": 200.59, 116 | "HGNRVA":200.59, "PHGI": 1, "DIESEL_PEC": 1, 117 | "DIESEL_PMFINE": 1, "DIESEL_PNO3":1, "DIESEL_POC": 1, 118 | "DIESEL_PSO4": 1, "DIESEL_PMC": 1, "FORM_PRIMARY":30.026, 119 | "ALD2_PRIMARY": 44.0526, "ACROLEIN": 56.0633, 120 | "BUTADIENE13":54.0904, "BENZENE": 78.1118, 121 | "NAPHTHALENE": 128.1705, "CL2": 70.91,"CHROMHEX_C": 1, 122 | "CHROMHEX_F": 1, "CHROMTRI_C": 1, "CHROMTRI_F": 1, 123 | "NICKEL_C": 1, "NICKEL_F": 1, "BERYLLIUM_C": 1, 124 | "BERYLLIUM_F": 1,"CADMIUM_C": 1, "CADMIUM_F": 1, "LEAD_C":1, 125 | "LEAD_F": 1, "MANGANESE_C":1, "MANGANESE_F": 1, 126 | "OXYL": 106.165, "PXYL": 106.165, "MXYL": 106.165, 127 | "TOLU": 92.1384, "CL4_ETHE": 165.83, "TRIETHYLAMINE": 101.19, 128 | "HEXAMETHY_DIIS": 168.2, "CHCL3": 119.3776, "CL_ETHE": 62.5, 129 | "CL4_ETHANE1122": 167.85, "ETOX": 44.0526, "QUINOLINE": 129.16, 130 | "ACRYLONITRILE": 53.06, "CL2_C2_12": 98.9592, 131 | "BR2_C2_12": 187.86,"HYDRAZINE": 32.05, "CARBONTET": 153.8227, 132 | "DICHLOROPROPENE": 110.97,"PROPDICHLORIDE": 112.9, 133 | "MAL_ANHYDRIDE": 98.06, "DICHLOROBENZENE":147.0002, 134 | "TOL_DIIS": 174.1561, "CL2_ME": 84.93, "CL3_ETHE": 131.3883, 135 | "HCL": 36.46, "HONO": 46, "NOX": 46, "PM2_5": 1, "PM10": 1, "HFLUX": 1, "HFC": 66.0999, "HCHO": 1, "CCHO": 1, "BENZ": 78.1118, 136 | "13BDE": 54.0904, "ACRO": 56.0633, "BACL": 86.0892, "ACET": 36.0, "IPRD":70.0898, "PRPE": 42.0797, "PACD": 74.0785, 137 | "CRES": 104.0141, "ETHE": 24.0, "ARO1": 159.5964, "ARO2": 147.3216, "OLE2": 91.6257, "OLE1": 74.8872, "RCHO": 66.7692, 138 | "PRD2": 106.6958, "VOC": 1, "GLY": 58.0361, "APIN": 136.234, "BPIN": 120.0, "B124": 127.5588, "NROG": 102.4983, "BALD": 115.0695, 139 | "MVK": 140.2227, "SESQ": 180.0, "MEK": 72.1046, "ALK4": 82.814, "ALK5": 99.9047, "ALK2": 54.3473, "ALK3": 64.5106, "ALK1": 59.8675, 140 | "AACD": 60.052, "MGLY": 72.0627, "MACR": 70.0898, "ACYE": 26.0373, "FACD": 46.0254, "HCHO_PRIMARY":30.026 } 141 | 142 | molecDct['cmaq_cb6'] = {"CO": 28, "NH3": 17, "NO": 46, "NO2": 46, "SO2": 64, \ 143 | "SULF": 98, "PEC": 1, "POC": 1, "PMFINE": 1, "PNO3": 1, \ 144 | "PSO4": 1, "PMC": 1,"ALD2": 44.0526, "ALDX": 43.65, "CH4": 16.042,\ 145 | "ETH": 28.053, "ETHA": 30.069,"ETOH": 46.0684, "FORM": 30.026, \ 146 | "IOLE": 56.11, "ISOP": 68.117, "MEOH": 32.042,"NVOL": 1.0001, \ 147 | "OLE": 27.65, "PAR": 14.43, "TERP": 136.234, "TOL": 92.138, \ 148 | "UNK": 137.19, "UNR": 28.86, "XYL": 106.165, "HGIIGAS": 200.59,\ 149 | 
"HGNRVA":200.59, "PHGI": 1, "DIESEL_PMEC": 1, \ 150 | "DIESEL_PMFINE": 1, "DIESEL_PMNO3":1, "DIESEL_PMOC": 1, \ 151 | "DIESEL_PMSO4": 1, "DIESEL_PMC": 1, "FORM_PRIMARY":30.026,\ 152 | "ALD2_PRIMARY": 44.0526, "ACROLEIN": 56.0633, \ 153 | "BUTADIENE13":54.0904, "BENZ": 78.1118, "BENZENE": 78.1118, "VOC_INV": 1.0, \ 154 | "NAPHTHALENE": 128.1705, "CL2": 70.91,"CHROMHEX_C": 1, \ 155 | "CHROMHEX_F": 1, "CHROMTRI_C": 1, "CHROMTRI_F": 1,\ 156 | "NICKEL_C": 1, "NICKEL_F": 1, "BERYLLIUM_C": 1, \ 157 | "BERYLLIUM_F": 1,"CADMIUM_C": 1, "CADMIUM_F": 1, "LEAD_C":1,\ 158 | "LEAD_F": 1, "MANGANESE_C":1, "MANGANESE_F": 1, \ 159 | "OXYL": 106.165, "PXYL": 106.165, "MXYL": 106.165,\ 160 | "TOLU": 92.1384, "CL4_ETHE": 165.83, "TRIETHYLAMINE": 101.19,\ 161 | "HEXAMETHY_DIIS": 168.2, "CHCL3": 119.3776, "CL_ETHE": 62.5,\ 162 | "CL4_ETHANE1122": 167.85, "ETOX": 44.0526, "QUINOLINE": 129.16,\ 163 | "ACRYLONITRILE": 53.06, "CL2_C2_12": 98.9592, \ 164 | "BR2_C2_12": 187.86,"HYDRAZINE": 32.05, "CARBONTET": 153.8227,\ 165 | "DICHLOROPROPENE": 110.97,"PROPDICHLORIDE": 112.9, \ 166 | "MAL_ANHYDRIDE": 98.06, "DICHLOROBENZENE":147.0002, \ 167 | "TOL_DIIS": 174.1561, "CL2_ME": 84.93, "CL3_ETHE": 131.3883,\ 168 | "HCL": 36.46, "HONO": 46, "NOX": 46, "PM2_5": 1, "PM10": 1, "HFLUX": 1,\ 169 | "PEC_72": 1, "PMFINE_72": 1, "POC_72": 1, "PMC_72": 1, "OTHER": 1,\ 170 | "NAPHTH_72": 128.1705, "NH3_FERT": 17, "PAL": 1, "PCA": 1, "PCL": 1, \ 171 | "PFE": 1, "PH2O": 1, "PK": 1, "PMG": 1, "PMN": 1, "PMOTHR": 1, \ 172 | "PNA": 1, "PNCOM": 1, "PNH4": 1, "PSI": 1, "PTI": 1, 173 | "ARSENIC_C": 1, "ARSENIC_F": 1, \ 174 | "PAH_000E0": 379.00, "PAH_101E2": 268.00, "PAH_114E1": 256.00, 175 | "PAH_176E2": 302.00, "PAH_176E3": 244.00, "PAH_176E4": 248.00, 176 | "PAH_176E5": 228.00, "PAH_192E3": 278.00, "PAH_880E5": 196.00, 177 | "ACET": 58.079, "ETHY": 26.037, "KET": 28.82, "PRPA": 44.096, 178 | "SOAALK": 92.1006, "XYLMN": 106.165, "NAPH": 128.1705, 179 | "ACETONITRILE": 41.05, "ACRYLICACID": 72.06, "ACRYLONITRILE": 53.06, 180 | "CARBSULFIDE": 60.07, "CHLOROPRENE": 88.54, "ETHYLBENZ": 106.165, 181 | "HEXANE": 86.175, "METHCHLORIDE": 50.49, "STYRENE": 104.15, 182 | "XYLENES": 106.165, "VOC_BEIS": 46, "APIN": 120, "BPIN": 120, 183 | "SESQ": 180, "NR": 24} 184 | 185 | molecDct['cmaq_cb6ae7'] = {"CO": 28, "NH3": 17, "NO": 46, "NO2": 46, "SO2": 64, \ 186 | "SULF": 98, "PEC": 1, "POC": 1, "PMFINE": 1, "PNO3": 1, \ 187 | "PSO4": 1, "PMC": 1,"ALD2": 44.0526, "ALDX": 43.65, "CH4": 16.042,\ 188 | "ETH": 28.053, "ETHA": 30.069,"ETOH": 46.0684, "FORM": 30.026, \ 189 | "IOLE": 56.11, "ISOP": 68.117, "MEOH": 32.042,"NVOL": 1.0001, \ 190 | "OLE": 27.65, "PAR": 14.43, "TERP": 136.234, "TOL": 92.138, \ 191 | "UNK": 137.19, "UNR": 28.86, "XYL": 106.165, "HGIIGAS": 200.59,\ 192 | "HGNRVA":200.59, "PHGI": 1, "DIESEL_PMEC": 1, \ 193 | "DIESEL_PMFINE": 1, "DIESEL_PMNO3":1, "DIESEL_PMOC": 1, \ 194 | "DIESEL_PMSO4": 1, "DIESEL_PMC": 1, "FORM_PRIMARY":30.026,\ 195 | "ALD2_PRIMARY": 44.0526, "ACROLEIN": 56.0633, \ 196 | "BUTADIENE13":54.0904, "BENZ": 78.1118, "BENZENE": 78.1118, "VOC_INV": 1.0, \ 197 | "NAPHTHALENE": 128.1705, "CL2": 70.91,"CHROMHEX_C": 1, \ 198 | "CHROMHEX_F": 1, "CHROMTRI_C": 1, "CHROMTRI_F": 1,\ 199 | "NICKEL_C": 1, "NICKEL_F": 1, "BERYLLIUM_C": 1, \ 200 | "BERYLLIUM_F": 1,"CADMIUM_C": 1, "CADMIUM_F": 1, "LEAD_C":1,\ 201 | "LEAD_F": 1, "MANGANESE_C":1, "MANGANESE_F": 1, \ 202 | "OXYL": 106.165, "PXYL": 106.165, "MXYL": 106.165,\ 203 | "TOLU": 92.1384, "CL4_ETHE": 165.83, "TRIETHYLAMINE": 101.19,\ 204 | "HEXAMETHY_DIIS": 168.2, "CHCL3": 119.3776, 
"CL_ETHE": 62.5,\ 205 | "CL4_ETHANE1122": 167.85, "ETOX": 44.0526, "QUINOLINE": 129.16,\ 206 | "ACRYLONITRILE": 53.06, "CL2_C2_12": 98.9592, \ 207 | "BR2_C2_12": 187.86,"HYDRAZINE": 32.05, "CARBONTET": 153.8227,\ 208 | "DICHLOROPROPENE": 110.97,"PROPDICHLORIDE": 112.9, \ 209 | "MAL_ANHYDRIDE": 98.06, "DICHLOROBENZENE":147.0002, \ 210 | "TOL_DIIS": 174.1561, "CL2_ME": 84.93, "CL3_ETHE": 131.3883,\ 211 | "HCL": 36.46, "HONO": 46, "NOX": 46, "PM2_5": 1, "PM10": 1, "HFLUX": 1,\ 212 | "PEC_72": 1, "PMFINE_72": 1, "POC_72": 1, "PMC_72": 1, "OTHER": 1,\ 213 | "NAPHTH_72": 128.1705, "NH3_FERT": 17, "PAL": 1, "PCA": 1, "PCL": 1, \ 214 | "PFE": 1, "PH2O": 1, "PK": 1, "PMG": 1, "PMN": 1, "PMOTHR": 1, \ 215 | "PNA": 1, "PNCOM": 1, "PNH4": 1, "PSI": 1, "PTI": 1, 216 | "ARSENIC_C": 1, "ARSENIC_F": 1, \ 217 | "PAH_000E0": 379.00, "PAH_101E2": 268.00, "PAH_114E1": 256.00, 218 | "PAH_176E2": 302.00, "PAH_176E3": 244.00, "PAH_176E4": 248.00, 219 | "PAH_176E5": 228.00, "PAH_192E3": 278.00, "PAH_880E5": 196.00, 220 | "ACET": 58.079, "ETHY": 26.037, "KET": 28.82, "PRPA": 44.096, 221 | "SOAALK": 92.1006, "XYLMN": 106.165, "NAPH": 128.1705, 222 | "ACETONITRILE": 41.05, "ACRYLICACID": 72.06, "ACRYLONITRILE": 53.06, 223 | "CARBSULFIDE": 60.07, "CHLOROPRENE": 88.54, "ETHYLBENZ": 106.165, 224 | "HEXANE": 86.175, "METHCHLORIDE": 50.49, "STYRENE": 104.15, 225 | "XYLENES": 106.165, "VOC_BEIS": 46, "APIN": 136, "BPIN": 120, 226 | "SESQ": 180, "NR": 24, "AACD": 60.052, "FACD": 46.025, 227 | "IVOC": 125.9429, "BENZOAPYRNE": 252.316} 228 | 229 | -------------------------------------------------------------------------------- /emisqa/cmaq/__init__.py: -------------------------------------------------------------------------------- 1 | all = ['read_ncf',] 2 | 3 | import emisqa.cmaq.read_ncf 4 | -------------------------------------------------------------------------------- /emisqa/cmaq/read_ncf.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from builtins import range 3 | from builtins import object 4 | import numpy as np 5 | import netCDF4 as ncf 6 | import gzip, random 7 | from emisqa.helpers import data_blocks 8 | from emisqa.default_paths import tmp_dir 9 | from fauxioapi import Grid 10 | import os.path 11 | 12 | # Subclass to Fauxio 13 | class NCFFile(object): 14 | """ 15 | Instance of a NCFFile. 16 | """ 17 | def __init__(self, infile_name, verbosity, zip_dict): 18 | self.infile_name = infile_name 19 | self.verbosity = verbosity 20 | self.NCF = self.open_NCF(zip_dict) 21 | try: 22 | self.sdate = getattr(self.NCF, 'SDATE') 23 | except AttributeError: 24 | self.sdate = '2011001' 25 | self.species_list = list(self.NCF.variables.keys()) 26 | 27 | def __str__(self): 28 | return self.infile_name 29 | 30 | def __call__(self): 31 | return self.NCF 32 | 33 | def get_species(self, species_name, grid, ignore_spec, inln, stacks): 34 | if species_name not in self.species_list: 35 | if ignore_spec: 36 | print('WARNING: The species %s does not exist in the file %s.' %(species_name, self.infile_name)) 37 | spec_shape = self.NCF.variables[self.species_list[0]].shape 38 | data_in = np.zeros(spec_shape, np.float32) 39 | else: 40 | raise ValueError('The species %s does not exist in the file %s.' 
%(species_name, self.infile_name)) 41 | else: 42 | data_in = self.NCF.variables[species_name][:] 43 | # Place inline data into a 2D grid 44 | if inln == True: 45 | data_in = self.grid_inln(data_in, stacks, grid) 46 | return data_in 47 | 48 | def open_NCF(self, zip_dict = {}): 49 | ''' 50 | Opens the netCDF input file and returns an open file object. 51 | ''' 52 | if self.verbosity: 53 | print('Opening file for reading: %s' %self.infile_name) 54 | try: 55 | file_in = ncf.Dataset(self.infile_name, 'r') 56 | except TypeError: 57 | raise IOError('%s not a valid netCDF file. Please check file format and selected input type (-c [TYPE]).' %self.infile_name) 58 | except IOError: 59 | print('WARNING: %s not available for access. Attempting to open zipped version.' %self.infile_name) 60 | if self.verbosity: 61 | print('Opening file for reading: %s.gz' %self.infile_name) 62 | # Try to read a zipped version of the input file 63 | try: 64 | zip_in = gzip.open(self.infile_name+'.gz','rb') 65 | except IOError: 66 | raise IOError('%s.gz not available for access.' %self.infile_name) 67 | else: 68 | with zip_in: 69 | if self.infile_name in zip_dict: 70 | # Check if the file has already been unzipped 71 | print('Previously unzipped version found.') 72 | file_in = ncf.Dataset(zip_dict[self.infile_name], 'r') 73 | return file_in 74 | tmp_filename = os.path.join(tmp_dir, 'pyqa-tmp-%s.ncf' %random.randint(100, 9999999)) 75 | tmp_file = open(tmp_filename, 'wb') 76 | for data in data_blocks(zip_in): 77 | tmp_file.write(data) 78 | tmp_file.close() 79 | zip_dict[self.infile_name] = tmp_filename # Store the unzipped temporary file name in the zip dictionary 80 | try: 81 | file_in = ncf.Dataset(tmp_filename, 'r') 82 | except TypeError: 83 | raise TypeError('Extracted file from %s.gz is not a valid netCDF file.' %self.infile_name) 84 | return file_in 85 | 86 | def grid_inln(self, data_in, stacks, grid): 87 | """ 88 | Process the input species and adjust based on the ratio table 89 | """ 90 | data_out = np.zeros([data_in.shape[0],1,grid.NROWS,grid.NCOLS], np.float32) 91 | for col_row, stack_list in stacks.items(): 92 | col = int(col_row[:4]) 93 | row = int(col_row[4:]) 94 | data_out[:,0,row,col] = np.sum(data_in[:,0,stack_list,0], axis=1) 95 | return data_out[:] 96 | 97 | def close_file(self): 98 | ''' 99 | Closes the open file 100 | ''' 101 | self.NCF.close() 102 | 103 | -------------------------------------------------------------------------------- /emisqa/csv/__init__.py: -------------------------------------------------------------------------------- 1 | all = ['read_csv',] 2 | 3 | import emisqa.csv.read_csv 4 | -------------------------------------------------------------------------------- /emisqa/csv/read_csv.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from builtins import str 3 | from builtins import object 4 | import numpy as np 5 | import gzip, random 6 | from emisqa.helpers import data_blocks, conv2jul 7 | import os.path 8 | from emisqa.default_paths import tmp_dir 9 | 10 | class CSVFile(object): 11 | """ 12 | Instance of a CSVFile. 13 | The CSV File should contain gridded time-stepped species data. The "hours" in the file may 14 | represent any time step, but are typically hours. 15 | 16 | The format requires a header containing the number of columns, rows, hours, and the 17 | 8 character Gregorian date. 18 | Those are defined on header lines starting with "#" followed by COLS, ROWS, HOURS, and 19 | GSDATE respectively. 
20 | The data should be formatted as column number (starting at 1), row number (starting at 1),
21 | hour, species name, and value. No header of column names is required for the data. If the
22 | column names are included, then the first column name should start with "col" or "COL".
23 |
24 | ex.
25 | #COLS 265
26 | #ROWS 300
27 | #HOURS 2
28 | #GSDATE 20110520
29 | col,row,hour,speciesname,value
30 | 3,5,1,NOX,265.4
31 | 231,35,2,CO,122.3
32 | ...
33 |
34 | """
35 |     def __init__(self, infile_name, verbosity, zip_dict):
36 |         self.infile_name = infile_name
37 |         self.verbosity = verbosity
38 |         self.cols = 0
39 |         self.rows = 0
40 |         self.gsdate = ''
41 |         self.hours = 0
42 |         self.units = ''
43 |         self.spec_dict = {}
44 |         self.open_CSV(zip_dict)
45 |         self.sdate = conv2jul(self.gsdate)
46 |
47 |     def __str__(self):
48 |         return self.infile_name
49 |
50 |     def get_species(self, species_name, grid, ignore_spec, inln, stacks):
51 |         '''
52 |         Function to read a species from the object and return the numpy array
53 |         '''
54 |         if species_name not in self.species_list:
55 |             if ignore_spec:
56 |                 print('WARNING: The species %s does not exist in the file %s.' %(species_name, self.infile_name))
57 |                 spec_shape = self.spec_dict[self.species_list[0]].shape
58 |                 species = np.zeros(spec_shape, '>f')
59 |             else:
60 |                 raise ValueError('The species %s does not exist in the file %s.' %(species_name, self.infile_name))
61 |         else:
62 |             species = self.spec_dict[species_name]
63 |         data_in = species[:]
64 |         return data_in
65 |
66 |     def _check_int(self, x, name):
67 |         try:
68 |             y = int(x)
69 |         except ValueError:
70 |             raise ValueError('"%s" in file header (#%s). Should be integer.' %(x, name))
71 |         else:
72 |             return y
73 |
74 |     def _read_data(self, infile):
75 |         '''
76 |         Read in the formatted CSV file
77 |         '''
78 |         with open(infile,'r') as csvfile:
79 |             for ln, line in enumerate(csvfile):
80 |                 # Read the meta data headers: #COLS, #ROWS, #HOURS, #GSDATE, #UNITS
81 |                 if line.startswith('#'):
82 |                     line = [cell.strip().upper() for cell in line.strip('#').strip().split(' ')]
83 |                     if line[0] in ('COLS','ROWS','HOURS','GSDATE','UNITS'):
84 |                         setattr(self, line[0].lower(), line[1] if line[0] == 'UNITS' else self._check_int(line[1], line[0]))  # UNITS stays a string
85 |                 else:
86 |                     line = [cell.strip().upper() for cell in line.strip().split(',')]
87 |                     if self.cols == 0 or self.rows == 0 or self.hours == 0 or not self.gsdate:
88 |                         raise ValueError('Must set file header with lines of #COLS, #ROWS, #HOURS, #GSDATE.')
89 |                     elif line[0].startswith('COL'):
90 |                         # Ignore the header
91 |                         pass
92 |                     else:
93 |                         # Check current col, row, hour against the boundaries set in the header
94 |                         col = int(line[0]) - 1
95 |                         if (col + 1) > self.cols or col < 0:
96 |                             print('WARNING: Column %s on line %s outside of column boundary as defined in header.' %(col+1, ln+1))
97 |                         row = int(line[1]) - 1
98 |                         if (row + 1) > self.rows or row < 0:
99 |                             print('WARNING: Row %s on line %s outside of row boundary as defined in header.' %(row+1, ln+1))
100 |                         hour = int(line[2]) - 1
101 |                         if (hour + 1) > self.hours or hour < 0:
102 |                             print('WARNING: Hour %s on line %s outside of hours maximum as defined in header.' %(hour+1, ln+1))
103 |                         species = line[3]
104 |                         self.spec_dict.setdefault(species, np.zeros([self.hours, 1, self.rows, self.cols], '>f'))
105 |                         # Put the values in species dictionary of numpy arrays
106 |                         try:
107 |                             val = float(line[4])
108 |                         except ValueError:
109 |                             raise ValueError('Value for species %s on line %s cannot be converted to a float.' %(species, ln+1))
110 |                         else:
111 |                             if self.spec_dict[species][hour,0,row,col] == 0:
112 |                                 self.spec_dict[species][hour,0,row,col] = val
113 |                             else:
114 |                                 print('Duplicate column, row, and hour combination at line %s.' %(ln+1))
115 |                                 print(line)
116 |                                 raise ValueError('Check for additional entry for column, row, hour, and species.')
117 |         self.species_list = list(self.spec_dict.keys())
118 |
119 |     def open_CSV(self, zip_dict):
120 |         '''
121 |         Finds the correct CSV file, unzips if necessary
122 |         '''
123 |         if self.verbosity:
124 |             print('Opening file for reading: %s' %self.infile_name)
125 |         try:
126 |             open(self.infile_name, 'r').close()  # Only test readability here; _read_data reopens by name
127 |         except IOError:
128 |             print('WARNING: %s not available for access. Attempting to open zipped version.' %self.infile_name)
129 |             if self.verbosity:
130 |                 print('Opening file for reading: %s.gz' %self.infile_name)
131 |             # Try to read a zipped version of the input file
132 |             try:
133 |                 zip_in = gzip.open(self.infile_name+'.gz','rb')
134 |             except IOError:
135 |                 raise IOError('%s.gz not available for access.' %self.infile_name)
136 |             else:
137 |                 with zip_in:
138 |                     if self.infile_name in zip_dict:
139 |                         # Check if the file has already been unzipped
140 |                         print('Previously unzipped version found.')
141 |                         file_in = zip_dict[self.infile_name]
142 |                     else:
143 |                         tmp_filename = os.path.join(tmp_dir, 'pyqa-tmp-%s.csv' %str(random.randint(100, 9999999)))
144 |                         tmp_file = open(tmp_filename, 'wb')
145 |                         for data in data_blocks(zip_in):
146 |                             tmp_file.write(data)
147 |                         tmp_file.close()
148 |                         zip_dict[self.infile_name] = tmp_filename # Store the unzipped temporary file name in the zip dictionary
149 |                         file_in = tmp_filename
150 |         else:
151 |             file_in = self.infile_name
152 |         self._read_data(file_in)
153 |
154 |
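A minimal usage sketch for the reader wrapper defined in data_file.py below; the
file name and species are placeholders, and zip_dict is the shared cache of
unzipped temporaries that the readers maintain:

    from emisqa.data_file import DataFile

    zip_dict = {}
    infile = DataFile('emis.ncf', verbosity=True, informat='NCF', zip_dict=zip_dict)
    daily = infile.sum_val('NOX')    # (1, ROW, COL) total over the first 24 hours
    hourly = infile.dump_val('NOX')  # (24, ROW, COL) hourly values
    infile.close_file()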
-------------------------------------------------------------------------------- /emisqa/data_file.py: --------------------------------------------------------------------------------
1 | from __future__ import division
2 | from builtins import range
3 | from builtins import object
4 | import numpy as np
5 |
6 | class DataFile(object):
7 |     """
8 |     Input file wrapper
9 |     """
10 |
11 |     def __init__(self, infile, verbosity=False, informat='NCF', ptsr=False, zip_dict={}):
12 |         if informat == 'UAM':
13 |             from emisqa.camx.read_uam import CAMxFile
14 |             self.infile = CAMxFile(infile, verbosity, ptsr, zip_dict)
15 |         elif informat == 'CSV':
16 |             from emisqa.csv.read_csv import CSVFile
17 |             self.infile = CSVFile(infile, verbosity, zip_dict)
18 |         elif informat == 'NCF':
19 |             from emisqa.cmaq.read_ncf import NCFFile
20 |             self.infile = NCFFile(infile, verbosity, zip_dict)
21 |         else:
22 |             raise ValueError('Wrong input format. Specify NCF, UAM, or CSV with -c.')
23 |         self.infile_name = self.infile.infile_name
24 |         self.species_list = self.infile.species_list
25 |         self.sdate = self.infile.sdate
26 |         self.ptsr = ptsr
27 |         self.informat = informat
28 |
29 |     def __str__(self):
30 |         return self.infile_name
31 |
32 |     def __call__(self):
33 |         return self.infile
34 |
35 |     def get_species(self, species_name, grid, ignore_spec, inln, ptsr, stacks, informat):
36 |         if informat == 'NCF':
37 |             species = self.infile.get_species(species_name, grid, ignore_spec, inln, stacks)
38 |         else:
39 |             species = self.infile.get_species(species_name, ptsr, ignore_spec, inln, stacks)
40 |         return species
41 |
42 |     def dump_val(self, species_name, all_hours=False, grid='', ignore_spec=False, inln=False, interpolate=False, layer='', stacks=''):
43 |         '''
44 |         Returns an array of the hourly data of the selected species in the open NCF.
45 |         Optionally takes all_hours as T/F. If true, all hours in the NCF are dumped. If false, just the first 24/1 day (ie. 0-23).
46 |         Flattens all layers unless a single layer to use is specified.
47 |         '''
48 |         species = self.get_species(species_name, grid, ignore_spec, inln, self.ptsr, stacks, self.informat)
49 |         if len(species.shape) == 2:
50 |             # Assume that all 2D netCDFs are just ROWxCOL
51 |             species = species[np.newaxis,np.newaxis,:,:]
52 |         elif len(species.shape) == 3:
53 |             # Assume that all 3D netCDFs are HOURxROWxCOL
54 |             species = species[:,np.newaxis,:,:]
55 |         if len(species.shape) != 4:
56 |             raise ValueError('Input variable arrays must be of size 2D, 3D, or 4D')
61 |         if all_hours:
62 |             hours = species.shape[0]
63 |         else:
64 |             hours = 24
65 |         data = np.zeros([hours, species.shape[-2], species.shape[-1]], '>f4')
66 |         if layer:
67 |             if int(layer) - 1 not in range(species.shape[1]):
68 |                 raise IndexError('The specified layer is out of range.')
69 |             layers = slice(int(layer) - 1, int(layer))
70 |         else:
71 |             layers = slice(0, species.shape[1])
72 |         if interpolate:
73 |             data[:] += np.sum(species[slice(0,hours),layers,:], axis=1) * 0.5 + np.sum(species[slice(1,hours + 1),layers,:], axis=1) * 0.5
74 |         else:
75 |             data[:] += np.sum(species[slice(0,hours),layers,:], axis=1)
76 |         return data
77 |
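    # Shapes, for reference: dump_val returns (hours, ROW, COL) once the layer
    # axis is flattened -- e.g. a typical 4D IOAPI array of (25, 1, 299, 459)
    # read with default options comes back as (24, 299, 459). sum_val below
    # additionally collapses the time axis and returns (1, ROW, COL).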
78 |     def sum_val(self, species_name, all_hours=False, grid='', ignore_spec=False, inln=False, interpolate=False, layer='', stacks=''):
79 |         '''
80 |         Returns an array of the summed hourly data of the selected species in the open NCF.
81 |         Optionally takes all_hours as T/F. If true, all hours in the NCF are summed. If false, just the first 24/1 day (ie. 0-23).
82 |         Flattens all layers unless a single layer to use is specified.
83 |         '''
84 |         species = self.get_species(species_name, grid, ignore_spec, inln, self.ptsr, stacks, self.informat)
85 |         if all_hours:
86 |             hours = species.shape[0]
87 |         else:
88 |             hours = 24
89 |         data = np.zeros([1, species.shape[-2], species.shape[-1]], '>f4')
90 |         if layer:
91 |             if int(layer) - 1 not in range(species.shape[1]):
92 |                 raise IndexError('The specified layer is out of range.')
93 |             layers = slice(int(layer) - 1, int(layer))
94 |         else:
95 |             layers = slice(0, species.shape[1])
96 |         if interpolate:
97 |             data[:] += np.sum(species[slice(0,hours),layers,:], axis=(0,1)) * 0.5 + np.sum(species[slice(1,hours + 1),layers,:], axis=(0,1)) * 0.5
98 |         else:
99 |             data[:] += np.sum(species[slice(0,hours),layers,:], axis=(0,1))
100 |         return data
101 |
102 |     def close_file(self):
103 |         self.infile.close_file()
104 |
105 |
-------------------------------------------------------------------------------- /emisqa/dataout/__init__.py: --------------------------------------------------------------------------------
1 | __all__ = ['data_out',]
2 |
3 | import emisqa.dataout.data_out
4 |
-------------------------------------------------------------------------------- /emisqa/dataout/data_out.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import str
3 | from builtins import range
4 | from builtins import object
5 | import numpy as np
6 | from datetime import date, datetime, timedelta
7 | from emisqa.helpers import conv2jul, RatioTable
8 | import fauxioapi as io
9 |
10 | class DataOut(object):
11 |     """
12 |     Outfile class.
13 |     """
14 |
15 |     def __init__(self, outfile_name, out_type, gsdate, verbosity = False):
16 |         self.outfile_name = outfile_name
17 |         self.out_type = out_type.upper()
18 |         self.gsdate = gsdate
19 |         self.verbosity = verbosity
20 |         if self.verbosity:
21 |             print('Opening %s file for writing: %s' %(self.out_type, self.outfile_name))
22 |         if self.out_type == 'NCF':
23 |             self.outfile = self._open_NCF()
24 |         elif self.out_type == 'CSV':
25 |             self.outfile = self._open_CSV()
26 |         else:
27 |             raise ValueError('Wrong outfile type specified.')
28 |
29 |     def __str__(self):
30 |         return self.outfile_name
31 |
32 |     def __call__(self):
33 |         return self.outfile
34 |
35 |     def write_outfile(self, out_dict, grid, region, tons, units, srg_file):
36 |         species_list = list(out_dict.keys())
37 |         while '0' in species_list: # Drop any stray '0' placeholder keys before writing
38 |             species_list.remove('0')
39 |         self.species_list = sorted(species_list)
40 |         if self.out_type == 'NCF':
41 |             self._write_NCF(out_dict, self.species_list, grid, tons, units)
42 |         elif self.out_type == 'CSV':
43 |             if region in ('state','county','countyavg'):
44 |                 self._write_FIPS_CSV(out_dict, self.species_list, grid, tons, region, srg_file)
45 |             else:
46 |                 self._write_grid_CSV(out_dict, self.species_list, tons)
47 |
48 |     def _open_CSV(self):
49 |         try:
50 |             outfile = open(self.outfile_name, 'w')
51 |         except IOError:
52 |             raise IOError('%s not available for access.' %self.outfile_name)
53 |         else:
54 |             return outfile
55 |
56 |     def _write_grid_CSV(self, out_dict, species_list, tons):
57 |         '''
58 |         Writes the dictionary to an output file in csv format by grid cell. Takes output dictionary and the species_list.
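        Output layout (illustrative values):

            hour,row,col,CO,NOX
            0,1,1,0.00,1.25
            0,1,2,3.40,0.00
            ...

        When the arrays hold a single summed timestep, the hour column is written as 'sum'.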
59 | ''' 60 | hours = out_dict[species_list[0]]().shape[0] 61 | if hours == 1: 62 | hour_type = 'sum' 63 | else: 64 | hour_type = 'hourly' 65 | self.outfile.write('hour,row,col,' + ','.join(species_name for species_name in species_list) + '\n') 66 | for hour in range(hours): 67 | if hour_type == 'sum': 68 | out_hour = 'sum' 69 | else: 70 | out_hour = hour 71 | for row in range(out_dict[species_list[0]]().shape[1]): 72 | srow = str(row) 73 | for col in range(out_dict[species_list[0]]().shape[2]): 74 | scol = str(col) 75 | outline = '%s,%s,%s' %(out_hour, row + 1, col + 1) 76 | for species_name in species_list: 77 | outline = '%s,%s' %(outline, out_dict[species_name]()[hour,row,col]) 78 | self.outfile.write(outline + '\n') 79 | 80 | def _write_FIPS_CSV(self, out_dict, species_list, grid, tons, region, srg_file): 81 | ''' 82 | Writes the dictionary to an output file in csv format by fips. Takes output dictionary and the species_list. 83 | ''' 84 | if not grid: 85 | raise ValueError('No grid specified. Grid needed to write state or county based csv.') 86 | import pandas as pd 87 | ratio_table = RatioTable() 88 | ratio_table.parse_ratio(region, grid, srg_file) 89 | hours = out_dict[species_list[0]]().shape[0] 90 | df = pd.DataFrame() 91 | for n, fips in enumerate(ratio_table.fips): 92 | fipsdf = pd.DataFrame() 93 | factors = np.tile(ratio_table.arr[n,:], (hours,1,1)) 94 | for species_name in species_list: 95 | vals = pd.DataFrame((out_dict[species_name]() * factors).sum(axis=(1,2)), 96 | columns=[species_name,]) 97 | vals['hour'] = vals.index 98 | vals['fips'] = fips 99 | if len(fipsdf) == 0: 100 | fipsdf = vals 101 | else: 102 | fipsdf = pd.concat((fipsdf, vals[species_name]), axis=1) 103 | df = pd.concat((df, fipsdf)) 104 | cols = ['hour','fips'] + species_list 105 | df.to_csv(self.outfile, columns=cols, index=False) 106 | 107 | def _open_NCF(self): 108 | ''' 109 | Opens the netCDF input file and returns an open file object. 110 | ''' 111 | try: 112 | outfile = io.IODataset(self.outfile_name, 'w') 113 | except TypeError: 114 | raise IOError('%s not available for access.' %self.outfile_name) 115 | else: 116 | return outfile 117 | 118 | def _write_NCF(self, out_dict, species_list, grid, tons = False, units = ''): 119 | ''' 120 | Writes the dictionary to an output file in NCF format. Takes output dictionary and the species_list. 121 | ''' 122 | hours = out_dict[species_list[0]]().shape[0] 123 | self.outfile.set_dimensions('GRID', LAY=1, ROW=grid.NROWS, COL=grid.NCOLS, VAR=len(species_list)) 124 | for species_name in species_list: 125 | self._write_species(out_dict, species_name, tons, units) 126 | self._outfile_settings(hours, grid) 127 | self.outfile.write_TFLAG() 128 | self.outfile.close() 129 | 130 | def _write_species(self, out_dict, species_name, tons = False, units = '', long_name = '', var_desc = ''): 131 | """ 132 | Takes the output dictionary, species name, and optionally long name, units, and variable description. 133 | Creates a species of name species_name with the standard smoke shape of TSTEP, LAY, ROW, and COL. 134 | Returns a multidimensional array of shape TSTEP, LAY, ROW, COL. 
135 | """ 136 | if not units: 137 | if tons: 138 | units = 'tons/day' 139 | else: 140 | units = 'moles/s' 141 | if not long_name: 142 | long_name = species_name 143 | if not var_desc: 144 | var_desc = 'Model species ' + species_name 145 | d_shape = [out_dict[species_name]().shape[0],1,out_dict[species_name]().shape[1],out_dict[species_name]().shape[2]] 146 | species_out = self.outfile.create_variable(species_name, 'REAL', ('TSTEP','LAY','ROW','COL'), 147 | long_name=long_name, units=units, var_desc=var_desc) 148 | data_out = np.zeros(d_shape, np.float32) 149 | for lay in range(data_out.shape[1]): 150 | try: 151 | data_out[:,lay,:,:] = out_dict[species_name]() 152 | except ValueError: 153 | raise ValueError('Array size mismatch. Please check that input domain matches the size of the intended output domain (-G [GRID]).') 154 | species_out[:] = data_out 155 | 156 | def _outfile_settings(self, hours, grid): 157 | ''' 158 | Set the output file dimensions and IOAPI metadata 159 | ''' 160 | # Set the time step based on how many hours are in the DS 161 | if hours > 1: 162 | hstep = 10000 163 | else: 164 | hstep = 240000 165 | esdate = conv2jul(self.gsdate) 166 | self.outfile.set_attributes(esdate, grid, FILEDESC='EMISQA'.ljust(80), TSTEP=hstep) 167 | 168 | -------------------------------------------------------------------------------- /emisqa/dateloop/__init__.py: -------------------------------------------------------------------------------- 1 | all = ['inday',] 2 | 3 | import emisqa.dateloop.inday 4 | -------------------------------------------------------------------------------- /emisqa/dateloop/inday.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta, date, datetime 2 | import time 3 | import os.path 4 | 5 | class InDay(object): 6 | """ 7 | Current date and object for looping. 8 | Takes a Gregorian date (YYYYMMDD) to run as input. 9 | 10 | Operations- 11 | obj.iterday(): Move to the next Gregorian date. 12 | obj: returns current Gregorian date when called as a string 13 | """ 14 | 15 | def __init__(self, gsdate, rep_days, run_days, smkdates_path): 16 | self.today = date(int(gsdate[:4]), int(gsdate[4:6]), int(gsdate[6:8])) 17 | self.y = self.today.year 18 | self.m = self.today.month 19 | self.first_day = self.today.strftime('%Y%m%d') 20 | self.rep_days = rep_days 21 | self.smkdates_path = smkdates_path 22 | self.last_day = self.today + timedelta(run_days - 1) 23 | self.last_day = self.last_day.strftime('%Y%m%d') 24 | if self.rep_days: 25 | self.date_dict = self._parse_smkdates() 26 | 27 | def __str__(self): 28 | """ 29 | When called as a string returns the representative day 30 | """ 31 | current_date = self.today.strftime('%Y%m%d') 32 | if self.rep_days: 33 | return "%s" %self.date_dict[current_date]['rep'] 34 | else: 35 | return current_date 36 | 37 | def _parse_smkdates(self): 38 | """ 39 | Parse in the SMOKE dates file for this month. Creating a dictionary containing representative day information. 40 | """ 41 | infile_name = os.path.join(self.smkdates_path, str(self.y), 'smk_merge_dates_%s%0.2d.txt' %(self.y, self.m)) 42 | in_file = open(infile_name) 43 | for line in in_file: 44 | row = [record.strip().upper() for record in line.split(',')] 45 | if 'Date' in line: 46 | try: 47 | col_num = row.index(self.rep_days) 48 | except ValueError: 49 | raise ValueError('Representative type %s not found in SMOKE merge dates file.' 
%self.rep_days)
50 |                 else:
51 |                     date_dict = {}
52 |                     rep_dict = {}
53 |                 continue
54 |             in_date = row[0]
55 |             rep_date = int(row[col_num])
56 |             # Read only the days that fall in the date range
57 |             if (int(in_date) >= int(self.first_day)) and (int(in_date) <= int(self.last_day)):
58 |                 if rep_date not in rep_dict:
59 |                     rep_dict[rep_date] = {'first': in_date, 'mult': 1}
60 |                 else:
61 |                     rep_dict[rep_date]['mult'] = rep_dict[rep_date]['mult'] + 1
62 |
63 |                 date_dict[in_date] = {'rep': rep_date, 'mult': 0}
64 |         for rep_date in rep_dict:
65 |             first_date = rep_dict[rep_date]['first']
66 |             mult = rep_dict[rep_date]['mult']
67 |             date_dict[first_date]['mult'] = mult
68 |         return date_dict
69 |
70 |     def current_date(self):
71 |         """
72 |         Return the non-representative current date.
73 |         """
74 |         return self.today.strftime('%Y%m%d')
75 |
76 |     def current_mult(self):
77 |         """
78 |         Return the current representative day multiplier for the day.
79 |         """
80 |         if self.rep_days:
81 |             mult = self.date_dict[self.today.strftime('%Y%m%d')]['mult']
82 |         else:
83 |             mult = 1
84 |         return mult
85 |
86 |     def iterday(self):
87 |         """
88 |         Advance to the next Gregorian day.
89 |         """
90 |         self.today = self.today + timedelta(1)
91 |         self.y = self.today.year
92 |         old_month = self.m
93 |         self.m = self.today.month
94 |         if self.rep_days:
95 |             if old_month != self.m:
96 |                 self.date_dict = self._parse_smkdates()
97 |
98 |
-------------------------------------------------------------------------------- /emisqa/default_paths.py: --------------------------------------------------------------------------------
1 | ###
2 | # Path and file defaults
3 |
4 | # Path to the SMOKE dates files
5 | smkDatesPath = 'ancillary/smk_dates/'
6 |
7 | # Path to temporary directory
8 | tmp_dir = '/tmp'
9 |
10 | # Set the default grid description file. This can be overridden with a GRIDDESC environment variable.
11 | defGridDesc = 'ancillary/griddesc.txt'
12 |
13 | # Default surrogate gridding file
14 | defSrgFile = 'ancillary/USA_LAND.txt'
-------------------------------------------------------------------------------- /emisqa/formulas.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 | from re import match
4 | from emisqa.species_array import SpeciesArray
5 |
6 | def calc_form(outDict, formK, formNK, ignoreSpec, verbosity):
7 |     '''
8 |     Calculates output species from formulas
9 |     '''
10 |     rmSpec = []
11 |     for x in range(2):
12 |         if x == 0:
13 |             formList = formK
14 |             keep = True
15 |         else:
16 |             formList = formNK
17 |             keep = False
18 |         if not formList:
19 |             continue
20 |         formList = [formula.strip() for formula in formList.split(',')]
21 |         # Loop over the formulas
22 |         for eq in formList:
23 |             # Get the output species name
24 |             outSpec = eq.split('=')[0]
25 |             if outSpec in list(outDict.keys()):
26 |                 raise ValueError('Output species name %s already exists in species list' %outSpec)
27 |             formula = eq.split('=')[1]
28 |             # Parse the formula
29 |             pol = ''
30 |             formOut = ''
31 |             for y,c in enumerate(formula):
32 |                 if match('[A-Z]', c):
33 |                     pol += c
34 |                 elif match('[0-9\_\-]', c) and pol != '':
35 |                     # Put numbers in pol names, keeping in mind that pols can't start with a number
36 |                     pol += c
37 |                 else:
38 |                     if pol:
39 |                         if pol not in list(outDict.keys()):
40 |                             if ignoreSpec:
41 |                                 formOut += '0'
42 |                                 print('Warning: Input species %s does not exist. Replacing with 0 in formula for %s.' %(pol, outSpec))
43 |                             else:
44 |                                 raise ValueError('Cannot calculate %s. Input species %s does not exist.\nMake sure that species is specified after -s or use -a.' %(outSpec,pol))
45 |                         else:
46 |                             formOut += 'outDict[\'%s\']()' %pol
47 |                             if not keep and pol not in rmSpec:
48 |                                 rmSpec.append(pol)
49 |                         pol = ''
50 |                     formOut += c
51 |                 # Append the last pollutant of the formula if there is one
52 |                 if y == (len(formula) - 1) and pol:
53 |                     if pol not in list(outDict.keys()):
54 |                         if ignoreSpec:
55 |                             formOut += '0'
56 |                             print('Warning: Input species %s does not exist. Replacing with 0 in formula for %s.' %(pol, outSpec))
57 |                         else:
58 |                             raise ValueError('Cannot calculate %s. Input species %s does not exist.\nMake sure that species is specified after -s or use -a.' %(outSpec,pol))
59 |                     else:
60 |                         formOut += 'outDict[\'%s\']()' %pol
61 |                         if not keep and pol not in rmSpec:
62 |                             rmSpec.append(pol)
63 |             if verbosity:
64 |                 print('Calculating %s = %s' %(outSpec, formOut))
65 |             # Calculate the formula if at least one input species is present, otherwise don't write
66 |             if 'outDict' in formOut:
67 |                 outDict[outSpec] = SpeciesArray(eval(formOut), outSpec)
68 |     # Delete output pollutants marked for removal
69 |     for pol in rmSpec:
70 |         del outDict[pol]
71 |     return outDict
72 |
73 |
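A short sketch of how a -F style formula flows through calc_form above; the
species names are illustrative and must already be present in out_dict:

    # out_dict holds SpeciesArray objects keyed 'NO' and 'NO2'
    out_dict = calc_form(out_dict, 'NOX=NO+NO2', '', ignoreSpec=False, verbosity=True)
    # calc_form builds and evals "outDict['NO']() + outDict['NO2']()" and stores
    # the sum as a new SpeciesArray keyed 'NOX'. Passing the formula as formNK
    # (the -K option) would also drop NO and NO2 from the output.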
-------------------------------------------------------------------------------- /emisqa/helpers.py: --------------------------------------------------------------------------------
1 | from __future__ import division
2 | from past.utils import old_div
3 | from builtins import str
4 | from builtins import zip
5 | from builtins import range
6 | import os
7 | from datetime import timedelta, date, datetime
8 | import numpy as np
9 | from emisqa.chem_mechs import *
10 |
11 | # Helper procedures
12 | def check_ev(ev_name):
13 |     """
14 |     Checks if an environment variable is set.
15 |     """
16 |     try:
17 |         var = os.environ[ev_name]
18 |     except KeyError:
19 |         return ''
20 |     else:
21 |         return var
22 |
23 | def parse_float(x):
24 |     """
25 |     Returns a floating point with the correct number of trailing zeros based on the .Dx
26 |     """
27 |     from math import pow
28 |     from numpy import float64
29 |     if 'D' in x:
30 |         num = float(x.strip().split('D')[0])
31 |         exp = int(x.strip().split('D')[1])
32 |         return float64(num * pow(10, exp))
33 |     else:
34 |         return float64(x)
35 |
36 | def conv2jul(gsdate):
37 |     """
38 |     Returns Julian date from Gregorian date.
39 |     """
40 |     gdate = datetime.strptime(str(gsdate), '%Y%m%d')
41 |     return int(datetime.strftime(gdate, '%Y%j'))
42 |
43 | def conv2greg(jul_date):
44 |     """
45 |     Returns Gregorian date from a Julian date.
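    e.g. conv2greg(2016005) returns '20160105' (the inverse of conv2jul).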
46 | """ 47 | jdate = datetime.strptime(str(jul_date), '%Y%j') 48 | return datetime.strftime(jdate, '%Y%m%d') 49 | 50 | def moles2tons(val, species_name, in_format, mech = 'cmaq_cb05'): 51 | """ 52 | Converts a value or array of values from moles/s to tons/hr 53 | """ 54 | mechDct = molecDct[mech] 55 | 56 | if species_name in mechDct: 57 | factor = mechDct[species_name] 58 | else: 59 | factor = 1 60 | 61 | val = val * factor # Convert moles per second to grams per second 62 | 63 | if in_format != 'UAM': 64 | val = val * 3600 # Convert tons per second to tons per hour 65 | 66 | val = val * (0.00000110231131) # Convert grams per hour to tons per hour 67 | 68 | return val 69 | 70 | def data_blocks(infile, size=1024): 71 | """ 72 | Reads in binary files by blocks 73 | """ 74 | while True: 75 | block = infile.read(size) 76 | if len(block) == 0: 77 | break 78 | yield block 79 | 80 | class RatioTable(): 81 | ''' 82 | Defines a degridding ratio table 83 | self.arr = [fips, row, col] = cell to county factor 84 | self.fips = list of fips in axis = 0 array order 85 | ''' 86 | 87 | def __init__(self): 88 | self.arr = False 89 | self.fips = [] 90 | 91 | def parse_ratio(self, region, grid, srg_file): 92 | """ 93 | Parses in the county to grid conversion ratios. 94 | """ 95 | with open(srg_file) as infile: 96 | cell_size = grid.XCELL 97 | cell_area = cell_size * cell_size 98 | ratio_table = {} 99 | for line in infile: 100 | line = [cell.strip() for cell in line.split('\t') if cell and cell != '!'] 101 | if line[0].startswith('#') or line[0].strip() == '': 102 | if line[0].strip() == '#GRID': 103 | # Calculate grid cell offset from surrogate grid and cell range for our grid 104 | xorig = float(line[2]) 105 | yorig = float(line[3]) 106 | col_offset = abs(int(old_div((xorig - grid.XORIG), cell_size))) 107 | row_offset = abs(int(old_div((yorig - grid.YORIG), cell_size))) 108 | col_range = range(1 + col_offset, grid.NCOLS + 1 + col_offset) 109 | row_range = range(1 + row_offset, grid.NROWS + 1 + row_offset) 110 | else: 111 | cols = ['code','fips','col','row','fac','cellarea','ctyarea','fac2'] 112 | row_dict = dict(list(zip(cols, line))) 113 | if region == 'state': 114 | fips = row_dict['fips'][:2] 115 | elif region in ('county','countyavg'): 116 | fips = row_dict['fips'] 117 | # Check to see if the surrogate grid col and row is within the range of our grid 118 | if int(row_dict['col']) in col_range and int(row_dict['row']) in row_range: 119 | ratio_table.setdefault(fips, np.zeros([grid.NROWS, grid.NCOLS], 'f')) 120 | # Offset the columns and rows starting at (0,0) 121 | col = int(row_dict['col']) - col_offset - 1 122 | row = int(row_dict['row']) - row_offset - 1 123 | if col in range(grid.NCOLS) and row in range(grid.NROWS): 124 | if region == 'countyavg': 125 | divisor = float(row_dict['ctyarea'].strip()) 126 | else: 127 | divisor = cell_area 128 | ratio = old_div(float(row_dict['cellarea']), divisor) 129 | ratio_table[fips][row,col] = ratio 130 | self.fips = sorted(ratio_table.keys()) 131 | self.arr = np.zeros([len(self.fips), grid.NROWS, grid.NCOLS], 'f') 132 | for n, fips in enumerate(self.fips): 133 | self.arr[n,:] = ratio_table[fips][:] 134 | self.arr = np.ma.masked_equal(self.arr, 0) 135 | np.ma.set_fill_value(self.arr, 0) 136 | 137 | def clean_temp(zip_dict): 138 | """ 139 | Cleans up the temporary zip output files at the end of a run. 
140 | """ 141 | if len(zip_dict) == 0: 142 | return 143 | for name in zip_dict: 144 | os.remove(zip_dict[name]) 145 | 146 | 147 | -------------------------------------------------------------------------------- /emisqa/inline/__init__.py: -------------------------------------------------------------------------------- 1 | all = ['stack_group',] 2 | 3 | import emisqa.inline.stack_group 4 | -------------------------------------------------------------------------------- /emisqa/inline/stack_group.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from builtins import range 3 | from builtins import object 4 | import sys 5 | import netCDF4 as ncf 6 | 7 | class StkGrp(object): 8 | """ 9 | Develop the stack group col/row x-ref information 10 | """ 11 | 12 | def __init__(self, infile_name, grid, verbosity=False): 13 | """ 14 | """ 15 | self.infile_name = infile_name 16 | self.verbosity = verbosity 17 | self.infile = self._load_infile() 18 | if self.infile.GDNAM.strip() == grid.GDNAM.strip(): 19 | self.pt_xref = {} 20 | self.stk_num = 0 21 | self._get_xref(grid) 22 | else: 23 | raise ValueError('Stack group and output grid names must match') 24 | 25 | def _open_NCF(self): 26 | ''' 27 | Opens the netCDF input file and returns an open file object. 28 | ''' 29 | try: 30 | infile = ncf.Dataset(self.infile_name) 31 | except TypeError: 32 | raise TypeError('%s not available for access or not a netCDF.' %self.infile_name) 33 | else: 34 | return infile 35 | 36 | def _load_infile(self): 37 | """ 38 | Set the infile name based on the SMOKE conventions. 39 | """ 40 | if self.verbosity: 41 | print("Stack groups: " + self.infile_name) 42 | return self._open_NCF() 43 | 44 | def _get_xref(self, grid): 45 | """ 46 | Process the col and row to create a x ref to stack 47 | """ 48 | # Fetch a variable from the in file 49 | rows = self.infile.variables['ROW'][:] 50 | cols = self.infile.variables['COL'][:] 51 | self.stk_num = rows.shape[2] 52 | for stack in range(rows.shape[2]): 53 | row = int(rows[0][0][stack][0])-1 54 | col = int(cols[0][0][stack][0])-1 55 | if col in range(grid.NCOLS) and row in range(grid.NROWS): 56 | key = '%0.4d%0.4d' %(col,row) 57 | self.pt_xref.setdefault(key, []) 58 | self.pt_xref[key].append(stack) 59 | 60 | -------------------------------------------------------------------------------- /emisqa/run_parse.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from emisqa.helpers import check_ev, conv2greg 3 | from emisqa.default_paths import * 4 | from optparse import OptionParser,OptionGroup 5 | from emisqa.data_file import DataFile 6 | from emisqa.dateloop import inday as dl 7 | from emisqa.inline.stack_group import StkGrp 8 | import os.path 9 | from sys import exit 10 | from fauxioapi import Grid 11 | 12 | ### Main 13 | 14 | def listRunTypes(): 15 | ''' 16 | Lists the available run types and syntax. 17 | ''' 18 | print(''' 19 | \n\t\tRun types for the pyQA script.\n\t\t ------------ \n 20 | pe: percent error run between two files and the specified species. Requires two files listed with the -f option. 21 | rd: raw difference run between two files and the specified species. Requires two files listed with the -f option. 22 | add: adds together two files and the specified species. Returns CSV. Requires two files listed with the -f option. 23 | dv: dumps the daily value or timestep sum of a specified file. 
Requires one file listed with the -f option. 24 | sum: sums up a group of files. Requires case, sector, speciation, input path, grid, GSDATE, and rundays. 25 | avg: averages a group of files over the number of days. Requires case, sector, speciation, input path, grid, GSDATE, and rundays. 26 | domain: writes the daily domain of a file. Returns as one line CSV. Requires one file listed with the -f option. 27 | mm: gives the maximum and minimum species values for a file. Requires one file listed with the -f option. 28 | dump: raw dump of a gridded file. Requires one file with the -f option. 29 | dh: Dump hourly data from a gridded file. Useful for type conversions.\n 30 | ''') 31 | exit(1) 32 | 33 | class RunOpts(object): 34 | 35 | def __init__(self): 36 | self.set_ev() 37 | options, args = self.get_opts() 38 | self.check_valid(options, args) 39 | self.set_opt_args(options) 40 | self.set_cmd_args(args) 41 | self.init_run() 42 | 43 | def get_opts(self): 44 | # Handle command line arguments and options. 45 | self.parser = OptionParser(usage = 'usage: %prog [options] OUTFILE RUNTYPE') 46 | outputGroup = OptionGroup(self.parser, "Output File Configuration Options") 47 | loopGroup = OptionGroup(self.parser, "Options Required for sum and avg Methods") 48 | regionGroup = OptionGroup(self.parser, "Options Required for Region Output","Output type must be set to CSV or left default") 49 | self.parser.add_option('-s', '--speciesname', dest='species_name', 50 | help='List of species to process. Ex. -s SPEC1,SPEC2,...', default='') 51 | self.parser.add_option('-a', '--allspecies', action='store_true', dest='all_species', 52 | help='Run for all species.', default=False) 53 | self.parser.add_option('-l', '--listruns', action='store_true', dest='listRuns', 54 | help='List run types.', default=False) 55 | self.parser.add_option('-f', '--file', dest='file_name', metavar='FILE', 56 | help='Specify a file or a list of up to two files for access.', default='') 57 | self.parser.add_option('-v', '--verbose', action='store_true', dest='verbosity', 58 | help='Give more information during run.', default=False) 59 | self.parser.add_option('-i', '--inline', action='store_true', dest='inln', 60 | help='Use inline input files with stack groups rather than 2D', default=False) 61 | self.parser.add_option('-g', '--stack-groups', dest='stack_file', 62 | help='Explicitly set stack groups file location. Set to default path when looping with -i option.', default = '') 63 | self.parser.add_option('-c', '--informat', dest='informat', 64 | help='Input file type. Specify NCF, CSV, or UAM.', default='NCF', metavar='INTYPE') 65 | self.parser.add_option('--camx-ptsr', action='store_true', dest='ptsr', 66 | help='Use PTSOURCE CAMx emissions format as input.', default=False) 67 | self.parser.add_option('--layer', dest='layer', 68 | help='Specify an individual layer. Defaults to flatten all layers.', metavar='#', default='') 69 | self.parser.add_option('--ignore-missing', action='store_true', dest='ignore_spec', 70 | help='Ignore missing input species instead of producing fatal error. May produce inaccurate results.', default=False) 71 | self.parser.add_option('--interpolate', action='store_true', dest='interpolate', 72 | help='Average between hours to get interpolated results for CAMx comparison.', default=False) 73 | self.parser.add_option('--mp', dest='threads', 74 | help='Threads to use. Defaults to 1.', default='1') 75 | regionGroup.add_option('-e', '--region', dest='region', 76 | help='Specify CSV output type as county or state. 
Defaults to gridded.\nRegion requires -G [grid] command line option.', metavar='region', default='') 77 | regionGroup.add_option('--srg_file', dest='srgfile', 78 | help='Specify path to surrogate file used for getting regions. Defaults to 12km land area.', default=defSrgFile) 79 | loopGroup.add_option('-r', '--repdays', dest='rep_days', 80 | help='Use representative days of specified type: aveday_N, aveday_Y, mwdss_N, mwdss_Y, week_N, week_Y, all\n Defaults to all', metavar='TYPE', default='') 81 | loopGroup.add_option('-G', '--grid', dest='grid_name', 82 | help='Set the grid name when looping or when writing state/county based reports.', metavar='GRID', default='') 83 | loopGroup.add_option('-D', '--gsdate', dest='gsdate', 84 | help='Set the starting GSDATE when looping.', metavar='YYYYMMDD', default='') 85 | loopGroup.add_option('-R', '--rundays', dest='run_days', 86 | help='Number of days from the starting GSDATE to run for looping.', metavar='1-366', default='') 87 | loopGroup.add_option('-C', '--case', dest='case', 88 | help='Name of case for the input data when looping.', metavar='CASE', default='') 89 | loopGroup.add_option('-S', '--sector', dest='sector', 90 | help='Sector of the input data when looping.', metavar='SECTOR', default='') 91 | loopGroup.add_option('-P', '--speciation', dest='spec', 92 | help='Speciation of the input data for mole->ton conversion and filenames when looping.', metavar='SPEC', default='cmaq_cb6') 93 | loopGroup.add_option('-I', '--inputpath', dest='inpath', 94 | help='Path to the input data. Used when looping.', metavar='PATH', default='') 95 | outputGroup.add_option('-o', '--outtype', dest='out_type', 96 | help='Output to NCF or CSV. Default is CSV.', default='CSV') 97 | outputGroup.add_option('-t', '--tons', action='store_true', dest='tons', 98 | help='Convert units from moles/sec to tons/hr. Defaults to no conversion.', default=False) 99 | outputGroup.add_option('-F', '--formula', dest='formK', 100 | help='Adds existing species into an output species. Format: CALCSPEC1=SPECa+SPECb,CALCSPEC2=SPECc+SPECd,...', default='') 101 | outputGroup.add_option('-K', '--formula-no-keep', dest='formNK', 102 | help='Same as -F, except that all species used in a calculation will not be in output.', default='') 103 | outputGroup.add_option('-u', '--units', dest='units', 104 | help='Override units name. Default is moles/s or tons/day if -t flag is used.', default='') 105 | outputGroup.add_option('--all-hours', action='store_true', dest='all_hours', 106 | help='Sum up species over all time steps/hours in a file. 
Default is to sum first 24 hours.', default=False)
107 |         self.parser.add_option('--griddesc', dest='griddesc',
108 |             help='Specify the path to the grid description file', metavar='#', default=defGridDesc)
109 |         self.parser.add_option_group(regionGroup)
110 |         self.parser.add_option_group(loopGroup)
111 |         self.parser.add_option_group(outputGroup)
112 |         return self.parser.parse_args()
113 |
114 |     def set_ev(self):
115 |         '''
116 |         Set the attributes with the environment variables
117 |         '''
118 |         self.evs = {'GRID': 'grid_name', 'GSDATE': 'gsdate', 'CASE': 'case', 'SECTOR': 'sector',
119 |             'IMD_ROOT': 'inpath', 'SPEC': 'spec', 'GRIDDESC': 'griddesc'}
120 |         for ev, att_name in self.evs.items():
121 |             setattr(self, att_name, check_ev(ev))
122 |
123 |     def set_opt_args(self, options):
124 |         '''
125 |         Set the self.parser options to object attributes
126 |         '''
127 |         int_list = ['run_days','threads']
128 |         lower_list = ['region',]
129 |         upper_list = ['rep_days','informat','out_type']
130 |         for opt, val in options.__dict__.items():
131 |             if type(val) == str:
132 |                 val = val.strip()
133 |             if opt in int_list:
134 |                 if val != '':
135 |                     val = int(val)
136 |             elif opt in lower_list:
137 |                 val = val.lower()
138 |             elif opt in upper_list:
139 |                 val = val.upper()
140 |             # Don't override an att that was set by an EV but not set on the command line
141 |             if opt in self.evs.values() and val == '':
142 |                 pass
143 |             else:
144 |                 setattr(self, opt, val)
145 |
146 |     def set_cmd_args(self, args):
147 |         self.outfile_name = args[0]
148 |         self.run_type = args[1].strip().lower()
149 |
150 |     def check_valid(self, options, args):
151 |         '''
152 |         Misc. option validity checks
153 |         '''
154 |         if options.listRuns:
155 |             listRunTypes()
156 |         if len(args) != 2:
157 |             print('Must specify an outfile and a run type.')
158 |             print('Use the -l option to list run types or -h for further options.')
159 |             exit()
160 |         # Handle species list options
161 |         if not options.species_name and not options.all_species:
162 |             self.parser.error('No species specified. Must either specify the -s or -a option.')
163 |         if options.species_name and options.all_species:
164 |             self.parser.error('You must only specify either the -s or the -a option.')
165 |         self.species_list = options.species_name.split(',')
166 |
167 |     def init_run(self):
168 |         '''
169 |         Setup other attributes that need additional processing or logic to define
170 |         '''
171 |         self.zip_dict = {} # Dictionary pointing to unzipped input files
172 |         if self.grid_name and self.griddesc:
173 |             self.grid = Grid(self.grid_name, self.griddesc)
174 |         elif self.out_type == 'NCF':
175 |             self.parser.error('Must specify a grid name and a grid description file when outputting to NCF.')
176 |         else:
177 |             self.grid = ''
178 |         # Set the input file name prefix for inline versus 2D
179 |         if self.inln:
180 |             inprefix = 'inln'
181 |             if not self.grid_name:
182 |                 self.parser.error('Inline to 2D conversion requires setting a grid. Please specify a grid with -G')
183 |             if not self.stack_file:
184 |                 if self.inpath and self.sector and self.grid and self.case:
185 |                     self.stack_file = os.path.join(self.inpath, 'stack_groups_%s_%s_%s.ncf' %(self.sector,
186 |                         self.grid, self.case))
187 |                 else:
188 |                     self.parser.error('Stack groups file not set.
Please set path using -g.') 189 | self.stacks = StkGrp(self.stack_file, self.grid).pt_xref 190 | else: 191 | inprefix = 'emis' 192 | self.stacks = '' 193 | # Load first file to be read to obtain species list 194 | if len(self.file_name.split(',')) > 1: 195 | speciesfile_name = self.file_name.split(',')[0] 196 | else: 197 | speciesfile_name = self.file_name 198 | # Try to read an individual file from the file list 199 | if speciesfile_name: 200 | infile = DataFile(speciesfile_name, self.verbosity, self.informat, self.ptsr, 201 | self.zip_dict) 202 | if not self.gsdate: 203 | esdate = infile.sdate 204 | if int(esdate) < 190000: 205 | esdate = 2011001 206 | self.gsdate = conv2greg(esdate) 207 | # Try to read the first file in a looped case 208 | else: 209 | if not self.grid_name or not self.gsdate or not self.case or not self.sector or not self.inpath or not self.spec or not self.run_days: 210 | self.parser.error('Must set an input path, case, sector, gsdate, grid, rundays, and speciation OR an input file name (-f).') 211 | # Handle representative days 212 | if self.run_days: 213 | sdate = dl.InDay(self.gsdate, self.rep_days, self.run_days, smkDatesPath) 214 | else: 215 | sdate = self.gsdate 216 | if self.sector.lower() == 'mrggrid': 217 | self.infile_name = os.path.join(self.inpath, 'emis_mole_all_%s_%s_nobeis_%s.ncf' %(sdate, self.grid_name, self.case)) 218 | else: 219 | self.infile_name = os.path.join(self.inpath, self.sector, 220 | '%s_mole_%s_%s_%s_%s_%s.ncf' %(inprefix, self.sector, sdate, self.grid_name, 221 | self.spec, self.case)) # v5 directory structure 222 | infile = DataFile(self.infile_name, self.verbosity, self.informat, self.ptsr, self.zip_dict) 223 | # Get species list from open file 224 | if self.all_species: 225 | self.species_list = list(species_name for species_name in infile.species_list if species_name != 'TFLAG') 226 | if self.verbosity and self.layer: 227 | print('Running for layer %s' %self.layer) 228 | 229 | -------------------------------------------------------------------------------- /emisqa/run_select.py: -------------------------------------------------------------------------------- 1 | from emisqa.runtypes import * 2 | import multiprocessing 3 | 4 | def runQA(opts): 5 | # Select run type and run the script. 6 | # Run types dictionary contains x-reference from command line run type name to run type function name 7 | run_types = {'pe': pe, 'add': add_files, 'dv': dump_dv, 'sum': sumdv, 'avg': avgdv, 8 | 'domain': single_domain, 'mm': mm_domain, 'rd': raw_diff, 'dump': raw_dump, 'hd': hour_dump, 9 | 'yd': hourly_domain, 'sumhour': sumhour} 10 | if opts.run_type not in run_types: 11 | raise ValueError('Specified run type not available. 
Please refer to the list of run types using the -l argument.')
12 |     pool = multiprocessing.Pool(opts.threads)
13 |     workers = [pool.apply_async(run_types[opts.run_type].get_spec, (species_name, opts)) for species_name in opts.species_list]
14 |     out_dict = dict([worker.get() for worker in workers])
15 |     pool.close()
16 |     pool.join()
17 |     # Convert to tons by species if conversion is turned on
18 |     if opts.tons:
19 |         for speciesName in list(out_dict.keys()):
20 |             out_dict[speciesName].moles2tons(opts.informat, opts.spec)
21 |     return out_dict
22 |
-------------------------------------------------------------------------------- /emisqa/runtypes/__init__.py: --------------------------------------------------------------------------------
1 | __all__ = ['add_files','avgdv','hour_dump','mm_domain','pe','raw_diff','raw_dump','single_domain','sumdv','hourly_domain','dump_dv','sumhour']
2 |
-------------------------------------------------------------------------------- /emisqa/runtypes/add_files.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from emisqa.data_file import DataFile
3 | import numpy as np
4 | from emisqa.species_array import SpeciesArray
5 |
6 | '''
7 | Adds variables together across multiple files
8 | '''
9 |
10 | def get_spec(species_name, opts):
11 |     '''
12 |     Adds the selected species across two or more input files.
13 |     '''
14 |     file_names = [fn.strip() for fn in opts.file_name.split(',') if fn.strip()]
15 |     out_arr = False
16 |     if len(file_names) < 2:
17 |         raise ValueError('You must specify two or more input filenames using the -f argument.')
18 |     if opts.verbosity:
19 |         print('Adding for species: %s' %species_name)
20 |     for fn in file_names:
21 |         f_in = DataFile(fn, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
22 |         if species_name not in f_in.species_list and opts.ignore_spec:
23 |             print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, fn))
24 |         else:
25 |             species = SpeciesArray(f_in.dump_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
26 |                 opts.inln, opts.interpolate, opts.layer, opts.stacks), species_name)
27 |             if out_arr:
28 |                 out_arr.add_array(species.array)
29 |             else:
30 |                 out_arr = species
31 |     if out_arr:
32 |         return (species_name, out_arr)
33 |     else:
34 |         raise ValueError('The species %s does not exist in any input file. Please remove from species list.' %species_name)
35 |
36 |
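A sketch of the representative-day loop that avgdv.py (below) and sumdv.py share;
the date and profile type are illustrative and assume the smk_merge_dates files
exist under smkDatesPath:

    from emisqa.dateloop.inday import InDay
    from emisqa.default_paths import smkDatesPath

    day = InDay('20160101', 'MWDSS_N', 31, smkDatesPath)
    for n in range(31):
        mult = day.current_mult()  # 0 when this date is already represented
        if mult > 0:
            print(str(day), mult)  # str(day) is the representative date to read
        if n != 30:
            day.iterday()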
-------------------------------------------------------------------------------- /emisqa/runtypes/avgdv.py: --------------------------------------------------------------------------------
1 | from __future__ import division
2 | from __future__ import print_function
3 | from builtins import range
4 | from emisqa.data_file import DataFile
5 | import numpy as np
6 | from emisqa.species_array import SpeciesArray
7 | from emisqa.dateloop.inday import InDay
8 | from emisqa.default_paths import smkDatesPath
9 | import os.path
10 |
11 | def get_spec(species_name, opts):
12 |     '''
13 |     Averages the daily values of NCF files from a start date through the number of run dates.
14 |     '''
15 |     if opts.verbosity:
16 |         print('Creating daily average for species: %s' %species_name)
17 |     current_day = InDay(opts.gsdate, opts.rep_days, opts.run_days, smkDatesPath)
18 |     SUM = False
19 |     for day in range(opts.run_days):
20 |         day_mult = current_day.current_mult()
21 |         # Skip days that have already been represented.
22 |         if day_mult == 0:
23 |             if day != (opts.run_days - 1):
24 |                 current_day.iterday()
25 |             continue
26 |         # Set the input file name prefix for inline versus 2D
27 |         if opts.inln:
28 |             prefix = 'inln'
29 |         else:
30 |             prefix = 'emis'
31 |         if opts.sector.lower() == 'mrggrid':
32 |             infile_name = os.path.join(opts.inpath, 'emis_mole_all_%s_%s_%s_%s.ncf' %(current_day,
33 |                 opts.grid.GDNAM, opts.spec, opts.case))
34 |         elif opts.sector.lower() == 'mrggrid_withbeis':
35 |             infile_name = os.path.join(opts.inpath, 'emis_mole_all_%s_%s_withbeis_%s.ncf' %(current_day,
36 |                 opts.grid.GDNAM, opts.case))
37 |         elif opts.sector.lower() == 'mrggrid_nobeis':
38 |             infile_name = os.path.join(opts.inpath, 'emis_mole_all_%s_%s_nobeis_%s.ncf' %(current_day,
39 |                 opts.grid.GDNAM, opts.case))
40 |         else:
41 |             infile_name = os.path.join(opts.inpath, opts.sector,
42 |                 '%s_mole_%s_%s_%s_%s_%s.ncf' %(prefix, opts.sector, current_day,
43 |                 opts.grid.GDNAM, opts.spec, opts.case)) # v5 directory structure
44 |         in_file = DataFile(infile_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
45 |         if species_name not in in_file.species_list and opts.ignore_spec:
46 |             print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, in_file))
47 |             break
48 |         in_array = in_file.sum_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
49 |             opts.inln, opts.interpolate, opts.layer, opts.stacks) * day_mult
50 |         if SUM:
51 |             SUM.add_array(in_array)
52 |         else:
53 |             SUM = SpeciesArray(in_array, species_name)
54 |         in_file.close_file()
55 |         if day != (opts.run_days - 1):
56 |             current_day.iterday()
57 |     if SUM:
58 |         return (species_name, SpeciesArray(SUM() / opts.run_days, species_name))
59 |     else:
60 |         return (species_name, 0)
61 |
-------------------------------------------------------------------------------- /emisqa/runtypes/dump_dv.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from emisqa.data_file import DataFile
3 | import numpy as np
4 | from emisqa.species_array import SpeciesArray
5 |
6 | def get_spec(species_name, opts):
7 |     '''
8 |     Dumps all summed column and rows for the day.
9 |     '''
10 |     file1 = DataFile(opts.file_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
11 |     if opts.verbosity:
12 |         print('Creating summed dump for species: %s' %species_name)
13 |     if species_name not in file1.species_list and opts.ignore_spec:
14 |         print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, file1))
15 |         DV = 0
16 |     else:
17 |         DV = SpeciesArray(file1.sum_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
18 |             opts.inln, opts.interpolate, opts.layer, opts.stacks), species_name)
19 |     return (species_name, DV)
20 |
21 |
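Each runtype module wired into run_select exposes the same entry point,
get_spec(species_name, opts), returning a (species_name, result) tuple where
result is a SpeciesArray (or 0 when the species was skipped). runQA fans the
species list out over a multiprocessing pool and rebuilds the output dictionary
from those tuples, so a single species can also be pulled directly (species
name illustrative; opts is a populated RunOpts):

    from emisqa.runtypes import dump_dv
    name, arr = dump_dv.get_spec('NOX', opts)
    out_dict = {name: arr}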
-------------------------------------------------------------------------------- /emisqa/runtypes/hour_dump.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from emisqa.data_file import DataFile
3 | import numpy as np
4 | from emisqa.species_array import SpeciesArray
5 |
6 | def get_spec(species_name, opts):
7 |     '''
8 |     Dumps every grid cell for every hour for each species.
9 |     '''
10 |     file1 = DataFile(opts.file_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
11 |     print('Writing Domain...')
12 |     if opts.verbosity:
13 |         print('Creating domain total for species: %s' %species_name)
14 |     if species_name not in file1.species_list and opts.ignore_spec:
15 |         print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, file1))
16 |         DV = SpeciesArray(np.zeros([24,opts.grid.NROWS,opts.grid.NCOLS]), species_name)
17 |     else:
18 |         DV = SpeciesArray(file1.dump_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
19 |             opts.inln, opts.interpolate, opts.layer, opts.stacks), species_name)
20 |     return (species_name, DV)
21 |
-------------------------------------------------------------------------------- /emisqa/runtypes/hourly_domain.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from emisqa.data_file import DataFile
3 | from emisqa.species_array import SpeciesArray
4 | import numpy as np
5 |
6 | def get_spec(species_name, opts):
7 |     '''
8 |     Sums up every grid cell for every hour for each species.
9 |     '''
10 |     if opts.region:
11 |         raise ValueError('This run type does not support grid to fips conversion. Please remove -e argument from command line.')
12 |     file1 = DataFile(opts.file_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
13 |     if opts.verbosity:
14 |         print('Creating domain total for species: %s' %species_name)
15 |     if species_name not in file1.species_list and opts.ignore_spec:
16 |         print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, file1))
17 |         DV = SpeciesArray(np.zeros([24,1,1]), species_name)
18 |     else:
19 |         DV = SpeciesArray(file1.dump_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
20 |             opts.inln, opts.interpolate, opts.layer, opts.stacks), species_name)
21 |     DV.sum_dims()
22 |     return (species_name, DV)
23 |
-------------------------------------------------------------------------------- /emisqa/runtypes/mm_domain.py: --------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from emisqa.data_file import DataFile
3 | import numpy as np
4 | from emisqa.species_array import SpeciesArray
5 | import sys
6 |
7 | def getDict(speciesList, fileName, allHours = False, grid = '', gridDesc = '', ignoreSpec = False, inln = False, interpolate = False, layer = '', region = '',
8 |     stacks = False, ptsr = False, inFormat = 'NCF', verbosity = False, zipDict = {}):
9 |     '''
10 |     Sums up every grid cell for a day for each species.
11 |     '''
12 |     if not fileName:
13 |         raise ValueError('You must specify an input filename using the -f argument.')
14 |     if region:
15 |         raise ValueError('This run type does not support grid to fips conversion. Please remove -e argument from command line.')
16 |
17 |     file1 = DataFile(fileName, verbosity, inFormat, ptsr, zipDict)
18 |
19 |     print('Running max/min')
20 |     outDict = dict()
21 |
22 |     for speciesName in speciesList:
23 |         if verbosity:
24 |             print('Running max/min for species: %s' %speciesName)
25 |
26 |         if speciesName not in file1.speciesList and ignoreSpec:
27 |             print('WARNING: The species %s does not exist in the file %s. Skipping.'
%(speciesName, file1)) 28 | continue 29 | 30 | array1 = SpeciesArray(file1.sumVal(speciesName, layer, allHours), speciesName) 31 | mmVals = array1.maxMin() 32 | outFile.write('For species %s\n' %speciesName) 33 | outFile.write('Min Value: %s Max Value: %s\n' %(mmVals[0], mmVals[1])) 34 | 35 | sys.exit(0) 36 | 37 | -------------------------------------------------------------------------------- /emisqa/runtypes/pe.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from __future__ import print_function 3 | from builtins import range 4 | import sys 5 | 6 | def percentError(speciesList, fileName): 7 | ''' 8 | Calculate the percentError between two NCF files. 9 | ''' 10 | sys.exit('PE not yet implemented') 11 | if len(fileName.split(',')) != 2: \ 12 | sys.exit('ERROR: You must specify two input filenames using the -f argument.') 13 | file1 = dataFile(fileName.split(',')[0]) 14 | file2 = dataFile(fileName.split(',')[1]) 15 | 16 | print('Running percent error calculation...') 17 | outDict = {} # Create the output dictionary that will be of shape { row: { col: { speciesname: val ... } ... } ... } 18 | 19 | # Run percent error a special way to get correct state and county percentages 20 | if region == 'state' or region == 'county': 21 | if not grid: 22 | sys.exit('No grid specified. Needed to write state or county based csv.') 23 | ratioTable = parseRatio(region) 24 | outFile = open(outFileName, 'w') 25 | outFile.write('fips,' + ','.join(speciesName for speciesName in speciesList) + '\n') 26 | fipsList = sorted(ratioTable.keys()) 27 | for fips in fipsList: 28 | outLine = fips 29 | lineDict = {} 30 | for speciesName in speciesList: 31 | if verbosity: 32 | print('Getting percent error for species: %s' %speciesName) 33 | if speciesName not in lineDict: 34 | lineDict[speciesName] = { 'a1': 0, 'a2': 0} 35 | array1 = speciesArray(file1.sumVal(speciesName, layer, allHours), speciesName) 36 | array2 = speciesArray(file2.sumVal(speciesName, layer, allHours), speciesName) 37 | for cell in ratioTable[fips]: 38 | col = int(cell.split(',')[0]) 39 | row = int(cell.split(',')[1]) 40 | if col == 1: 41 | print('Col') 42 | if row == 1: 43 | print('Row') 44 | if row not in list(range(array1().shape[0])) or col not in list(range(array1().shape[1])): 45 | continue # Skip ratios that are outside the domain 46 | a1val = array1()[row][col] * ratioTable[fips][cell] 47 | a2val = array2()[row][col] * ratioTable[fips][cell] 48 | lineDict[speciesName]['a1'] = lineDict[speciesName]['a1'] + a1val 49 | lineDict[speciesName]['a2'] = lineDict[speciesName]['a2'] + a2val 50 | if lineDict[speciesName]['a1'] == 0: 51 | lineDict[speciesName]['pe'] = 0 52 | else: 53 | lineDict[speciesName]['pe'] = round(((old_div((lineDict[speciesName]['a2'] - lineDict[speciesName]['a1']), lineDict[speciesName]['a1'])) * 100), 2) 54 | if tons: 55 | outLine = '%s,%s' %(outLine, moles2tons(lineDict[speciesName]['pe'], speciesName)) 56 | else: 57 | outLine = '%s,%s' %(outLine, lineDict[speciesName]['pe']) 58 | outFile.write(outLine + '\n') 59 | sys.exit(0) 60 | # Run percent error the standard way for gridded percentages 61 | else: 62 | for speciesName in speciesList: 63 | if verbosity: 64 | print('Getting percent error for species: %s' %speciesName) 65 | array1 = speciesArray(file1.sumVal(speciesName, layer, allHours), speciesName) 66 | array2 = file2.sumVal(speciesName, layer, allHours) 67 | PE = array1.pctErr(array2) 68 | 69 | if tons: 70 | outDict[speciesName] = moles2tons(PE, 
--------------------------------------------------------------------------------
/emisqa/runtypes/raw_diff.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from sys import exit
3 | from emisqa.data_file import DataFile
4 | from emisqa.species_array import SpeciesArray
5 | 
6 | def getDict(speciesList, fileName, allHours = False, grid = '', gridDesc = '', ignoreSpec = False, inln = False,
7 |         interpolate = False, layer = '', region = '', stacks = False, ptsr = False, inFormat = 'NCF',
8 |         verbosity = False, zipDict = {}):
9 |     '''
10 |     Gets the raw difference of two NCF files.
11 |     '''
12 |     if len(fileName.split(',')) != 2:
13 |         exit('ERROR: You must specify two input filenames using the -f argument.')
14 | 
15 |     file1 = DataFile(fileName.split(',')[0], verbosity, inFormat, ptsr, zipDict)
16 |     file2 = DataFile(fileName.split(',')[1], verbosity, inFormat, ptsr, zipDict)
17 | 
18 |     print('Diffing files...')
19 |     outDict = {}  # Output dictionary of shape { speciesname: diff_array ... }
20 | 
21 |     for speciesName in speciesList:
22 |         if verbosity:
23 |             print('Diffing species: %s' %speciesName)
24 |         if speciesName not in file1.species_list and ignoreSpec:
25 |             print('WARNING: The species %s does not exist in the file %s. Skipping.' %(speciesName, file1))
26 |             continue
27 |         array1 = SpeciesArray(file1.sum_val(speciesName, allHours, grid, ignoreSpec, inln, interpolate, layer, stacks), speciesName)
28 |         array2 = SpeciesArray(file2.sum_val(speciesName, allHours, grid, ignoreSpec, inln, interpolate, layer, stacks), speciesName)
29 |         DIFF = array1.diff_arr(array2())
30 |         outDict[speciesName] = DIFF
31 | 
32 |     return outDict
33 | 
--------------------------------------------------------------------------------
/emisqa/runtypes/raw_dump.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from emisqa.data_file import DataFile
3 | import numpy as np
4 | from emisqa.species_array import SpeciesArray
5 | 
6 | def get_spec(species_name, opts):
7 |     '''
8 |     Dumps all summed column and rows for the day.
9 |     '''
10 |     file1 = DataFile(opts.file_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
11 |     print('Writing Domain...')
12 |     if opts.verbosity:
13 |         print('Creating raw dump for species: %s' %species_name)
14 |     if species_name not in file1.species_list and opts.ignore_spec:
15 |         print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, file1))
16 |         DV = SpeciesArray(np.zeros([24,opts.grid.NROWS,opts.grid.NCOLS]), species_name)
17 |     else:
18 |         DV = SpeciesArray(file1.dump_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
19 |             opts.inln, opts.interpolate, opts.layer, opts.stacks), species_name)
20 |     return (species_name, DV)
21 | 
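A hedged usage sketch of the raw_diff.getDict interface above; the file names and grid are placeholders, and the real tool builds these arguments from the command line rather than calling the function directly:

# Hypothetical invocation -- paths and grid name are placeholders.
from emisqa.runtypes import raw_diff

diffs = raw_diff.getDict(['NO2', 'SO2'], 'emis_base.ncf,emis_sens.ncf',
                         grid='12US1', verbosity=True)
for species, arr in diffs.items():
    print(species, arr.sum())  # total difference over the domain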
--------------------------------------------------------------------------------
/emisqa/runtypes/single_domain.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from emisqa.data_file import DataFile
3 | from emisqa.species_array import SpeciesArray
4 | import numpy as np
5 | 
6 | def get_spec(species_name, opts):
7 |     '''
8 |     Sums every grid cell over all hours into a single domain total for each species.
9 |     '''
10 |     if opts.region:
11 |         raise ValueError('This run type does not support grid to fips conversion. Please remove -e argument from command line.')
12 |     file1 = DataFile(opts.file_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
13 |     if opts.verbosity:
14 |         print('Creating domain total for species: %s' %species_name)
15 |     if species_name not in file1.species_list and opts.ignore_spec:
16 |         print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, file1))
17 |         DV = SpeciesArray(np.zeros([1,1,1]), species_name)
18 |     else:
19 |         DV = SpeciesArray(file1.sum_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
20 |             opts.inln, opts.interpolate, opts.layer, opts.stacks), species_name)
21 |     DV.sum_dims()
22 |     return (species_name, DV)
23 | 
--------------------------------------------------------------------------------
/emisqa/runtypes/sumdv.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 | from emisqa.data_file import DataFile
4 | from emisqa.species_array import SpeciesArray
5 | from emisqa.dateloop.inday import InDay
6 | from emisqa.default_paths import *
7 | import os.path
8 | import numpy as np
9 | 
10 | def get_spec(species_name, opts):
11 |     '''
12 |     Sums the daily values of NCF files from a start date through the number of run dates.
13 |     '''
14 |     if opts.verbosity:
15 |         print('Creating daily sum for species: %s' %species_name)
16 |     current_day = InDay(opts.gsdate, opts.rep_days, opts.run_days, smkDatesPath)
17 |     for day in range(opts.run_days):
18 |         day_mult = current_day.current_mult()
19 |         # Skip days that have already been represented.
20 |         if day_mult == 0:
21 |             if day != (opts.run_days - 1):
22 |                 current_day.iterday()
23 |             continue
24 |         # Set the input file name prefix for inline versus 2D
25 |         if opts.inln:
26 |             prefix = 'inln'
27 |         else:
28 |             prefix = 'emis'
29 |         # The plain 'mrggrid' sector defaults to the no-BEIS merged file.
30 |         if opts.sector.lower() in ('mrggrid', 'mrggrid_nobeis'):
31 |             infile_name = os.path.join(opts.inpath, 'emis_mole_all_%s_%s_nobeis_%s.ncf' %(current_day,
32 |                 opts.grid.GDNAM, opts.case))
33 |         elif opts.sector.lower() == 'mrggrid_withbeis':
34 |             infile_name = os.path.join(opts.inpath, 'emis_mole_all_%s_%s_withbeis_%s.ncf' %(current_day,
35 |                 opts.grid.GDNAM, opts.case))
36 |         else:
37 |             infile_name = os.path.join(opts.inpath, opts.sector, '%s_mole_%s_%s_%s_%s_%s.ncf' %(prefix,
38 |                 opts.sector, current_day, opts.grid.GDNAM, opts.spec, opts.case))  # v5 directory structure
39 |         infile = DataFile(infile_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
40 |         if species_name not in infile.species_list and opts.ignore_spec:
41 |             print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, infile))
42 |             SUM = SpeciesArray(np.zeros([1,opts.grid.NROWS,opts.grid.NCOLS]), species_name)
43 |             break
44 |         in_array = infile.sum_val(species_name, opts.all_hours, opts.grid,
45 |             opts.ignore_spec, opts.inln, opts.interpolate, opts.layer,
46 |             opts.stacks) * day_mult
47 |         if day == 0:
48 |             SUM = SpeciesArray(in_array, species_name)
49 |         else:
50 |             SUM.add_array(in_array)
51 |         infile.close_file()
52 |         if day != (opts.run_days - 1):
53 |             current_day.iterday()
54 |     return (species_name, SUM)
55 | 
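sumdv above (and sumhour next) lean on the smk_merge_dates tables in ancillary/: each modeled date stands in for `day_mult` calendar days, so a period total is a multiplicity-weighted sum over a handful of representative days. A toy illustration of the weighting; the day patterns and multipliers are invented:

import numpy as np

# (representative-day array, number of calendar days it stands in for)
rep_days = [
    (np.full((1, 2, 2), 1.0), 21),  # weekday pattern
    (np.full((1, 2, 2), 0.5), 5),   # Saturday pattern
    (np.full((1, 2, 2), 0.4), 5),   # Sunday pattern
]
total = sum(arr * mult for arr, mult in rep_days)
print(total[0, 0, 0])  # 21*1.0 + 5*0.5 + 5*0.4 = 25.5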
--------------------------------------------------------------------------------
/emisqa/runtypes/sumhour.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from builtins import range
3 | from emisqa.data_file import DataFile
4 | from emisqa.species_array import SpeciesArray
5 | from emisqa.dateloop.inday import InDay
6 | from emisqa.default_paths import *
7 | import os.path
8 | import numpy as np
9 | 
10 | def get_spec(species_name, opts):
11 |     '''
12 |     Sums the hourly values of NCF files from a start date through the number of run dates.
13 |     '''
14 |     if opts.verbosity:
15 |         print('Creating hourly sum for species: %s' %species_name)
16 |     current_day = InDay(opts.gsdate, opts.rep_days, opts.run_days, smkDatesPath)
17 |     for day in range(opts.run_days):
18 |         day_mult = current_day.current_mult()
19 |         # Skip days that have already been represented.
20 |         if day_mult == 0:
21 |             if day != (opts.run_days - 1):
22 |                 current_day.iterday()
23 |             continue
24 |         # Set the input file name prefix for inline versus 2D
25 |         if opts.inln:
26 |             inprefix = 'inln'
27 |         else:
28 |             inprefix = 'emis'
29 |         if opts.sector.lower() == 'mrggrid':
30 |             infile_name = os.path.join(opts.inpath, 'emis_mole_all_%s_%s_nobeis_%s.ncf' %(current_day, opts.grid.GDNAM, opts.case))
31 |         else:
32 |             infile_name = os.path.join(opts.inpath, opts.sector,
33 |                 '%s_mole_%s_%s_%s_%s_%s.ncf' %(inprefix, opts.sector, current_day, opts.grid.GDNAM,
34 |                 opts.spec, opts.case))  # v5 directory structure
35 |         infile = DataFile(infile_name, opts.verbosity, opts.informat, opts.ptsr, opts.zip_dict)
36 |         if species_name not in infile.species_list and opts.ignore_spec:
37 |             print('WARNING: The species %s does not exist in the file %s. Skipping.' %(species_name, infile))
38 |             SUM = SpeciesArray(np.zeros([24,opts.grid.NROWS,opts.grid.NCOLS]), species_name)  # hourly arrays carry 24 steps
39 |             break
40 |         in_array = infile.dump_val(species_name, opts.all_hours, opts.grid, opts.ignore_spec,
41 |             opts.inln, opts.interpolate, opts.layer, opts.stacks) * day_mult
42 |         if day == 0:
43 |             SUM = SpeciesArray(in_array, species_name)
44 |         else:
45 |             SUM.add_array(in_array)
46 |         infile.close_file()
47 |         if day != (opts.run_days - 1):
48 |             current_day.iterday()
49 |     return (species_name, SUM)
50 | 
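The SpeciesArray class in the next file carries the unit conversion used for tons-based reports: moles/s x molecular weight -> g/s, x 3600 -> g/hr, x 1.10231131e-6 -> short tons/hr. A worked example of that arithmetic; the 46.0 g/mol factor for NO2 and the emission rate are assumed sample values, not entries read from the mechanism table:

mw_no2 = 46.0          # g/mol, assumed mechanism-table factor for NO2
rate_moles_s = 2.5     # moles/s, sample emission rate

grams_per_s = rate_moles_s * mw_no2          # 115.0 g/s
grams_per_hr = grams_per_s * 3600.0          # 414000.0 g/hr
tons_per_hr = grams_per_hr * 1.10231131e-6   # ~0.4564 short tons/hr
print(round(tons_per_hr, 4))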
--------------------------------------------------------------------------------
/emisqa/species_array.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | from __future__ import print_function
3 | from past.utils import old_div
4 | from builtins import object
5 | import numpy as np
6 | from emisqa.chem_mechs import *
7 | 
8 | class SpeciesArray(object):
9 |     """
10 |     A class container for an array of a species.
11 |     Takes a species name and initial array upon creation.
12 | 
13 |     obj.add_array(inarray): Adds another array of data to the current array.
14 |     obj.diff_arr(inarray): Returns the difference from another array as an array.
15 |     obj.pct_err(inarray): Returns the percent error with another array as an array.
16 |     obj.maxMin(): Returns the minimum and maximum values for the current array.
17 |     obj.moles2tons(informat, mech): Converts the array from moles/s to tons/hr in place.
18 |     obj.sum_dims(): Collapses the row and column dimensions to domain totals.
19 |     obj: returns current species name when called as a string
20 |     obj(): returns current array
21 |     """
22 | 
23 |     def __init__(self, init_array, species_name):
24 |         self.species_name = species_name
25 |         self.array = init_array
26 | 
27 |     def __str__(self):
28 |         return self.species_name
29 | 
30 |     def __call__(self):
31 |         return self.array
32 | 
33 |     def add_array(self, inarray):
34 |         """
35 |         Adds another array of data to the current array.
36 |         """
37 |         self.array = self.array + inarray
38 | 
39 |     def diff_arr(self, inarray):
40 |         """
41 |         Gets the difference between the current array and another array.
42 |         """
43 |         if inarray.shape != self.array.shape:
44 |             raise IndexError('Array size mismatch in difference calculation')
45 |         outarray = (inarray - self.array)
46 |         outarray = np.where(outarray != outarray, 0.0, outarray)  # Zero out NaN cells
47 |         outarray = np.where(outarray > 1e10, 0.0, outarray)  # Zero out runaway fill values
48 |         return outarray
49 | 
50 |     def pct_err(self, inarray):
51 |         """
52 |         Gets the percent error between the current array and another array. Outputs to an array of
53 |         the same size.
54 |         """
55 |         if inarray.shape != self.array.shape:
56 |             raise IndexError('Array size mismatch in percent error calculation')
57 |         outarray = (old_div((inarray - self.array), self.array)) * float(100)
58 |         outarray = np.where(outarray != outarray, 0.0, outarray)  # Zero out NaN cells from 0/0
59 |         outarray = np.where(outarray > 1e10, 0.0, outarray)  # Zero out runaway fill values
60 |         return outarray
61 | 
62 |     def maxMin(self):
63 |         """
64 |         Gives the minimum and maximum values in the array.
65 |         """
66 |         minVal = self.array.min()
67 |         maxVal = self.array.max()
68 |         return minVal, maxVal
69 | 
70 |     def moles2tons(self, informat, mech='cmaq_cb6'):
71 |         """
72 |         Converts a value or array of values from moles/s to tons/hr
73 |         """
74 |         mech_dct = molecDct[mech]
75 |         if self.species_name in mech_dct:
76 |             factor = mech_dct[self.species_name]
77 |         else:
78 |             print('WARNING: No match found for %s in mech table' %self.species_name)
79 |             factor = 1
80 |         self.array = self.array * float(factor)  # Convert moles per second to grams per second
81 |         if informat != 'UAM':
82 |             self.array = self.array * 3600.  # Convert grams per second to grams per hour; UAM data is already hourly
83 |         self.array = self.array * (0.00000110231131)  # Convert grams per hour to tons per hour
84 | 
85 |     def sum_dims(self):
86 |         """
87 |         Collapses the row and column dimensions, leaving totals by hour.
88 |         """
89 |         self.array = np.sum(self.array, axis=(1,2), keepdims=True)
90 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | setup(
3 |     name="emisqa",
4 |     version="0.3.1",
5 |     packages=['emisqa','emisqa.camx','emisqa.cmaq','emisqa.csv','emisqa.dataout','emisqa.dateloop','emisqa.inline','emisqa.runtypes'],
6 |     scripts = ['bin/emisqa',],
7 |     package_data = {'emisqa': ['ancillary/*','ancillary/2016/*']},
8 |     python_requires='>=3.6',
9 |     install_requires=['numpy>=1.12,<=1.24.3','netCDF4>=1.2.9,<=1.5.8','pandas>=0.20,<1','fauxioapi>=0.1.5'],
10 |     author_email='james.beidler@gmail.com'
11 | )
12 | 
--------------------------------------------------------------------------------
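The SpeciesArray docstring doubles as the class's informal API contract. A short usage sketch with made-up arrays:

import numpy as np
from emisqa.species_array import SpeciesArray

base = SpeciesArray(np.full((24, 2, 2), 2.0), 'NO2')
other = np.full((24, 2, 2), 3.0)

print(str(base))       # 'NO2' -- __str__ returns the species name
base.add_array(other)  # in-place sum: every cell is now 5.0
print(base().shape)    # __call__ returns the raw array: (24, 2, 2)
print(base.maxMin())   # (5.0, 5.0)
pe = base.pct_err(np.full((24, 2, 2), 10.0))
print(pe[0, 0, 0])     # (10 - 5) / 5 * 100 = 100.0
base.sum_dims()        # collapse rows/cols: shape becomes (24, 1, 1)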