├── .coveragerc ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── enhancement_request.md │ ├── new_feature_request.md │ ├── sub-issue.md │ └── task.md ├── jobs │ ├── build_documentation.sh │ └── configure_sonarqube.sh ├── pull_request_template.md └── workflows │ ├── documentation.yaml │ ├── release-checksum.yaml │ ├── sonarqube.yaml │ ├── trigger_metplus.yaml │ └── unit_tests.yaml ├── .gitignore ├── .idea ├── .gitignore ├── METcalcpy.iml ├── inspectionProfiles │ ├── Project_Default.xml │ └── profiles_settings.xml ├── modules.xml └── vcs.xml ├── .readthedocs.yaml ├── .vscode └── settings.json ├── LICENSE.md ├── README.md ├── docs ├── Contributors_Guide │ └── index.rst ├── Makefile ├── README ├── Users_Guide │ ├── aggregation.rst │ ├── difficulty_index.rst │ ├── figure │ │ ├── FileReaderIO.png │ │ └── weighting_wind_speed_difficulty_index.png │ ├── index.rst │ ├── installation.rst │ ├── logging.rst │ ├── release-notes.rst │ ├── vertical_interpolation.rst │ └── write_mpr.rst ├── _static │ ├── METplus_banner_photo_web.png │ ├── custom.css │ ├── met_calcpy_logo_2019_09.png │ └── sphx_glr_cross_spectra_thumb.png ├── _templates │ ├── autosummary │ │ └── module.rst │ ├── footer.html │ └── placeholder.txt ├── auto_examples │ ├── auto_examples_jupyter.zip │ ├── auto_examples_python.zip │ └── index.rst ├── conf.py ├── diag_ref │ └── index.rst ├── index.rst ├── make.bat ├── requirements.txt ├── utils │ └── README_util.rst └── version ├── examples ├── README.txt ├── cross_spectra.py ├── grid_diag_gfs.sh ├── grid_diag_gfs.yaml ├── height_from_pressure_merra2.sh ├── height_from_pressure_merra2.yaml ├── height_from_pressure_tcrmw.sh ├── height_from_pressure_tcrmw.yaml ├── read_files.xml ├── read_files.yaml ├── vertical_interp_hwrf.sh ├── vertical_interp_hwrf.yaml ├── vertical_interp_merra2.sh └── vertical_interp_merra2.yaml ├── internal └── scripts │ ├── installation │ └── modulefiles │ │ ├── 3.0.0_casper │ │ ├── 3.0.0_derecho │ │ ├── 3.0.0_gaea │ │ ├── 3.0.0_hera │ │ ├── 3.0.0_hercules │ │ ├── 3.0.0_jet │ │ ├── 3.0.0_orion │ │ └── 3.0.0_wcoss2 │ └── sonarqube │ ├── development.seneca │ ├── run_nightly.sh │ ├── run_sonarqube.sh │ └── sonar-project.properties ├── license.txt ├── metcalcpy ├── LICENSE ├── README ├── __init__.py ├── agg_eclv.py ├── agg_stat.py ├── agg_stat_bootstrap.py ├── agg_stat_eqz.py ├── agg_stat_event_equalize.py ├── bootstrap.py ├── calc_difficulty_index.py ├── compare_images.py ├── contributed │ ├── __init__.py │ ├── blocking_weather_regime │ │ ├── Blocking.py │ │ ├── Blocking_WeatherRegime_util.py │ │ ├── WeatherRegime.py │ │ └── __init__.py │ ├── mjo_enso │ │ ├── __init__.py │ │ └── compute_mjo_enso.py │ ├── rmm_omi │ │ ├── __init__.py │ │ └── compute_mjo_indices.py │ ├── spacetime │ │ ├── __init__.py │ │ ├── cross_spectra.py │ │ ├── matsuno_plot.py │ │ ├── spacetime.py │ │ └── spacetime_utils.py │ ├── tropical_diagnostics │ │ ├── __init__.py │ │ ├── ccew_activity.py │ │ ├── compute_omi_example.py │ │ ├── compute_rmm_example.py │ │ ├── example_kelvin_activity.py │ │ ├── phase_diagram_example.py │ │ ├── readme.md │ │ └── utils.py │ └── zonal_meridional │ │ └── recipes.py ├── diagnostics │ ├── README │ ├── __init__.py │ └── land_surface.py ├── event_equalize.py ├── event_equalize_against_values.py ├── logging_config.py ├── piecewise_linear.py ├── pre_processing │ ├── README │ ├── __init__.py │ ├── aggregation │ │ ├── .gitignore │ │ ├── LICENSE │ │ ├── README.md │ │ ├── config │ │ │ ├── config_agg_stat.yaml │ │ │ ├── config_aggregation_preprocessor.yaml │ │ │ ├── 
config_plot_cmn.yaml │ │ │ ├── custom_line.yaml │ │ │ ├── custom_performance_diagram.yaml │ │ │ └── custom_taylor_diagram.yaml │ │ ├── src │ │ │ ├── aggregation_preprocessor.py │ │ │ └── yaml_preprocessor.py │ │ └── wrapper │ │ │ ├── aggregation_WE2E.py │ │ │ └── environment.yaml │ ├── directional_means.py │ └── waves.py ├── process_tcrmw.py ├── scorecard.py ├── sum_stat.py ├── util │ ├── README │ ├── README_util.rst │ ├── __init__.py │ ├── correlation.py │ ├── ctc_statistics.py │ ├── eclv_statistics.py │ ├── ecnt_statistics.py │ ├── grad_statistics.py │ ├── mcts_statistics.py │ ├── met_stats.py │ ├── mode_2d_arearat_statistics.py │ ├── mode_2d_ratio_statistics.py │ ├── mode_3d_ratio_statistics.py │ ├── mode_3d_volrat_statistics.py │ ├── mode_arearat_statistics.py │ ├── mode_ratio_statistics.py │ ├── nbrcnt_statistics.py │ ├── nbrctc_statistics.py │ ├── pstd_statistics.py │ ├── read_env_vars_in_config.py │ ├── read_file.py │ ├── rps_statistics.py │ ├── safe_log.py │ ├── sal1l2_statistics.py │ ├── sl1l2_statistics.py │ ├── ssvar_statistics.py │ ├── tost_paired.py │ ├── utils.py │ ├── val1l2_statistics.py │ ├── vcnt_statistics.py │ ├── vl1l2_statistics.py │ ├── wald_wolfowitz_runs_test.py │ └── write_mpr.py ├── validate_mv_python.py └── vertical_interp.py ├── nco_requirements.txt ├── requirements.txt ├── scratch └── python_query_util.py ├── setup.py └── test ├── README ├── __init__.py ├── convert_headers.py ├── data ├── .gitignore ├── ROC_CTC.data ├── ROC_CTC_SFP.data ├── ROC_CTC_thresh.data ├── ROC_PSTD_dummy.data ├── agg_eclv_data.data ├── agg_ratio.data ├── agg_stat_and_boot_data.data ├── agg_stat_and_boot_output.data ├── agg_stat_with_groups_data.data ├── calc_tci_jja_pandas_input.csv ├── calc_tci_jja_xarray_input.nc ├── calc_tci_jja_xarray_output.nc ├── dummy.data ├── ee_av_input.data ├── et.txt ├── event_equalize_dummy.data ├── event_equalize_eqz_dummy.data ├── event_equalize_group_input.data ├── event_equalize_input.data ├── img_1.png ├── img_2.png ├── img_diff.png ├── mtd_revision.data ├── perf_diagram.data ├── point_stat │ ├── point_stat_GRIB1_NAM_GDAS_MASK_SID_120000L_20120409_120000V_val1l2.txt │ ├── point_stat_GRIB2_SREF_GDAS_150000L_20120409_120000V_vcnt.txt │ └── point_stat_GRIB2_SREF_GDAS_150000L_20120409_120000V_vl1l2.txt ├── pstd.data.agg_stat ├── roc_sample.data ├── rrfs_ecnt_for_agg.data ├── scorecard.csv ├── scorecard.data ├── stat_analysis │ ├── ensemble_stat_OBSERR_20120410_120000V_ecnt.txt │ ├── met_ecnt_agg.txt │ ├── met_val1l2_aggregated.txt │ ├── met_vcnt_from_vl1l2.txt │ └── met_vl1l2_aggregated.txt ├── stats_ee_av_input.data └── threshold.csv ├── ecnt_agg_stat.yaml ├── logs └── log_agg_eclv.txt ├── pytest.ini ├── rrfs_ecnt_config_agg_stat.yaml ├── run_all_nco.sh ├── test_agg_eclv.py ├── test_agg_ratio.py ├── test_agg_stat.py ├── test_agg_stats_and_boot.py ├── test_agg_stats_with_groups.py ├── test_calc_difficulty_index.py ├── test_compare_images.py ├── test_convert_lon_indices.py ├── test_ctc_statistics.py ├── test_diagnostics_land_surface.py ├── test_event_equalize.py ├── test_event_equalize_against_values.py ├── test_future_warnings.py ├── test_grid_diag.py ├── test_lon_360_to_180.py ├── test_mode_2d_statistics.py ├── test_no_ARIMA_utils.py ├── test_reformatted_for_agg.py ├── test_scorecard.py ├── test_sl1l2.py ├── test_spacetime.py ├── test_statistics.py ├── test_tost_paired.py ├── test_utils.py ├── test_validate_mv_python.py ├── val1l2_agg_stat.yaml ├── vcnt_agg_stat.yaml └── vl1l2_agg_stat_met_v12.yaml /.coveragerc: 
-------------------------------------------------------------------------------- 1 | [run] 2 | source = metcalcpy 3 | relative_files = True 4 | omit = 5 | config.py 6 | config-3.py 7 | metcalcpy/contributed/* 8 | 9 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Fix something that's not working 4 | title: 'Bugfix' 5 | labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | *Replace italics below with details for this issue.* 11 | 12 | ## Describe the Problem ## 13 | *Provide a clear and concise description of the bug here.* 14 | 15 | ### Expected Behavior ### 16 | *Provide a clear and concise description of what you expected to happen here.* 17 | 18 | ### Environment ### 19 | Describe your runtime environment: 20 | *1. Machine: (e.g. HPC name, Linux Workstation, Mac Laptop)* 21 | *2. OS: (e.g. RedHat Linux, MacOS)* 22 | *3. Software version number(s)* 23 | 24 | ### To Reproduce ### 25 | Describe the steps to reproduce the behavior: 26 | *1. Go to '...'* 27 | *2. Click on '....'* 28 | *3. Scroll down to '....'* 29 | *4. See error* 30 | *Post relevant sample data following these instructions:* 31 | *https://dtcenter.org/community-code/model-evaluation-tools-met/met-help-desk#ftp* 32 | 33 | ### Relevant Deadlines ### 34 | *List relevant project deadlines here or state NONE.* 35 | 36 | ### Funding Source ### 37 | *Define the source of funding and account keys here or state NONE.* 38 | 39 | ## Define the Metadata ## 40 | 41 | ### Assignee ### 42 | - [ ] Select **engineer(s)** or **no engineer** required 43 | - [ ] Select **scientist(s)** or **no scientist** required 44 | 45 | ### Labels ### 46 | - [ ] Review default **alert** labels 47 | - [ ] Select **component(s)** 48 | - [ ] Select **priority** 49 | - [ ] Select **requestor(s)** 50 | 51 | ### Milestone and Projects ### 52 | - [ ] Select **Milestone** as the next bugfix version 53 | - [ ] Select **Coordinated METplus-X.Y Support** project for support of the current coordinated release 54 | - [ ] Select **METcalcpy-X.Y.Z Development** project for development toward the next official release 55 | 56 | ## Define Related Issue(s) ## 57 | Consider the impact to the other METplus components. 58 | - [ ] [METplus](https://github.com/dtcenter/METplus/issues/new/choose), [MET](https://github.com/dtcenter/MET/issues/new/choose), [METdataio](https://github.com/dtcenter/METdataio/issues/new/choose), [METviewer](https://github.com/dtcenter/METviewer/issues/new/choose), [METexpress](https://github.com/dtcenter/METexpress/issues/new/choose), [METcalcpy](https://github.com/dtcenter/METcalcpy/issues/new/choose), [METplotpy](https://github.com/dtcenter/METplotpy/issues/new/choose) 59 | 60 | ## Bugfix Checklist ## 61 | See the [METplus Workflow](https://metplus.readthedocs.io/en/latest/Contributors_Guide/github_workflow.html) for details. 62 | - [ ] Complete the issue definition above, including the **Time Estimate** and **Funding Source**. 63 | - [ ] Fork this repository or create a branch of **main_\**. 64 | Branch name: `bugfix__main__` 65 | - [ ] Fix the bug and test your changes. 66 | - [ ] Add/update log messages for easier debugging. 67 | - [ ] Add/update unit tests. 68 | - [ ] Add/update documentation. 
69 | - [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. 70 | - [ ] Push local changes to GitHub. 71 | - [ ] Submit a pull request to merge into **main_\**. 72 | Pull request: `bugfix main_ ` 73 | - [ ] Define the pull request metadata, as permissions allow. 74 | Select: **Reviewer(s)** and **Development** issue 75 | Select: **Milestone** as the next bugfix version 76 | Select: Coordinated METplus-X.Y Support project for support of the current coordinated release 77 | - [ ] Iterate until the reviewer(s) accept and merge your changes. 78 | - [ ] Delete your fork or branch. 79 | - [ ] Complete the steps above to fix the bug on the **develop** branch. 80 | Branch name: `bugfix__develop_` 81 | Pull request: `bugfix develop ` 82 | Select: **Reviewer(s)** and **Development** issue 83 | Select: **Milestone** as the next official version 84 | Select: **METcalcpy-X.Y.Z Development** project for development toward the next official release 85 | - [ ] Close this issue. 86 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/enhancement_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Enhancement request 3 | about: Improve something that it's currently doing 4 | title: '' 5 | labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: enhancement' 6 | assignees: '' 7 | 8 | --- 9 | 10 | *Replace italics below with details for this issue.* 11 | 12 | ## Describe the Enhancement ## 13 | *Provide a description of the enhancement request here.* 14 | 15 | ### Time Estimate ### 16 | *Estimate the amount of work required here.* 17 | *Issues should represent approximately 1 to 3 days of work.* 18 | 19 | ### Sub-Issues ### 20 | Consider breaking the enhancement down into sub-issues. 21 | - [ ] *Add a checkbox for each sub-issue here.* 22 | 23 | ### Relevant Deadlines ### 24 | *List relevant project deadlines here or state NONE.* 25 | 26 | ### Funding Source ### 27 | *Define the source of funding and account keys here or state NONE.* 28 | 29 | ## Define the Metadata ## 30 | 31 | ### Assignee ### 32 | - [ ] Select **engineer(s)** or **no engineer** required 33 | - [ ] Select **scientist(s)** or **no scientist** required 34 | 35 | ### Labels ### 36 | - [ ] Review default **alert** labels 37 | - [ ] Select **component(s)** 38 | - [ ] Select **priority** 39 | - [ ] Select **requestor(s)** 40 | 41 | ### Milestone and Projects ### 42 | - [ ] Select **Milestone** as a **METcalcpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** 43 | - [ ] For a **METcalcpy-X.Y.Z** version, select the **METcalcpy-X.Y.Z Development** project 44 | 45 | ## Define Related Issue(s) ## 46 | Consider the impact to the other METplus components. 
47 | - [ ] [METplus](https://github.com/dtcenter/METplus/issues/new/choose), [MET](https://github.com/dtcenter/MET/issues/new/choose), [METdataio](https://github.com/dtcenter/METdataio/issues/new/choose), [METviewer](https://github.com/dtcenter/METviewer/issues/new/choose), [METexpress](https://github.com/dtcenter/METexpress/issues/new/choose), [METcalcpy](https://github.com/dtcenter/METcalcpy/issues/new/choose), [METplotpy](https://github.com/dtcenter/METplotpy/issues/new/choose) 48 | 49 | ## Enhancement Checklist ## 50 | See the [METplus Workflow](https://metplus.readthedocs.io/en/latest/Contributors_Guide/github_workflow.html) for details. 51 | - [ ] Complete the issue definition above, including the **Time Estimate** and **Funding Source**. 52 | - [ ] Fork this repository or create a branch of **develop**. 53 | Branch name: `feature__` 54 | - [ ] Complete the development and test your changes. 55 | - [ ] Add/update log messages for easier debugging. 56 | - [ ] Add/update unit tests. 57 | - [ ] Add/update documentation. 58 | - [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. 59 | - [ ] Push local changes to GitHub. 60 | - [ ] Submit a pull request to merge into **develop**. 61 | Pull request: `feature ` 62 | - [ ] Define the pull request metadata, as permissions allow. 63 | Select: **Reviewer(s)** and **Development** issue 64 | Select: **Milestone** as the next official version 65 | Select: **METcalcpy-X.Y.Z Development** project for development toward the next official release 66 | - [ ] Iterate until the reviewer(s) accept and merge your changes. 67 | - [ ] Delete your fork or branch. 68 | - [ ] Close this issue. 69 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/new_feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: New feature request 3 | about: Make it do something new 4 | title: '' 5 | labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: new feature' 6 | assignees: '' 7 | 8 | --- 9 | 10 | *Replace italics below with details for this issue.* 11 | 12 | ## Describe the New Feature ## 13 | *Provide a description of the new feature request here.* 14 | 15 | ### Acceptance Testing ### 16 | *List input data types and sources.* 17 | *Describe tests required for new functionality.* 18 | 19 | ### Time Estimate ### 20 | *Estimate the amount of work required here.* 21 | *Issues should represent approximately 1 to 3 days of work.* 22 | 23 | ### Sub-Issues ### 24 | Consider breaking the new feature down into sub-issues. 
25 | - [ ] *Add a checkbox for each sub-issue here.* 26 | 27 | ### Relevant Deadlines ### 28 | *List relevant project deadlines here or state NONE.* 29 | 30 | ### Funding Source ### 31 | *Define the source of funding and account keys here or state NONE.* 32 | 33 | ## Define the Metadata ## 34 | 35 | ### Assignee ### 36 | - [ ] Select **engineer(s)** or **no engineer** required 37 | - [ ] Select **scientist(s)** or **no scientist** required 38 | 39 | ### Labels ### 40 | - [ ] Review default **alert** labels 41 | - [ ] Select **component(s)** 42 | - [ ] Select **priority** 43 | - [ ] Select **requestor(s)** 44 | 45 | ### Milestone and Projects ### 46 | - [ ] Select **Milestone** as a **METcalcpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** 47 | - [ ] For a **METcalcpy-X.Y.Z** version, select the **METcalcpy-X.Y.Z Development** project 48 | 49 | ## Define Related Issue(s) ## 50 | Consider the impact to the other METplus components. 51 | - [ ] [METplus](https://github.com/dtcenter/METplus/issues/new/choose), [MET](https://github.com/dtcenter/MET/issues/new/choose), [METdataio](https://github.com/dtcenter/METdataio/issues/new/choose), [METviewer](https://github.com/dtcenter/METviewer/issues/new/choose), [METexpress](https://github.com/dtcenter/METexpress/issues/new/choose), [METcalcpy](https://github.com/dtcenter/METcalcpy/issues/new/choose), [METplotpy](https://github.com/dtcenter/METplotpy/issues/new/choose) 52 | 53 | ## New Feature Checklist ## 54 | See the [METplus Workflow](https://metplus.readthedocs.io/en/latest/Contributors_Guide/github_workflow.html) for details. 55 | - [ ] Complete the issue definition above, including the **Time Estimate** and **Funding source**. 56 | - [ ] Fork this repository or create a branch of **develop**. 57 | Branch name: `feature__` 58 | - [ ] Complete the development and test your changes. 59 | - [ ] Add/update log messages for easier debugging. 60 | - [ ] Add/update unit tests. 61 | - [ ] Add/update documentation. 62 | - [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. 63 | - [ ] Push local changes to GitHub. 64 | - [ ] Submit a pull request to merge into **develop**. 65 | Pull request: `feature ` 66 | - [ ] Define the pull request metadata, as permissions allow. 67 | Select: **Reviewer(s)** and **Development** issue 68 | Select: **Milestone** as the next official version 69 | Select: **METcalcpy-X.Y.Z Development** project for development toward the next official release 70 | - [ ] Iterate until the reviewer(s) accept and merge your changes. 71 | - [ ] Delete your fork or branch. 72 | - [ ] Close this issue. 73 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/sub-issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Sub-Issue 3 | about: Break an issue down into smaller parts 4 | title: '' 5 | labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: sub-issue' 6 | assignees: '' 7 | 8 | --- 9 | 10 | This is a sub-issue of #*List the parent issue number here*. 
11 | 12 | ## Describe the Sub-Issue ## 13 | *Provide a description of the sub-issue here.* 14 | 15 | ### Time Estimate ### 16 | *Estimate the amount of work required here.* 17 | *Issues should represent approximately 1 to 3 days of work.* 18 | 19 | ## Define the Metadata ## 20 | 21 | ### Assignee ### 22 | - [ ] Select **engineer(s)** or **no engineer** required 23 | - [ ] Select **scientist(s)** or **no scientist** required 24 | 25 | ### Labels ### 26 | - [ ] Review default **alert** labels 27 | - [ ] Select **component(s)** 28 | - [ ] Select **priority** 29 | - [ ] Select **requestor(s)** 30 | 31 | ### Milestone and Projects ### 32 | - [ ] Select **Milestone** as a **METcalcpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** 33 | - [ ] For a **METcalcpy-X.Y.Z** version, select the **METcalcpy-X.Y.Z Development** project 34 | 35 | 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/task.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Task 3 | about: Describe something that needs to be done 4 | title: '' 5 | labels: 'alert: NEED ACCOUNT KEY, alert: NEED MORE DEFINITION, alert: NEED CYCLE ASSIGNMENT, type: task' 6 | assignees: '' 7 | 8 | --- 9 | 10 | *Replace italics below with details for this issue.* 11 | 12 | ## Describe the Task ## 13 | *Provide a description of the task here.* 14 | 15 | ### Time Estimate ### 16 | *Estimate the amount of work required here.* 17 | *Issues should represent approximately 1 to 3 days of work.* 18 | 19 | ### Sub-Issues ### 20 | Consider breaking the task down into sub-issues. 21 | - [ ] *Add a checkbox for each sub-issue here.* 22 | 23 | ### Relevant Deadlines ### 24 | *List relevant project deadlines here or state NONE.* 25 | 26 | ### Funding Source ### 27 | *Define the source of funding and account keys here or state NONE.* 28 | 29 | ## Define the Metadata ## 30 | 31 | ### Assignee ### 32 | - [ ] Select **engineer(s)** or **no engineer** required 33 | - [ ] Select **scientist(s)** or **no scientist** required 34 | 35 | ### Labels ### 36 | - [ ] Review default **alert** labels 37 | - [ ] Select **component(s)** 38 | - [ ] Select **priority** 39 | - [ ] Select **requestor(s)** 40 | 41 | ### Milestone and Projects ### 42 | - [ ] Select **Milestone** as a **METcalcpy-X.Y.Z** version, **Consider for Next Release**, or **Backlog of Development Ideas** 43 | - [ ] For a **METcalcpy-X.Y.Z** version, select the **METcalcpy-X.Y.Z Development** project 44 | 45 | ## Define Related Issue(s) ## 46 | Consider the impact to the other METplus components. 47 | - [ ] [METplus](https://github.com/dtcenter/METplus/issues/new/choose), [MET](https://github.com/dtcenter/MET/issues/new/choose), [METdataio](https://github.com/dtcenter/METdataio/issues/new/choose), [METviewer](https://github.com/dtcenter/METviewer/issues/new/choose), [METexpress](https://github.com/dtcenter/METexpress/issues/new/choose), [METcalcpy](https://github.com/dtcenter/METcalcpy/issues/new/choose), [METplotpy](https://github.com/dtcenter/METplotpy/issues/new/choose) 48 | 49 | ## Task Checklist ## 50 | See the [METplus Workflow](https://metplus.readthedocs.io/en/latest/Contributors_Guide/github_workflow.html) for details. 51 | - [ ] Complete the issue definition above, including the **Time Estimate** and **Funding Source**. 52 | - [ ] Fork this repository or create a branch of **develop**. 
53 | Branch name: `feature__`
54 | - [ ] Complete the development and test your changes.
55 | - [ ] Add/update log messages for easier debugging.
56 | - [ ] Add/update unit tests.
57 | - [ ] Add/update documentation.
58 | - [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table.
59 | - [ ] Push local changes to GitHub.
60 | - [ ] Submit a pull request to merge into **develop**.
61 | Pull request: `feature `
62 | - [ ] Define the pull request metadata, as permissions allow.
63 | Select: **Reviewer(s)** and **Development** issue
64 | Select: **Milestone** as the next official version
65 | Select: **METcalcpy-X.Y.Z Development** project for development toward the next official release
66 | - [ ] Iterate until the reviewer(s) accept and merge your changes.
67 | - [ ] Delete your fork or branch.
68 | - [ ] Close this issue.
69 |
--------------------------------------------------------------------------------
/.github/jobs/build_documentation.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | # path to docs directory relative to top level of repository
4 | # $GITHUB_WORKSPACE is set if the actions/checkout@v2 action is run first
5 |
6 | DOCS_DIR=${GITHUB_WORKSPACE}/docs
7 |
8 | # run Make to build the documentation and return to previous directory
9 | cd ${DOCS_DIR}
10 | make clean html
11 | cd -
12 |
13 | # copy HTML output into directory to create an artifact
14 | mkdir -p artifact/documentation
15 | cp -r ${DOCS_DIR}/_build/html/* artifact/documentation
16 |
17 | # check if the warnings.log file is empty.
18 | # Copy it into the artifact and documentation directories
19 | # so it will be available in the artifacts
20 | warning_file=${DOCS_DIR}/_build/warnings.log
21 | if [ -s $warning_file ]; then
22 |   cp -r ${DOCS_DIR}/_build/warnings.log artifact/doc_warnings.log
23 |   cp artifact/doc_warnings.log artifact/documentation
24 |   exit 1
25 | fi
26 |
--------------------------------------------------------------------------------
/.github/jobs/configure_sonarqube.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Constants
4 | SONAR_PROPERTIES_DIR=internal/scripts/sonarqube
5 | SONAR_PROPERTIES=sonar-project.properties
6 |
7 | # Check that this is being run from the top-level METcalcpy directory
8 | if [ ! -e $SONAR_PROPERTIES_DIR/$SONAR_PROPERTIES ]; then
9 |   echo "ERROR: ${0} -> must be run from the top-level METcalcpy directory"
10 |   exit 1
11 | fi
12 |
13 | # Check required environment variables
14 | if [ -z ${SOURCE_BRANCH+x} ]; then
15 |   echo "ERROR: ${0} -> \$SOURCE_BRANCH not defined!"
16 |   exit 1
17 | fi
18 | if [ -z ${WD_REFERENCE_BRANCH+x} ]; then
19 |   echo "ERROR: ${0} -> \$WD_REFERENCE_BRANCH not defined!"
20 |   exit 1
21 | fi
22 | if [ -z ${SONAR_HOST_URL+x} ]; then
23 |   echo "ERROR: ${0} -> \$SONAR_HOST_URL not defined!"
24 |   exit 1
25 | fi
26 | if [ -z ${SONAR_TOKEN+x} ]; then
27 |   echo "ERROR: ${0} -> \$SONAR_TOKEN not defined!"
28 |   exit 1
29 | fi
30 |
31 | # Define the version string
32 | SONAR_PROJECT_VERSION=$(cat docs/version | cut -d'=' -f2 | tr -d '" ')
33 |
34 | #
35 | # Define the $SONAR_REFERENCE_BRANCH as the
36 | #   - Target of any pull requests
37 | #   - Manual setting for workflow dispatch
38 | #   - Source branch for any pushes (e.g.
develop) 39 | # 40 | if [ "$GITHUB_EVENT_NAME" == "pull_request" ]; then 41 | export SONAR_REFERENCE_BRANCH=$GITHUB_BASE_REF 42 | elif [ "$GITHUB_EVENT_NAME" == "workflow_dispatch" ]; then 43 | export SONAR_REFERENCE_BRANCH=$WD_REFERENCE_BRANCH 44 | else 45 | export SONAR_REFERENCE_BRANCH=$SOURCE_BRANCH 46 | fi 47 | 48 | # Configure the sonar-project.properties 49 | [ -e $SONAR_PROPERTIES ] && rm $SONAR_PROPERTIES 50 | sed -e "s|SONAR_PROJECT_VERSION|$SONAR_PROJECT_VERSION|" \ 51 | -e "s|SONAR_HOST_URL|$SONAR_HOST_URL|" \ 52 | -e "s|SONAR_TOKEN|$SONAR_TOKEN|" \ 53 | -e "s|SONAR_BRANCH_NAME|$SOURCE_BRANCH|" \ 54 | $SONAR_PROPERTIES_DIR/$SONAR_PROPERTIES > $SONAR_PROPERTIES 55 | 56 | # Define new code when the source and reference branches differ 57 | if [ "$SOURCE_BRANCH" != "$SONAR_REFERENCE_BRANCH" ]; then 58 | echo "sonar.newCode.referenceBranch=${SONAR_REFERENCE_BRANCH}" >> $SONAR_PROPERTIES 59 | fi 60 | 61 | echo "Contents of the $SONAR_PROPERTIES file:" 62 | cat $SONAR_PROPERTIES 63 | 64 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Pull Request Testing ## 2 | 3 | - [ ] Describe testing already performed for these changes:
4 | 5 | - [ ] Recommend testing for the reviewer(s) to perform, including the location of input datasets, and any additional instructions:
6 | 7 | - [ ] Do these changes include sufficient documentation updates, ensuring that no errors or warnings exist in the build of the documentation? **[Yes or No]** 8 | 9 | - [ ] Do these changes include sufficient testing updates? **[Yes or No]** 10 | 11 | - [ ] Will this PR result in changes to the test suite? **[Yes or No]**
12 | If **yes**, describe the new output and/or changes to the existing output:
13 | 14 | - [ ] Do these changes introduce new SonarQube findings? **[Yes or No]**
15 | If **yes**, please describe: 16 | 17 | - [ ] Please complete this pull request review by **[Fill in date]**.
18 | 19 | ## Pull Request Checklist ## 20 | See the [METplus Workflow](https://metplus.readthedocs.io/en/latest/Contributors_Guide/github_workflow.html) for details. 21 | - [ ] Add any new Python packages to the [METplus Components Python Requirements](https://metplus.readthedocs.io/en/develop/Users_Guide/appendixA.html#metplus-components-python-packages) table. 22 | - [ ] Review the source issue metadata (required labels, projects, and milestone). 23 | - [ ] Complete the PR definition above. 24 | - [ ] Ensure the PR title matches the feature or bugfix branch name. 25 | - [ ] Define the PR metadata, as permissions allow. 26 | Select: **Reviewer(s)** and **Development** issue 27 | Select: **Milestone** as the version that will include these changes 28 | Select: **Coordinated METplus-X.Y Support** project for bugfix releases or **METcalcpy-X.Y.Z Development** project for official releases 29 | - [ ] After submitting the PR, select the :gear: icon in the **Development** section of the right hand sidebar. Search for the issue that this PR will close and select it, if it is not already selected. 30 | - [ ] After the PR is approved, merge your changes. If permissions do not allow this, request that the reviewer do the merge. 31 | - [ ] Close the linked issue and delete your feature or bugfix branch from GitHub. 32 | -------------------------------------------------------------------------------- /.github/workflows/documentation.yaml: -------------------------------------------------------------------------------- 1 | name: Documentation 2 | on: 3 | push: 4 | paths: 5 | - docs/** 6 | branches: 7 | - develop 8 | - develop-ref 9 | - feature_* 10 | - main_* 11 | - bugfix_* 12 | pull_request: 13 | types: [opened, reopened, synchronize] 14 | 15 | jobs: 16 | documentation: 17 | name: Build Documentation 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v4 21 | - uses: actions/setup-python@v5 22 | with: 23 | python-version: '3.10' 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade python-dateutil requests sphinx \ 27 | sphinx-gallery Pillow sphinx_rtd_theme pandas xarray 28 | python -m pip install -r docs/requirements.txt 29 | - name: Build docs 30 | run: ./.github/jobs/build_documentation.sh 31 | - uses: actions/upload-artifact@v4 32 | if: always() 33 | with: 34 | name: documentation 35 | path: artifact/documentation 36 | - uses: actions/upload-artifact@v4 37 | if: failure() 38 | with: 39 | name: documentation_warnings.log 40 | path: artifact/doc_warnings.log 41 | if-no-files-found: ignore 42 | -------------------------------------------------------------------------------- /.github/workflows/release-checksum.yaml: -------------------------------------------------------------------------------- 1 | name: Add checksum to release 2 | 3 | on: 4 | release: 5 | types: 6 | - published 7 | 8 | jobs: 9 | add-checksum: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: dtcenter/metplus-action-release-checksum@v2 13 | with: 14 | token: ${{ secrets.METPLUS_BOT_TOKEN }} 15 | -------------------------------------------------------------------------------- /.github/workflows/sonarqube.yaml: -------------------------------------------------------------------------------- 1 | name: SonarQube Scan 2 | 3 | # Run SonarQube for Pull Requests and changes to the develop and main_vX.Y branches 4 | 5 | on: 6 | 7 | # Trigger analysis for pushes to develop and main_vX.Y branches 8 | push: 9 | branches: 10 | - develop 11 | - 'main_v**' 12 | paths-ignore: 13 | - 'docs/**' 14 | 
- '.github/pull_request_template.md' 15 | - '.github/ISSUE_TEMPLATE/**' 16 | - '**/README.md' 17 | - '**/LICENSE.md' 18 | 19 | # Trigger analysis for pull requests to develop and main_vX.Y branches 20 | pull_request: 21 | types: [opened, synchronize, reopened] 22 | branches: 23 | - develop 24 | - 'main_v**' 25 | paths-ignore: 26 | - 'docs/**' 27 | - '.github/pull_request_template.md' 28 | - '.github/ISSUE_TEMPLATE/**' 29 | - '**/README.md' 30 | - '**/LICENSE.md' 31 | 32 | workflow_dispatch: 33 | inputs: 34 | reference_branch: 35 | description: 'Reference Branch' 36 | default: develop 37 | type: string 38 | 39 | jobs: 40 | sonarqube: 41 | name: SonarQube Scan 42 | runs-on: ubuntu-latest 43 | 44 | steps: 45 | 46 | - uses: actions/checkout@v4 47 | with: 48 | # Disable shallow clones for better analysis 49 | fetch-depth: 0 50 | 51 | - name: Set up Python 3.10 52 | uses: actions/setup-python@v5 53 | with: 54 | python-version: "3.10" 55 | - name: Install dependencies 56 | run: | 57 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 58 | # try installing scikit-image explicitly. Using requirements.txt versions doesn't seem to build correctly 59 | python -m pip install -U scikit-image 60 | # install the coverage.py code coverage tool 61 | python3 -m pip install pytest-cov 62 | - name: Test with pytest 63 | # these are tests that don't have external dependencies (i.e. need to run on Linux test hosts where large datasets reside or require exact machine type for 64 | # image comparisons to work, etc.) 65 | run: | 66 | coverage run -m pytest 67 | coverage report -m 68 | coverage xml 69 | 70 | - name: Get branch name 71 | id: get_branch_name 72 | run: echo branch_name=${GITHUB_REF#refs/heads/} >> $GITHUB_OUTPUT 73 | 74 | - name: Configure SonarQube 75 | run: .github/jobs/configure_sonarqube.sh 76 | env: 77 | SOURCE_BRANCH: ${{ steps.get_branch_name.outputs.branch_name }} 78 | WD_REFERENCE_BRANCH: ${{ github.event.inputs.reference_branch }} 79 | SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }} 80 | SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} 81 | 82 | - name: SonarQube Scan 83 | uses: sonarsource/sonarqube-scan-action@master 84 | env: 85 | SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }} 86 | SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} 87 | 88 | - name: SonarQube Quality Gate check 89 | id: sonarqube-quality-gate-check 90 | uses: sonarsource/sonarqube-quality-gate-action@master 91 | # Force to fail step after specific time. 
92 | timeout-minutes: 5 93 | env: 94 | SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }} 95 | SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} 96 | -------------------------------------------------------------------------------- /.github/workflows/trigger_metplus.yaml: -------------------------------------------------------------------------------- 1 | name: Trigger METplus Workflow 2 | 3 | on: 4 | push: 5 | branches: 6 | - develop 7 | - 'main_v[0-9]+.[0-9]+' 8 | paths-ignore: 9 | - 'docs/**' 10 | - '.github/pull_request_template.md' 11 | - '.github/ISSUE_TEMPLATE/**' 12 | - '**/README.md' 13 | - '**/LICENSE.md' 14 | 15 | jobs: 16 | trigger_metplus: 17 | name: Trigger METplus testing workflow 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: dtcenter/metplus-action-trigger-use-cases@v1 21 | with: 22 | token: ${{ secrets.METPLUS_BOT_TOKEN }} 23 | -------------------------------------------------------------------------------- /.github/workflows/unit_tests.yaml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies and run tests for a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Unit tests 5 | 6 | on: 7 | push: 8 | branches: 9 | - develop 10 | - develop-ref 11 | - feature_* 12 | - main_* 13 | - bugfix_* 14 | - test_* 15 | paths-ignore: 16 | - 'docs/**' 17 | - '.github/pull_request_template.md' 18 | - '.github/ISSUE_TEMPLATE/**' 19 | - '**/README.md' 20 | - '**/LICENSE.md' 21 | 22 | pull_request: 23 | types: [opened, reopened, synchronize] 24 | 25 | 26 | jobs: 27 | build: 28 | 29 | runs-on: ubuntu-latest 30 | strategy: 31 | fail-fast: false 32 | matrix: 33 | python-version: ["3.10"] 34 | 35 | steps: 36 | - uses: actions/checkout@v4 37 | - name: Set up Python ${{ matrix.python-version }} 38 | uses: actions/setup-python@v5 39 | with: 40 | python-version: ${{ matrix.python-version }} 41 | - name: Install dependencies 42 | run: | 43 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 44 | # try installing scikit-image explicitly. Using requirements.txt versions doesn't seem to build correctly 45 | python -m pip install -U scikit-image 46 | # install the coverage.py code coverage tool 47 | python3 -m pip install pytest-cov 48 | - name: Test with pytest 49 | # these are tests that don't have external dependencies (i.e. need to run on Linux test hosts where large datasets reside or require exact machine type for 50 | # image comparisons to work, etc.) 
51 | run: | 52 | coverage run -m pytest 53 | coverage report -m 54 | coverage html 55 | 56 | - name: Archive code coverage results 57 | uses: actions/upload-artifact@v4 58 | with: 59 | name: code-coverage-report 60 | path: htmlcov/index.html 61 | 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # tilda files generated by emacs 2 | *~ 3 | 4 | # temp files surrounded by # 5 | *#*# 6 | 7 | __pycache__/ 8 | 9 | # sphinx output 10 | docs/_build 11 | docs/diag_ref/generated 12 | docs/sg_execution_times.rst 13 | docs/auto_examples 14 | 15 | /metcalcpy.egg-info/ 16 | 17 | .coverage 18 | coverage.xml 19 | -------------------------------------------------------------------------------- /.idea/.gitignore: -------------------------------------------------------------------------------- 1 | # Default ignored files 2 | /shelf/ 3 | /workspace.xml 4 | # Editor-based HTTP Client requests 5 | /httpRequests/ 6 | # Datasource local storage ignored files 7 | /dataSources/ 8 | /dataSources.local.xml 9 | /sonarlint* -------------------------------------------------------------------------------- /.idea/METcalcpy.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | 13 | 15 | -------------------------------------------------------------------------------- /.idea/inspectionProfiles/Project_Default.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 19 | -------------------------------------------------------------------------------- /.idea/inspectionProfiles/profiles_settings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | -------------------------------------------------------------------------------- /.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build all formats (htmlzip, pdf, epub) 9 | #formats: all 10 | formats: [] 11 | 12 | build: 13 | os: ubuntu-22.04 14 | tools: 15 | python: "3.10" 16 | 17 | # Optionally set the version of Python and requirements required to build your 18 | # docs 19 | python: 20 | install: 21 | - requirements: docs/requirements.txt 22 | 23 | # Configuration for Sphinx documentation (this is the default documentation 24 | # type) 25 | sphinx: 26 | builder: html 27 | configuration: docs/conf.py 28 | fail_on_warning: true 29 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "IDX.aI.enableInlineCompletion": true, 3 | "IDX.aI.enableCodebaseIndexing": true 4 | } -------------------------------------------------------------------------------- /README.md: 
--------------------------------------------------------------------------------
1 | # METcalcpy
2 | Provides libraries for the following: calculating statistics, pre-processing input, and performing diagnostics for METviewer,
3 | METexpress, and the plotting scripts in METplotpy.
4 |
5 | Please see the [METcalcpy User's Guide](https://metcalcpy.readthedocs.io/en/latest) for more information.
6 |
7 | Support for the METplus components is provided through the
8 | [METplus Discussions](https://github.com/dtcenter/METplus/discussions) forum.
9 | Users are welcome and encouraged to answer or address each other's questions there! For more
10 | information, please read
11 | "[Welcome to the METplus Components Discussions](https://github.com/dtcenter/METplus/discussions/939)".
12 |
13 | For information about the support provided for releases, see our [Release Support Policy](https://metplus.readthedocs.io/en/develop/Release_Guide/index.html#release-support-policy).
14 |
15 | Instructions for installing the metcalcpy package locally
16 | ---------------------------------------------------------
17 | - activate your conda environment (e.g. 'conda activate your-conda-env-name')
18 | - from within your active conda environment, cd to the METcalcpy/ directory, where you will see the setup.py script
19 | - from this directory, run the following on the command line: pip install -e .
20 | - the -e option stands for editable, which is useful in that you can update your METcalcpy/metcalcpy source without reinstalling it
21 | - the . indicates that you should search the current directory for the setup.py script
22 |
23 | - use the metcalcpy package via an import statement:
24 | - Examples:
25 |
26 | - import metcalcpy.util.ctc_statistics as cstats
27 | - to use the functions in the ctc_statistics module
28 |
29 | Instructions for installing the metcalcpy package from PyPI
30 | -----------------------------------------------------------
31 |
32 | - activate your Python 3.10+ conda environment
33 | - run the following from the command line:
34 | - pip install metcalcpy==x.y.z, where x.y.z is the version number of interest
35 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	[ -d $(BUILDDIR) ] || mkdir -p $(BUILDDIR)
21 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -w "$(BUILDDIR)/warnings.log"
22 |
23 | clean:
24 | 	rm -rf $(BUILDDIR)/* $(SOURCEDIR)/examples
25 |
--------------------------------------------------------------------------------
/docs/README:
--------------------------------------------------------------------------------
1 | All user and contributor documentation resides in this directory.
2 |
--------------------------------------------------------------------------------
/docs/Users_Guide/figure/FileReaderIO.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/docs/Users_Guide/figure/FileReaderIO.png
--------------------------------------------------------------------------------
/docs/Users_Guide/figure/weighting_wind_speed_difficulty_index.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/docs/Users_Guide/figure/weighting_wind_speed_difficulty_index.png
--------------------------------------------------------------------------------
/docs/Users_Guide/index.rst:
--------------------------------------------------------------------------------
1 | ############
2 | User's Guide
3 | ############
4 |
5 | **Foreword: A note to METcalcpy users**
6 |
7 | This User's Guide is provided as an aid to users of METcalcpy. METcalcpy is a
8 | Python version of the statistics calculation functionality of METviewer,
9 | METexpress, and the plotting packages in METplotpy, and is a stand-alone package
10 | for any other application. It is also a component of the unified METplus verification
11 | framework. More details about METplus can be found on the
12 | `METplus website `_.
13 |
14 | It is important to note here that METcalcpy is an evolving software package.
15 | This documentation describes the |release| release dated |release_date|.
16 | Intermediate releases may include bug fixes. METcalcpy is also able to accept
17 | new modules contributed by the community. If you have code you would like to
18 | contribute, we will gladly consider your contribution. Please create a post
19 | in the
20 | `METplus GitHub Discussions Forum `_.
21 | We will then determine if we will be able to
22 | include the contribution in a future version.
23 |
24 | **Model Evaluation Tools Calc Py (METcalcpy) TERMS OF USE - IMPORTANT!**
25 |
26 | Copyright |copyright|
27 | Licensed under the Apache License, Version 2.0 (the "License");
28 | You may not use this file except in compliance with the License.
29 |
30 | You may obtain a copy of the License at
31 |
32 | http://www.apache.org/licenses/LICENSE-2.0
33 |
34 |
35 | Unless required by applicable law or agreed to in writing, software distributed under
36 | the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
37 | ANY KIND, either express or implied. See the License for the specific language
38 | governing permissions and limitations under the License.
39 |
40 | .. _citations:
41 |
42 | **Citations**
43 |
44 | The citation for this User's Guide should be:
45 |
46 | |author_list|, |release_year|: The METcalcpy Version |version| User's Guide.
47 | Developmental Testbed Center.
48 | Available at: https://github.com/dtcenter/METcalcpy/releases.
49 |
50 | **Acknowledgments**
51 |
52 | We thank all of the METplus sponsors, including the DTC partners (NOAA, NCAR,
53 | USAF, and NSF), along with NOAA/Office of Atmospheric Research (OAR),
54 | NOAA/National Weather Service, NOAA/Joint Technology Transfer Program (JTTI),
55 | NOAA/Subseasonal to Seasonal (S2S) Project, NOAA/Unified Forecast System
56 | Research to Operations Project (UFS R2O), Met Office, and the Naval Research
57 | Laboratory (NRL).
Thanks also go to the staff at the Developmental Testbed
58 | Center for their help, advice, and many types of support. Finally, the
59 | National Center for Atmospheric Research (NCAR) is sponsored by NSF.
60 |
61 | .. toctree::
62 |    :titlesonly:
63 |    :numbered: 4
64 |
65 |    installation
66 |    logging
67 |    vertical_interpolation
68 |    difficulty_index
69 |    aggregation
70 |    write_mpr
71 |    release-notes
72 |    ../diag_ref/index
73 |
74 | **Indices and tables**
75 |
76 | * :ref:`genindex`
77 | * :ref:`search`
78 |
79 |
--------------------------------------------------------------------------------
/docs/Users_Guide/release-notes.rst:
--------------------------------------------------------------------------------
1 | *****************************
2 | METcalcpy Release Information
3 | *****************************
4 |
5 | When applicable, release notes are followed by the GitHub issue number which
6 | describes the bugfix, enhancement, or new feature: `METcalcpy GitHub issues. `_
7 |
8 | METcalcpy Release Notes
9 | =======================
10 |
11 | METcalcpy Version 3.0.0 release notes (20241218)
12 | ----------------------------------------------------
13 |
14 |
15 | .. dropdown:: New Functionality
16 |
17 |    * **Add updates to MPR writer and fix bugs for stratosphere** (`#385 `_)
18 |
19 | .. dropdown:: Enhancements
20 |
21 |    * Improve logging for 5 STIGs (`METplus-Internal#46 `_)
22 |    * **Enhance METcalcpy to use the TOTAL_DIR column when aggregating wind direction statistics in the VL1L2, VAL1L2, and VCNT line types** (`#384 `_)
23 |    * Add calculation for Terrestrial Coupling Index (`#364 `_)
24 |    * Enhance aggregate statistics for ECNT, VL1L2, VAL1L2, and VCNT (`#361 `_)
25 |    * **Create aggregation support for MET .stat output** (`#325 `_)
26 |
27 | .. dropdown:: Internal
28 |
29 |    * Update GitHub issue and pull request templates to reflect the current development workflow details (`#326 `_)
30 |    * Consider using only .yml or only .yaml extensions (`#349 `_)
31 |    * Code coverage statistics (`#54 `_)
32 |    * Develop SonarQube capabilities (`#367 `_)
33 |    * Add GitHub action for SonarQube (`#366 `_)
34 |    * Updated Python requirements.txt (`#355 `_)
35 |    * Modified Python requirements section of User's Guide (`#352 `_)
36 |    * Update GitHub actions workflows to switch from node 16 to node 20 (`#345 `_)
37 |    * Change second person references to third (`#315 `_)
38 |    * Enhanced documentation for Difficulty index (`#332 `_)
39 |
40 | .. dropdown:: Bugfixes
41 |
42 |    * Bugfix missing safe_log import (`#413 `_)
43 |    * Bugfix: MODE CSI calculations result in spurious results (`#360 `_)
44 |    * Address negative values returned by calculate_bcmse() and calculate_bcrmse() in sl1l2_statistics module (`#329 `_)
45 |    * Add missing reliability statistics (`#330 `_)
46 |    * Remove reset_index from various calculations (`#322 `_)
47 |
48 |
49 | METcalcpy Upgrade Instructions
50 | ==============================
51 |
52 | None
53 |
--------------------------------------------------------------------------------
/docs/Users_Guide/write_mpr.rst:
--------------------------------------------------------------------------------
1 | **********************
2 | Write MPR
3 | **********************
4 |
5 | Description
6 | ===========
7 |
8 | This program writes data to an output file in MET's Matched Pair (MPR) format. It
9 | takes several inputs, which are described in the list below.
The script will compute
10 | the observation input and total number of observations. It will also check to see if
11 | the output directory is present and will create that directory if it does not exist.
12 |
13 | Example
14 | =======
15 |
16 | Examples for how to use this script can be found in the driver scripts of the use cases
17 | listed below.
18 |
19 | * `Blocking `_
20 | * `Weather Regime `_
21 | * `Stratosphere Polar `_
22 | * `Stratosphere QBO `_
23 |
24 | Information about Input Data
25 | ============================
26 |
27 | At this time, all input arrays must be one-dimensional and should be the same size.
28 | The script does not attempt to check that the input arrays are the same size. If any of
29 | your input arrays are longer than the observation input array, the data will be truncated at
30 | the length of the observation input. If an array is shorter than the observation input, the
31 | program will raise an error.
32 |
33 | Currently, the following variables cannot be set and will be output as NA: FCST_THRESH,
34 | OBS_THRESH, COV_THRESH, ALPHA, OBS_QC, CLIMO_MEAN, CLIMO_STDEV, CLIMO_CDF. Additionally, the
35 | following variables cannot be set and have default values: INTERP_MTHD = NEAREST,
36 | INTERP_PNTS = 1, and OBTYPE = ADPUPA.
37 |
38 | data_fcst: 1D array float
39 |     forecast data to write to MPR file
40 | data_obs: 1D array float
41 |     observation data to write to MPR file
42 | lats_in: 1D array float
43 |     data latitudes
44 | lons_in: 1D array float
45 |     data longitudes
46 | fcst_lead: 1D array string of format HHMMSS
47 |     forecast lead time
48 | fcst_valid: 1D array string of format YYYYmmdd_HHMMSS
49 |     forecast valid time
50 | obs_lead: 1D array string of format HHMMSS
51 |     observation lead time
52 | obs_valid: 1D array string of format YYYYmmdd_HHMMSS
53 |     observation valid time
54 | mod_name: string
55 |     output model name (the MODEL column in MET)
56 | desc: 1D array string
57 |     output description (the DESC column in MET)
58 | fcst_var: 1D array string
59 |     forecast variable name
60 | fcst_unit: 1D array string
61 |     forecast variable units
62 | fcst_lev: 1D array string
63 |     forecast variable level
64 | obs_var: 1D array string
65 |     observation variable name
66 | obs_unit: 1D array string
67 |     observation variable units
68 | obs_lev: 1D array string
69 |     observation variable level
70 | maskname: string
71 |     name of the verification masking region
72 | obsslev: 1D array string
73 |     Pressure level of the observation in hPa or accumulation
74 |     interval in hours
75 | outdir: string
76 |     Full path to where the output data should go
77 | outfile_prefix: string
78 |     Prefix to use for the output filename. The time stamp will
79 |     be added in MET's format based off the first forecast time
80 |
81 |
82 | Run from a python script
83 | =========================
84 |
85 | * Make sure you have these required Python packages:
86 |
87 |   * Python 3.7
88 |
89 |   * metcalcpy
90 |
91 |   * numpy
92 |
93 |   * os
94 |
95 | .. code-block:: python
96 |
97 |    write_mpr_file(data_fcst,data_obs,lats_in,lons_in,fcst_lead,fcst_valid,obs_lead,obs_valid,mod_name,desc,fcst_var,fcst_unit,fcst_lev,obs_var,obs_unit,obs_lev,maskname,obsslev,outdir,outfile_prefix)
98 |
99 | The output will be a .stat file located in outdir with data in `MET's Matched Pair Format `_. The file will be labeled with outfile_prefix and then have the lead time, valid YYYYMMDD, and valid HHMMSS stamped onto the file name.
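Below is a minimal, hypothetical sketch of a complete call, following the argument order and
string formats documented above. The import path (metcalcpy.util.write_mpr) and all data
values are illustrative assumptions; verify the import against your METcalcpy installation
and substitute your own data.

.. code-block:: python

   import numpy as np

   # assumed import path for the MPR writer; verify against your installation
   from metcalcpy.util.write_mpr import write_mpr_file

   npts = 3  # all 1D input arrays share this length

   # forecast/observation pairs and their locations (illustrative values)
   data_fcst = np.array([280.1, 281.5, 279.8])
   data_obs = np.array([279.9, 281.7, 280.2])
   lats_in = np.array([40.0, 41.0, 42.0])
   lons_in = np.array([-105.0, -104.5, -104.0])

   # times as strings in the formats listed above (HHMMSS and YYYYmmdd_HHMMSS)
   fcst_lead = ['240000'] * npts
   fcst_valid = ['20240101_120000'] * npts
   obs_lead = ['000000'] * npts
   obs_valid = ['20240101_120000'] * npts

   # metadata columns (MODEL, DESC, variable names/units/levels, mask)
   mod_name = 'GFS'
   desc = ['NA'] * npts
   fcst_var = ['TMP'] * npts
   fcst_unit = ['K'] * npts
   fcst_lev = ['P500'] * npts
   obs_var = ['TMP'] * npts
   obs_unit = ['K'] * npts
   obs_lev = ['P500'] * npts
   maskname = 'FULL'
   obsslev = ['500'] * npts

   # output location; the directory is created if it does not exist
   outdir = '/path/to/output'
   outfile_prefix = 'mpr_example_'

   write_mpr_file(data_fcst, data_obs, lats_in, lons_in,
                  fcst_lead, fcst_valid, obs_lead, obs_valid,
                  mod_name, desc, fcst_var, fcst_unit, fcst_lev,
                  obs_var, obs_unit, obs_lev, maskname, obsslev,
                  outdir, outfile_prefix)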
100 | -------------------------------------------------------------------------------- /docs/_static/METplus_banner_photo_web.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/docs/_static/METplus_banner_photo_web.png -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* ToC scrollbar */ 2 | .wy-menu-vertical { 3 | max-height: calc(100vh - 60px) !important; 4 | overflow-y: auto !important; 5 | } 6 | 7 | /* Make the logo and search area sticky */ 8 | .wy-side-nav-search { 9 | position: fixed; 10 | /* width: 300px; */ /* Adjust width as needed */ 11 | z-index: 1000; 12 | padding-bottom: 5px; /* Add space below logo */ 13 | } 14 | 15 | /* Prevent the ToC from being overlapped */ 16 | .wy-menu { 17 | margin-top: 280px; /* Adjust this value based on the logo height */ 18 | } 19 | 20 | /* scroll-margin-top ensures that when an anchor is clicked, the selected element is not hidden behind the sticky header */ 21 | .wy-menu-vertical .current { 22 | scroll-margin-top: 280px; /* Adjust based on your sticky header height */ 23 | } 24 | 25 | /* Hide the sticky logo so that it behaves like the original (disappearing into the hamburger menu) */ 26 | @media (max-width: 768px) { /* Adjust the width as needed */ 27 | .wy-side-nav-search { 28 | display: none; /* Hide logo + search bar when screen is small */ 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /docs/_static/met_calcpy_logo_2019_09.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/docs/_static/met_calcpy_logo_2019_09.png -------------------------------------------------------------------------------- /docs/_static/sphx_glr_cross_spectra_thumb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/docs/_static/sphx_glr_cross_spectra_thumb.png -------------------------------------------------------------------------------- /docs/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block attributes %} 6 | {% if attributes %} 7 | .. rubric:: {{ _('Module Attributes') }} 8 | 9 | .. autosummary:: 10 | {% for item in attributes %} 11 | {{ item }} 12 | {%- endfor %} 13 | {% endif %} 14 | {% endblock %} 15 | 16 | {% block functions %} 17 | {% if functions %} 18 | .. rubric:: {{ _('Functions') }} 19 | 20 | .. autosummary:: 21 | :toctree: 22 | {% for item in functions %} 23 | {{ item }} 24 | {%- endfor %} 25 | {% endif %} 26 | {% endblock %} 27 | 28 | {% block classes %} 29 | {% if classes %} 30 | .. rubric:: {{ _('Classes') }} 31 | 32 | .. autosummary:: 33 | {% for item in classes %} 34 | {{ item }} 35 | {%- endfor %} 36 | {% endif %} 37 | {% endblock %} 38 | 39 | {% block exceptions %} 40 | {% if exceptions %} 41 | .. rubric:: {{ _('Exceptions') }} 42 | 43 | .. autosummary:: 44 | {% for item in exceptions %} 45 | {{ item }} 46 | {%- endfor %} 47 | {% endif %} 48 | {% endblock %} 49 | 50 | {% block modules %} 51 | {% if modules %} 52 | .. 
rubric:: Modules 53 | 54 | .. autosummary:: 55 | :toctree: 56 | :recursive: 57 | {% for item in modules %} 58 | {{ item }} 59 | {%- endfor %} 60 | {% endif %} 61 | {% endblock %} 62 | -------------------------------------------------------------------------------- /docs/_templates/footer.html: -------------------------------------------------------------------------------- 1 | {% extends "!footer.html" %} 2 | {% block extrafooter %} 3 | {{ super() }} 4 | 5 | {% endblock %} 6 | -------------------------------------------------------------------------------- /docs/_templates/placeholder.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/docs/_templates/placeholder.txt -------------------------------------------------------------------------------- /docs/auto_examples/auto_examples_jupyter.zip: -------------------------------------------------------------------------------- 1 | PK -------------------------------------------------------------------------------- /docs/auto_examples/auto_examples_python.zip: -------------------------------------------------------------------------------- 1 | PK -------------------------------------------------------------------------------- /docs/auto_examples/index.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | 4 | .. _sphx_glr_auto_examples: 5 | 6 | .. _examples-index: 7 | 8 | This is the gallery of examples for METcalcpy 9 | ============================================= 10 | This directory holds all the things needed to create 11 | an example gallery of statistics calculations. 12 | 13 | .. _general-examples: 14 | 15 | General Examples 16 | ---------------- 17 | 18 | Some general examples coming soon. 19 | 20 | 21 | 22 | .. raw:: html 23 | 24 |
25 | 26 | .. only:: html 27 | 28 | .. figure:: ../_static/sphx_glr_cross_spectra_thumb.png 29 | 30 | :ref:`sphx_glr_auto_examples_cross_spectra.py` 31 | 32 | .. raw:: html 33 | 34 |
35 | 36 | 37 | .. toctree:: 38 | :hidden: 39 | 40 | /auto_examples/cross_spectra 41 | .. raw:: html 42 | 43 |
44 | 45 | 46 | 47 | .. only :: html 48 | 49 | .. container:: sphx-glr-footer 50 | :class: sphx-glr-footer-gallery 51 | 52 | 53 | .. container:: sphx-glr-download 54 | 55 | :download:`Download all examples in Python source code: auto_examples_python.zip ` 56 | 57 | 58 | 59 | .. container:: sphx-glr-download 60 | 61 | :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip ` 62 | 63 | 64 | .. only:: html 65 | 66 | .. rst-class:: sphx-glr-signature 67 | 68 | `Gallery generated by Sphinx-Gallery `_ 69 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # http://www.sphinx-doc.org/en/master/config 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | sys.path.insert(0, os.path.abspath('..')) 16 | print(sys.path) 17 | 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = 'METcalcpy' 22 | copyright = '2024, NCAR' 23 | author = 'UCAR/NCAR, NOAA, CSU/CIRA, and CU/CIRES' 24 | author_list = 'Kalb, C., D. Adriaansen, D. Fillmore, M. Win-Gildenmeister, T. Burek, M. Smith, T. Jensen, and H. Fisher' 25 | version = '3.0.0' 26 | verinfo = version 27 | release = f'{version}' 28 | release_year = '2024' 29 | release_date = f'{release_year}-12-18' 30 | copyright = f'{release_year}, {author}' 31 | 32 | # if set, adds "Last updated on " followed by 33 | # the date in the specified format 34 | html_last_updated_fmt = '%c' 35 | 36 | # -- General configuration --------------------------------------------------- 37 | 38 | # Add any Sphinx extension module names here, as strings. They can be 39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 40 | # ones. 41 | extensions = ['sphinx.ext.autodoc', 42 | 'sphinx.ext.autosummary', 43 | 'sphinx.ext.intersphinx', 44 | 'sphinx_gallery.gen_gallery', 45 | 'sphinx_design', 46 | 'sphinx_rtd_theme', 47 | 'sphinx.ext.napoleon',] 48 | 49 | # Turn on sphinx.ext.autosummary 50 | autosummary_generate = True 51 | 52 | # Add any paths that contain templates here, relative to this directory. 53 | templates_path = ['_templates'] 54 | 55 | # List of patterns, relative to source directory, that match files and 56 | # directories to ignore when looking for source files. 57 | # This pattern also affects html_static_path and html_extra_path. 58 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'utils/README_util.rst','auto_examples'] 59 | 60 | # Suppress certain warning messages 61 | suppress_warnings = ['ref.citation'] 62 | 63 | # -- Sphinx control ----------------------------------------------------------- 64 | #sphinx_gallery_conf = { 65 | # 'examples_dirs': [os.path.join('..', 'examples')], 66 | # 'gallery_dirs': ['examples'] 67 | #} 68 | 69 | 70 | # -- Options for HTML output ------------------------------------------------- 71 | # Add any paths that contain custom static files (such as style sheets) here, 72 | # relative to this directory. 
They are copied after the builtin static files, 73 | # so a file named "default.css" will overwrite the builtin "default.css". 74 | html_static_path = ['_static'] 75 | 76 | # The theme to use for HTML and HTML Help pages. See the documentation for 77 | # a list of builtin themes. 78 | # 79 | html_theme = 'sphinx_rtd_theme' 80 | html_theme_path = ["_themes", ] 81 | html_js_files = ['pop_ver.js'] 82 | html_css_files = ['theme_override.css','custom.css'] 83 | 84 | # The name of an image file (relative to this directory) to place at the top 85 | # of the sidebar. 86 | html_logo = os.path.join('_static','met_calcpy_logo_2019_09.png') 87 | 88 | # Control html_sidebars 89 | # Include global TOC instead of local TOC by default 90 | #html_sidebars = { '**': ['globaltoc.html','relations.html','sourcelink.html','searchbox.html']} 91 | 92 | # -- Intersphinx control ----------------------------------------------------- 93 | 94 | numfig = True 95 | 96 | numfig_format = { 97 | 'figure': 'Figure %s', 98 | } 99 | 100 | # -- Export variables -------------------------------------------------------- 101 | 102 | rst_epilog = """ 103 | .. |copyright| replace:: {copyrightstr} 104 | .. |author_list| replace:: {author_liststr} 105 | .. |release_date| replace:: {release_datestr} 106 | .. |release_year| replace:: {release_yearstr} 107 | """.format(copyrightstr = copyright, 108 | author_liststr = author_list, 109 | release_datestr = release_date, 110 | release_yearstr = release_year) 111 | 112 | -------------------------------------------------------------------------------- /docs/diag_ref/index.rst: -------------------------------------------------------------------------------- 1 | .. _api-index: 2 | 3 | ******************************** 4 | Diagnostic Calculation Reference 5 | ******************************** 6 | 7 | This reference contains technical documentation for various diagnostic calculations in METcalcpy. 8 | 9 | .. currentmodule:: metcalcpy.diagnostics 10 | 11 | .. autosummary:: 12 | :toctree: generated/ 13 | :recursive: 14 | 15 | land_surface 16 | 17 | * :ref:`modindex` 18 | * :ref:`genindex` 19 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx-gallery==0.14.0 2 | pillow==10.3.0 3 | sphinxcontrib-bibtex==2.6.1 4 | sphinx==5.3.0 5 | sphinx-design==0.3.0 6 | sphinx_rtd_theme==1.3.0 7 | pandas==2.2.1 8 | xarray==2024.3.0 9 | -------------------------------------------------------------------------------- /docs/utils/README_util.rst: -------------------------------------------------------------------------------- 1 | util module 2 | 3 | convert_lon_360_to_180(longitude) 4 | 5 | Description: Takes a numpy array of integer or float longitudes ranging from 0 to 360 degrees and converts the values to -180 to 180 degrees 6 | 7 | convert_lons_indices(lons_in, minlon_in, range_in) 8 | 9 | Description: Takes a numpy array of longitudes as input and reorders it based on a minimum longitude value and the number of longitude values 10 | 11 | Returns: A tuple containing the reordered longitudes and an array of the indices into the original array of longitudes 12 | 13 | -------------------------------------------------------------------------------- /docs/version: -------------------------------------------------------------------------------- 1 | __version__="3.0.0-rc1" -------------------------------------------------------------------------------- /examples/README.txt: -------------------------------------------------------------------------------- 1 | .. _examples-index: 2 | 3 | This is the gallery of examples for METcalcpy 4 | ============================================= 5 | This directory holds all the things needed to create 6 | an example gallery of statistics calculations. 7 | 8 | .. _general-examples: 9 | 10 | General Examples 11 | ---------------- 12 | 13 | Some general examples coming soon.
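In the meantime, here is a minimal sketch that exercises the longitude helper described in docs/utils/README_util.rst above. The import location is an assumption for illustration; the function names and signatures are taken from that README.

.. code-block:: python

    import numpy as np

    # Assumed import location for the helper described in README_util.rst.
    from metcalcpy.util.utils import convert_lon_360_to_180

    # Map 0..360 degree longitudes into the -180..180 convention.
    lons = np.array([0.0, 90.0, 180.0, 270.0])
    lons_180 = convert_lon_360_to_180(lons)

    # convert_lons_indices(lons_in, minlon_in, range_in), also described in
    # README_util.rst, can then reorder longitudes relative to a minimum
    # longitude value and return the indices into the original array.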
14 | -------------------------------------------------------------------------------- /examples/grid_diag_gfs.sh: -------------------------------------------------------------------------------- 1 | python ../metcalcpy/grid_diag.py \ 2 | --datadir $DATA_DIR/GFS/2020-06-01-Summer \ 3 | --outfile grid_diag_gfs.nc \ 4 | --config grid_diag_gfs.yaml \ 5 | --debug 6 | -------------------------------------------------------------------------------- /examples/grid_diag_gfs.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Filename: grid_diag_gfs.yaml 3 | # 4 | # Sample YAML configuration file for grid_diag.py 5 | # 6 | # Author: David Fillmore 7 | # 8 | # Version Date 9 | # 0.1.0 2020/12/01 Initial version 10 | # 11 | 12 | regex: gfs_4_2020060?_??00_000.grb2 13 | 14 | level_type: surface 15 | 16 | data: 17 | 18 | - Temperature_surface: 19 | min: 200 20 | max: 350 21 | n_bins: 150 22 | 23 | - Temperature_surface: 24 | min: 200 25 | max: 350 26 | n_bins: 150 27 | -------------------------------------------------------------------------------- /examples/height_from_pressure_merra2.sh: -------------------------------------------------------------------------------- 1 | python ../metcalcpy/vertical_interp.py \ 2 | --datadir $DATA_DIR/MERRA2/2019-03-13-GreatPlainsCyclone \ 3 | --input MERRA2_400.inst3_3d_asm_Np.20190311.nc4 \ 4 | --config vertical_interp_merra2.yaml \ 5 | --output MERRA2_400.inst3_3d_asm_interp.20190311.nc4 \ 6 | --debug 7 | -------------------------------------------------------------------------------- /examples/height_from_pressure_merra2.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Filename: height_from_pressure_merra2.yaml 3 | # 4 | # Sample YAML configuration file for vertical_interp.py 5 | # 6 | # Author: David Fillmore 7 | # 8 | # Version Date 9 | # 0.1.0 2020/09/01 Initial version 10 | # 11 | 12 | # 13 | # Set to compute geopotential height from pressure 14 | # 15 | height_from_pressure: 16 | True 17 | 18 | # 19 | # Required fields 20 | # 21 | surface_geopotential_name: 22 | 'PHIS' 23 | 24 | surface_pressure_name: 25 | 'PS' 26 | 27 | temperature_name: 28 | 'T' 29 | 30 | relative_humidity_name: 31 | 'RH' 32 | 33 | # 34 | # Field list to process 35 | # 36 | 37 | fields: 38 | - 'QV' 39 | - 'QL' 40 | - 'QI' 41 | - 'U' 42 | - 'V' 43 | 44 | # 45 | # Vertical coordinate information 46 | # 47 | 48 | vertical_coord_type_in: 49 | 'pressure' 50 | 51 | vertical_coord_type_out: 52 | 'height' 53 | 54 | vertical_dim_name: 55 | 'lev' 56 | 57 | vertical_levels : 58 | - 100 59 | - 200 60 | - 500 61 | - 1000 62 | - 2000 63 | - 3000 64 | - 4000 65 | - 5000 66 | 67 | vertical_level_units : 68 | 'meter' 69 | -------------------------------------------------------------------------------- /examples/height_from_pressure_tcrmw.sh: -------------------------------------------------------------------------------- 1 | python ../metcalcpy/vertical_interp.py \ 2 | --datadir $DATA_DIR \ 3 | --input /path/to/input/tc_rmw_example.nc \ 4 | --config height_from_pressure_tcrmw.yaml \ 5 | --output /path/to/output/tc_rmw_dev_test_vertical_interp.nc \ 6 | # --debug 7 | -------------------------------------------------------------------------------- /examples/height_from_pressure_tcrmw.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Filename: height_from_pressure_tcrmw.yaml 3 | # 4 | # Sample YAML configuration file for vertical_interp.py 5 | # 6 | # Author: David
Fillmore 7 | # Email: met_help@ucar.edu 8 | # 9 | # Version Date 10 | # 0.1.0 2021/08/31 Initial version 11 | # 12 | 13 | # 14 | # Set to compute geopotential height from pressure 15 | # 16 | height_from_pressure: 17 | True 18 | 19 | # 20 | # Required fields 21 | # 22 | zero_surface_geopotential: 23 | True 24 | 25 | surface_pressure_name: 26 | 'PRMSL' 27 | 28 | temperature_name: 29 | 'TMP' 30 | 31 | relative_humidity_name: 32 | 'RH' 33 | 34 | # 35 | # Field list to process 36 | # 37 | 38 | fields: 39 | - 'UGRD' 40 | - 'VGRD' 41 | - 'TMP' 42 | 43 | # 44 | # Vertical coordinate information 45 | # 46 | 47 | vertical_coord_type_in: 48 | 'pressure' 49 | 50 | vertical_coord_type_out: 51 | 'height' 52 | 53 | vertical_dim_name: 54 | 'pressure' 55 | 56 | vertical_levels : 57 | - 100 58 | - 200 59 | - 500 60 | - 1000 61 | - 2000 62 | - 3000 63 | - 4000 64 | - 5000 65 | 66 | vertical_level_units : 67 | 'meter' 68 | -------------------------------------------------------------------------------- /examples/read_files.xml: -------------------------------------------------------------------------------- 1 | 2 |
3 | 4 | 5 | 6 | /d2/METcalcpy_Data/MET_stat/ensemble_stat_20100101_120000V_orank.txt 7 | /d2/METcalcpy_Data/MET_stat/ensemble_stat_20100101_120000V.stat 8 | 9 | 10 | 11 | true 12 | test data for Metviewer 13 | false 14 | 1 15 | true 16 | false 17 | false 18 | false 19 | false 20 | test 21 | h projects 22 | true 23 | false 24 | false 25 | true 26 | true 27 | false 28 | 29 |
30 | 31 | -------------------------------------------------------------------------------- /examples/read_files.yaml: -------------------------------------------------------------------------------- 1 | #The environment variable CALCPY_DATA needs to point to the directory 2 | #where your sample METcalcpy data is stored e.g. /d2/METcalcpy_Data 3 | files: 4 | - !ENV '${CALCPY_DATA}/MET_stat/ensemble_stat_20100101_120000V_orank.txt' 5 | - !ENV '${CALCPY_DATA}/MET_stat/ensemble_stat_20100101_120000V.stat' 6 | 7 | -------------------------------------------------------------------------------- /examples/vertical_interp_hwrf.sh: -------------------------------------------------------------------------------- 1 | python ../metcalcpy/vertical_interp.py \ 2 | --datadir $DATA_DIR/HWRF/2019-09-01-HurricaneDorian \ 3 | --input dorian05l.2019090118.hwrfprs.core.0p015.f003.grb2 \ 4 | --config vertical_interp_hwrf.yaml \ 5 | --output dorian05l.2019090118.hwrfprs.core.interp.0p015.f003.nc4 \ 6 | --debug 7 | -------------------------------------------------------------------------------- /examples/vertical_interp_hwrf.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Filename: vertical_interp_hwrf.yaml 3 | # 4 | # Sample YAML configuration file for vertical_interp.py 5 | # 6 | # Author: David Fillmore 7 | # 8 | # Version Date 9 | # 0.1.0 2020/09/01 Initial version 10 | # 11 | 12 | # 13 | # Set to compute geopotential height from pressure 14 | # 15 | height_from_pressure: 16 | False 17 | 18 | # 19 | # Required fields 20 | # 21 | geopotential_height_name: 22 | 'gh' 23 | 24 | # 25 | # Field list to process 26 | # 27 | 28 | fields: 29 | - 'u' 30 | - 'v' 31 | 32 | # 33 | # Vertical coordinate information 34 | # 35 | 36 | vertical_coord_type_in: 37 | 'pressure' 38 | 39 | vertical_coord_type_out: 40 | 'height' 41 | 42 | vertical_dim_name: 43 | 'isobaricInhPa' 44 | 45 | vertical_levels : 46 | - 100 47 | - 200 48 | - 500 49 | - 1000 50 | - 2000 51 | - 3000 52 | - 4000 53 | - 5000 54 | 55 | vertical_level_units : 56 | 'meter' 57 | -------------------------------------------------------------------------------- /examples/vertical_interp_merra2.sh: -------------------------------------------------------------------------------- 1 | python ../metcalcpy/vertical_interp.py \ 2 | --datadir $DATA_DIR/MERRA2/2019-03-13-GreatPlainsCyclone \ 3 | --input MERRA2_400.inst3_3d_asm_Np.20190311.nc4 \ 4 | --config vertical_interp_merra2.yaml \ 5 | --output MERRA2_400.inst3_3d_asm_interp.20190311.nc4 \ 6 | --debug 7 | -------------------------------------------------------------------------------- /examples/vertical_interp_merra2.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Filename: vertical_interp_merra2.yaml 3 | # 4 | # Sample YAML configuration file for vertical_interp.py 5 | # 6 | # Author: David Fillmore 7 | # 8 | # Version Date 9 | # 0.1.0 2020/09/01 Initial version 10 | # 11 | 12 | # 13 | # Set to compute geopotential height from pressure 14 | # 15 | height_from_pressure: 16 | False 17 | 18 | # 19 | # Required fields 20 | # 21 | geopotential_height_name: 22 | 'H' 23 | 24 | # 25 | # Field list to process 26 | # 27 | 28 | fields: 29 | - 'QV' 30 | # - 'QL' 31 | # - 'QI' 32 | # - 'U' 33 | # - 'V' 34 | 35 | # 36 | # Vertical coordinate information 37 | # 38 | 39 | vertical_coord_type_in: 40 | 'pressure' 41 | 42 | vertical_coord_type_out: 43 | 'height' 44 | 45 | vertical_dim_name: 46 | 'lev' 47 | 48 | vertical_levels : 49 | - 100 
50 | - 200 51 | - 500 52 | - 1000 53 | - 2000 54 | - 3000 55 | - 4000 56 | - 5000 57 | 58 | vertical_level_units : 59 | 'meter' 60 | -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_casper: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0 7 | *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" 8 | } 9 | 10 | module load ncarenv/24.12 11 | module load intel/2024.2.1 12 | 13 | prepend-path PATH /glade/work/dtcrt/METplus/casper/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin 14 | prepend-path PYTHONPATH /glade/work/dtcrt/METplus/casper/components/METcalcpy/installations/METcalcpy-3.0.0 15 | -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_derecho: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0 7 | *** For help see the official MET webpage at 8 | #http://www.dtcenter.org/met/users ***" 9 | } 10 | 11 | module load ncarenv/23.09 12 | module load intel/2023.2.1 13 | 14 | prepend-path PATH /glade/work/dtcrt/METplus/derecho/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin 15 | prepend-path PYTHONPATH /glade/work/dtcrt/METplus/derecho/components/METcalcpy/installations/METcalcpy-3.0.0 -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_gaea: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0. 7 | *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" 8 | } 9 | 10 | module unload cray-libsci/24.07.0 11 | module load intel/2023.2.0 12 | 13 | prepend-path PATH /ncrc/proj/nggps_psd/Julie.Prestopnik/projects/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin 14 | prepend-path PYTHONPATH /usw/met/METcalcpy/METcalcpy-3.0.0 15 | -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_hera: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0. 
7 | *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" 8 | } 9 | 10 | prereq intel/2024.2.1 11 | 12 | prepend-path PATH /scratch1/BMC/dtc/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin 13 | prepend-path PYTHONPATH /contrib/METcalcpy/METcalcpy-3.0.0 14 | -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_hercules: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0. 7 | *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" 8 | } 9 | 10 | module load contrib 11 | module load intel-oneapi-compilers/2022.2.1 12 | 13 | prepend-path PATH /work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin 14 | prepend-path PYTHONPATH /apps/contrib/MET/METcalcpy/METcalcpy-3.0.0 15 | -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_jet: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0. 7 | *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" 8 | } 9 | 10 | prereq intel/2024.2.1 11 | 12 | prepend-path PATH /lfs5/HFIP/dtc-hurr/METplus/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin 13 | prepend-path PYTHONPATH /contrib/met/METcalcpy/METcalcpy-3.0.0 14 | -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_orion: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0. 7 | *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" 8 | } 9 | 10 | module load contrib 11 | module load intel-oneapi-compilers/2022.2.1 12 | 13 | prepend-path PATH /work/noaa/ovp/miniconda/miniconda3/envs/metplus_v5.1_py3.10/bin 14 | prepend-path PYTHONPATH /apps/contrib/MET/METcalcpy/METcalcpy-3.0.0 15 | -------------------------------------------------------------------------------- /internal/scripts/installation/modulefiles/3.0.0_wcoss2: -------------------------------------------------------------------------------- 1 | #%Module###################################################################### 2 | ## 3 | ## METcalcpy 4 | ## 5 | proc ModulesHelp { } { 6 | puts stderr "Sets up the paths and environment variables to use the METcalcpy-3.0.0. 
7 | *** For help see the official MET webpage at http://www.dtcenter.org/met/users ***" 8 | } 9 | 10 | module use /apps/ops/para/libs/modulefiles/compiler/intel/19.1.3.304 11 | module load intel 12 | module use /apps/dev/modulefiles/ 13 | module load ve/evs/2.0 14 | 15 | prepend-path PYTHONPATH /apps/sw_review/emc/METcalcpy/3.0.0 16 | -------------------------------------------------------------------------------- /internal/scripts/sonarqube/development.seneca: -------------------------------------------------------------------------------- 1 | # Define the development environment for NCAR project machine seneca 2 | # Based on settings in /usr/local/src/met/README.snat 3 | 4 | # Top-level MET project directory 5 | MET_PROJ_DIR=/d1/projects/MET 6 | 7 | # SonarQube 8 | export SONARQUBE_DIR=/d1/projects/SonarQube/ 9 | export SONARQUBE_WRAPPER_BIN=$SONARQUBE_DIR/build-wrapper-linux-x86 10 | export SONARQUBE_SCANNER_BIN=$SONARQUBE_DIR/sonar-scanner-4.6.2.2472-linux/bin 11 | -------------------------------------------------------------------------------- /internal/scripts/sonarqube/run_nightly.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Run nightly SonarQube scan 4 | #======================================================================= 5 | # 6 | # This run_nightly.sh script calls the run_sonarqube.sh script. 7 | # It is intended to be run nightly through cron. Output should be 8 | # directed to the LOGFILE, per cron convention. To run this script, use 9 | # the following commands: 10 | # 11 | # git clone https://github.com/dtcenter/METcalcpy 12 | # METcalcpy/sonarqube/run_nightly.sh name 13 | # 14 | # Usage: run_nightly.sh name 15 | # where "name" specifies a branch, tag, or hash 16 | # 17 | # For example, scan the develop branch: 18 | # run_nightly.sh develop 19 | # 20 | #======================================================================= 21 | 22 | # Constants 23 | EMAIL_LIST="johnhg@ucar.edu hsoh@ucar.edu jpresto@ucar.edu minnawin@ucar.edu" 24 | KEEP_DAYS=5 25 | GIT_REPO_NAME=METcalcpy 26 | 27 | function usage { 28 | echo 29 | echo "USAGE: run_nightly.sh name" 30 | echo " where \"name\" specifies a branch, tag, or hash." 31 | echo 32 | } 33 | 34 | # Check for arguments 35 | if [ $# -lt 1 ]; then usage; exit 1; fi 36 | 37 | # Store the full path to the scripts directory 38 | SCRIPT_DIR=`dirname $0` 39 | if [[ ${0:0:1} != "/" ]]; then SCRIPT_DIR=$(pwd)/${SCRIPT_DIR}; fi 40 | 41 | # Define the development environment 42 | ENV_FILE=${SCRIPT_DIR}/development.`hostname` 43 | if [[ ! -e ${ENV_FILE} ]]; then 44 | echo "$0: ERROR -> Development environment file missing: ${ENV_FILE}" 45 | exit 1 46 | fi 47 | source ${ENV_FILE} 48 | 49 | SONARQUBE_WORK_DIR=${MET_PROJ_DIR}/MET_regression/sonarqube_${GIT_REPO_NAME} 50 | 51 | # Delete old directories 52 | find ${SONARQUBE_WORK_DIR} -mtime +${KEEP_DAYS} -name "NB*" | \ 53 | xargs rm -rf 54 | 55 | # Create and switch to a run directory 56 | TODAY=`date +%Y%m%d` 57 | YESTERDAY=`date -d "1 day ago" +%Y%m%d` 58 | RUN_DIR=${SONARQUBE_WORK_DIR}/NB${TODAY} 59 | [[ -e ${RUN_DIR} ]] && rm -rf ${RUN_DIR} 60 | mkdir -p ${RUN_DIR} 61 | cd ${RUN_DIR} 62 | 63 | # Create a logfile 64 | LOGFILE=${RUN_DIR}/run_sonarqube_${TODAY}.log 65 | 66 | # Run scan and check for bad return status 67 | ${SCRIPT_DIR}/run_sonarqube.sh ${1} >& ${LOGFILE} 68 | if [[ $? -ne 0 ]]; then 69 | echo "$0: The nightly SonarQube scanning for $GIT_REPO_NAME FAILED in `basename ${RUN_DIR}`."
>> ${LOGFILE} 70 | cat ${LOGFILE} | mail -s "$GIT_REPO_NAME SonarQube scanning failed for ${1} in `basename ${RUN_DIR}` (autogen msg)" ${EMAIL_LIST} 71 | exit 1 72 | fi 73 | 74 | exit 0 75 | -------------------------------------------------------------------------------- /internal/scripts/sonarqube/run_sonarqube.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Run SonarQube Source Code Analyzer for METcalcpy 4 | #======================================================================= 5 | # 6 | # This run_sonarqube.sh script will check out the specified version 7 | # of METcalcpy and run the SonarQube Source Code Analyzer on it. First, 8 | # go to the directory where you would like the SCA output written and 9 | # then run: 10 | # 11 | # git clone https://github.com/dtcenter/METcalcpy 12 | # METcalcpy/sonarqube/run_sonarqube.sh name 13 | # 14 | # Usage: run_sonarqube.sh name 15 | # Test the specified branched version of METcalcpy: 16 | # run_sonarqube.sh {branch name} 17 | # Test the specified tagged version of METcalcpy: 18 | # run_sonarqube.sh {tag name} 19 | # 20 | #======================================================================= 21 | 22 | # Constants 23 | GIT_REPO_NAME=METcalcpy 24 | GIT_REPO="https://github.com/dtcenter/${GIT_REPO_NAME}" 25 | 26 | function usage { 27 | echo 28 | echo "USAGE: $(basename $0) name" 29 | echo " where \"name\" specifies a branch, tag, or hash." 30 | echo 31 | } 32 | 33 | # Check for arguments 34 | if [[ $# -lt 1 ]]; then usage; exit; fi 35 | 36 | # Check that SONAR_TOKEN and SONAR_HOST_URL are defined 37 | if [ -z ${SONAR_TOKEN} ]; then 38 | echo "ERROR: SONAR_TOKEN must be set" 39 | exit 1 40 | fi 41 | if [ -z ${SONAR_HOST_URL} ]; then 42 | echo "ERROR: SONAR_HOST_URL must be set" 43 | exit 1 44 | fi 45 | 46 | # Check that SONARQUBE_WRAPPER_BIN is defined 47 | if [ -z ${SONARQUBE_WRAPPER_BIN} ]; then 48 | which build-wrapper-linux-x86-64 2> /dev/null 49 | if [ $? -eq 0 ]; then 50 | SONARQUBE_WRAPPER_BIN=$(which build-wrapper-linux-x86-64 2> /dev/null) 51 | else 52 | which build-wrapper 2> /dev/null 53 | if [ $? -eq 0 ]; then 54 | SONARQUBE_WRAPPER_BIN=$(which build-wrapper 2> /dev/null) 55 | else 56 | echo "ERROR: SONARQUBE_WRAPPER_BIN must be set" 57 | exit 1 58 | fi 59 | fi 60 | fi 61 | if [ ! -e ${SONARQUBE_WRAPPER_BIN} ]; then 62 | echo "ERROR: SONARQUBE_WRAPPER_BIN (${SONARQUBE_WRAPPER_BIN}) does not exist" 63 | exit 1 64 | fi 65 | 66 | # Check that SONARQUBE_SCANNER_BIN is defined 67 | if [ -z ${SONARQUBE_SCANNER_BIN} ]; then 68 | which sonar-scanner 2> /dev/null 69 | if [ $? -eq 0 ]; then 70 | SONARQUBE_SCANNER_BIN=$(which sonar-scanner 2> /dev/null) 71 | else 72 | echo "ERROR: SONARQUBE_SCANNER_BIN must be set" 73 | exit 1 74 | fi 75 | fi 76 | if [ ! -e ${SONARQUBE_SCANNER_BIN} ]; then 77 | echo "ERROR: SONARQUBE_SCANNER_BIN (${SONARQUBE_SCANNER_BIN}) does not exist" 78 | exit 1 79 | fi 80 | 81 | # Sub-routine for running a command and checking return status 82 | function run_command() { 83 | 84 | # Print the command being called 85 | echo "CALLING: $1" 86 | 87 | # Run the command and store the return status 88 | $1 89 | STATUS=$? 
90 | 91 | # Check return status 92 | if [[ ${STATUS} -ne 0 ]]; then 93 | echo "ERROR: Command returned with non-zero status ($STATUS): $1" 94 | exit ${STATUS} 95 | fi 96 | 97 | return ${STATUS} 98 | } 99 | 100 | # Store the full path to the scripts directory 101 | SCRIPT_DIR=`dirname $0` 102 | if [[ ${0:0:1} != "/" ]]; then SCRIPT_DIR=$(pwd)/${SCRIPT_DIR}; fi 103 | 104 | # Clone repo into a sub-directory and checkout the requested version 105 | REPO_DIR="${GIT_REPO_NAME}-${1}" 106 | 107 | if [ -e ${REPO_DIR} ]; then 108 | run_command "rm -rf ${REPO_DIR}" 109 | fi 110 | run_command "git clone ${GIT_REPO} ${REPO_DIR}" 111 | run_command "cd ${REPO_DIR}" 112 | run_command "git checkout ${1}" 113 | 114 | # Define the version string 115 | SONAR_PROJECT_VERSION=$(cat docs/version | cut -d'=' -f2 | tr -d '" ') 116 | 117 | SONAR_PROPERTIES=sonar-project.properties 118 | 119 | # Configure the sonar-project.properties 120 | [ -e $SONAR_PROPERTIES ] && rm $SONAR_PROPERTIES 121 | sed -e "s|SONAR_PROJECT_VERSION|$SONAR_PROJECT_VERSION|" \ 122 | -e "s|SONAR_HOST_URL|$SONAR_HOST_URL|" \ 123 | -e "s|SONAR_TOKEN|$SONAR_TOKEN|" \ 124 | -e "s|SONAR_BRANCH_NAME|${1}|" \ 125 | $SCRIPT_DIR/$SONAR_PROPERTIES > $SONAR_PROPERTIES 126 | 127 | # Run SonarQube scan for Python code 128 | run_command "${SONARQUBE_SCANNER_BIN}/sonar-scanner" 129 | 130 | -------------------------------------------------------------------------------- /internal/scripts/sonarqube/sonar-project.properties: -------------------------------------------------------------------------------- 1 | # Project and source code settings 2 | sonar.projectKey=METcalcpy 3 | sonar.projectName=METcalcpy 4 | sonar.projectVersion=SONAR_PROJECT_VERSION 5 | sonar.branch.name=SONAR_BRANCH_NAME 6 | sonar.sources=metcalcpy,scratch,test 7 | sonar.coverage.exclusions=test/** 8 | sonar.python.coverage.reportPaths=coverage.xml 9 | sonar.sourceEncoding=UTF-8 10 | 11 | # SonarQube server 12 | sonar.host.url=SONAR_HOST_URL 13 | sonar.token=SONAR_TOKEN 14 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- 1 | # Apache 2 license 2 | # https://apache.org/licenses/LICENSE-2.0 3 | 4 | # from Github's license chooser: 5 | # https://choosealicense.com/licenses/apache-2.0/ -------------------------------------------------------------------------------- /metcalcpy/LICENSE: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/LICENSE -------------------------------------------------------------------------------- /metcalcpy/README: -------------------------------------------------------------------------------- 1 | This directory contains the directories of modules that make up the 2 | METcalcpy package. 3 | 4 | For each new module, create a new sub-directory under the METcalcpy/src directory. 5 | 6 | The METcalcpy/util directory contains modules that provide common functionality across multiple modules. 7 | The METcalcpy/pre_processing directory contains modules that provide common functionality for pre-processing data. 8 | The METcalcpy/diagnostics directory contains those modules useful for calculating diagnostics. 
9 | 10 | Required packages: 11 | 12 | Refer to the User's Guide on Read the Docs: 13 | https://metcalcpy.readthedocs.io/en/develop/Users_Guide/index.html 14 | 15 | -------------------------------------------------------------------------------- /metcalcpy/__init__.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """This module contains a variety of statistical calculations.""" 12 | GROUP_SEPARATOR = ':' 13 | DATE_TIME_REGEX = r'\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}' 14 | -------------------------------------------------------------------------------- /metcalcpy/contributed/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/contributed/__init__.py -------------------------------------------------------------------------------- /metcalcpy/contributed/blocking_weather_regime/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/contributed/blocking_weather_regime/__init__.py -------------------------------------------------------------------------------- /metcalcpy/contributed/mjo_enso/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/contributed/mjo_enso/__init__.py -------------------------------------------------------------------------------- /metcalcpy/contributed/rmm_omi/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/contributed/rmm_omi/__init__.py -------------------------------------------------------------------------------- /metcalcpy/contributed/spacetime/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/contributed/spacetime/__init__.py -------------------------------------------------------------------------------- /metcalcpy/contributed/spacetime/spacetime_utils.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | This is a collection of utility functions. 13 | 14 | List of functions: 15 | 16 | save_Spectra: 17 | Save space-time spectra for plotting. 18 | 19 | lonFlip: 20 | Flip longitudes from -180:180 to 0:360 or vice versa.
21 | 22 | """ 23 | import os 24 | import numpy as np 25 | import xarray as xr 26 | from netCDF4 import Dataset 27 | 28 | 29 | def save_Spectra(STCin, freq_in, wnum_in, filename, filepath, opt=False): 30 | fname = filename + '.nc' 31 | full_filename = os.path.join(filepath, fname) 32 | nc = Dataset(full_filename, 'w', format='NETCDF4') 33 | 34 | nvar, nfrq, nwave = STCin.shape 35 | # dimensions 36 | nc.createDimension('freq', nfrq) 37 | nc.createDimension('wnum', nwave) 38 | nc.createDimension('var', nvar) 39 | 40 | # variables 41 | freq = nc.createVariable('freq', 'double', ('freq',)) 42 | wnum = nc.createVariable('wnum', 'int', ('wnum',)) 43 | var = nc.createVariable('var', 'int', ('var',)) 44 | STC = nc.createVariable('STC', 'double', ('var', 'freq', 'wnum',)) 45 | 46 | # attributes 47 | STC.varnames = ['PX', 'PY', 'CXY', 'QXY', 'COH2', 'PHA', 'V1', 'V2'] 48 | STC.long_name = "Space time spectra" 49 | freq.units = "cpd" 50 | freq.long_name = "frequency" 51 | wnum.units = "" 52 | wnum.long_name = "zonal wavenumber" 53 | var.long_name = "variable number" 54 | 55 | # data 56 | var[:] = np.linspace(0, nvar - 1, nvar) 57 | freq[:] = freq_in 58 | wnum[:] = wnum_in 59 | STC[:, :, :] = STCin 60 | 61 | nc.close() 62 | 63 | 64 | def lonFlip(data,lon): 65 | """ 66 | Change the longitude coordinates from -180:180 to 0:360 or vice versa. 67 | :param data: Input xarray data array (time x lat x lon). 68 | :param lon: Longitude array of the input data. 69 | :return: dataflip 70 | """ 71 | 72 | lonnew = lon.values 73 | 74 | if lonnew.min() < 0: 75 | # change longitude to 0:360 76 | ilonneg = np.where(lon<0) 77 | nlonneg = len(ilonneg[0]) 78 | ilonpos = np.where(lon>=0) 79 | nlonpos = len(ilonpos[0]) 80 | 81 | lonnew[0:nlonpos] = lon[ilonpos[0]].values 82 | lonnew[nlonpos:] = lon[ilonneg[0]].values + 360 83 | 84 | dataflip = xr.DataArray(np.roll(data, nlonneg, axis=2), 85 | dims=data.dims, 86 | coords={'time': data['time'], 'lat': data['lat'], 87 | 'lon': lonnew}) 88 | 89 | else: 90 | # change longitude to -180:180 91 | ilonneg = np.where(lon >= 180) 92 | nlonneg = len(ilonneg[0]) 93 | ilonpos = np.where(lon < 180) 94 | nlonpos = len(ilonpos[0]) 95 | 96 | lonnew[0:nlonneg] = lon[ilonneg[0]].values - 360 97 | lonnew[nlonneg:] = lon[ilonpos[0]].values 98 | 99 | dataflip = xr.DataArray(np.roll(data, nlonpos, axis=2), 100 | dims=data.dims, 101 | coords={'time': data['time'], 102 | 'lat': data['lat'], 'lon': lonnew}) 103 | 104 | return dataflip 105 | -------------------------------------------------------------------------------- /metcalcpy/contributed/tropical_diagnostics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/contributed/tropical_diagnostics/__init__.py -------------------------------------------------------------------------------- /metcalcpy/contributed/tropical_diagnostics/compute_omi_example.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | Compute OMI index from input OLR data. 
13 | """ 14 | 15 | import numpy as np 16 | import xarray as xr 17 | import datetime 18 | import pandas as pd 19 | 20 | import compute_mjo_indices as cmi 21 | from plot_mjo_indices import phase_diagram 22 | 23 | # set dates to read 24 | datestrt = '1979-01-01' 25 | datelast = '2012-12-31' 26 | 27 | spd = 1 # number of obs per day 28 | time = np.arange(datestrt,datelast, dtype='datetime64[D]') 29 | ntim = len(time) 30 | 31 | # read OLR from file 32 | ds = xr.open_dataset('/data/mgehne/OLR/olr.1x.7920.nc') 33 | olr = ds['olr'].sel(lat=slice(-20,20),time=slice(datestrt,datelast)) 34 | lat = ds['lat'].sel(lat=slice(-20,20)) 35 | lon = ds['lon'] 36 | print(olr.min(), olr.max()) 37 | 38 | # project OLR onto EOFs 39 | PC1, PC2 = cmi.omi(olr[0:ntim,:,:], time, spd, './data/') 40 | 41 | print(PC1.min(), PC1.max()) 42 | 43 | # set dates to plot 44 | datestrt = '2012-01-01' 45 | datelast = '2012-03-31' 46 | 47 | time = np.arange(datestrt,datelast, dtype='datetime64[D]') 48 | ntim = len(time) 49 | PC1 = PC1.sel(time=slice(datestrt,datelast)) 50 | PC2 = PC2.sel(time=slice(datestrt,datelast)) 51 | PC1 = PC1[0:ntim] 52 | PC2 = PC2[0:ntim] 53 | 54 | months = [] 55 | days = [] 56 | for idx, val in enumerate(time): 57 | date = pd.to_datetime(val).timetuple() 58 | month = date.tm_mon 59 | day = date.tm_mday 60 | months.append(month) 61 | days.append(day) 62 | 63 | # plot the PC phase diagram 64 | phase_diagram('OMI',PC1,PC2,time,months,days,'OMI_comp_phase','png') -------------------------------------------------------------------------------- /metcalcpy/contributed/tropical_diagnostics/compute_rmm_example.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | Compute RMM index from input U850, U200 and OLR data. 
Data is averaged from 20S-20N 13 | """ 14 | 15 | import numpy as np 16 | import xarray as xr 17 | import datetime 18 | import pandas as pd 19 | 20 | import compute_mjo_indices as cmi 21 | import plot_mjo_indices as pmi 22 | 23 | # set dates to read 24 | datestrt = '2000-01-01' 25 | datelast = '2002-12-31' 26 | 27 | spd = 1 # number of obs per day 28 | time = np.arange(datestrt,datelast, dtype='datetime64[D]') 29 | ntim = len(time) 30 | 31 | ####################################### 32 | # read RMM EOFs from file and plot 33 | EOF1, EOF2 = cmi.read_rmm_eofs('./data/') 34 | pmi.plot_rmm_eofs(EOF1, EOF2, 'RMM_EOFs','png') 35 | 36 | ####################################### 37 | # read data from file 38 | ds = xr.open_dataset('/data/mgehne/OLR/olr.1x.7920.anom7901.nc') 39 | olr = ds['olr'].sel(lat=slice(-15,15),time=slice(datestrt,datelast)) 40 | lon = ds['lon'] 41 | olr = olr.mean('lat') 42 | print(olr.min(), olr.max()) 43 | 44 | ds = xr.open_dataset('/data/mgehne/ERAI/uwnd.erai.an.2p5.850.daily.anom7901.nc') 45 | u850 = ds['uwnd'].sel(lat=slice(-15,15),time=slice(datestrt,datelast)) 46 | u850 = u850.mean('lat') 47 | print(u850.min(), u850.max()) 48 | 49 | ds = xr.open_dataset('/data/mgehne/ERAI/uwnd.erai.an.2p5.200.daily.anom7901.nc') 50 | u200 = ds['uwnd'].sel(lat=slice(-15,15),time=slice(datestrt,datelast)) 51 | u200 = u200.mean('lat') 52 | print(u200.min(), u200.max()) 53 | 54 | ######################################## 55 | # project data onto EOFs 56 | PC1, PC2 = cmi.rmm(olr[0:ntim,:], u850[0:ntim,:], u200[0:ntim,:], time, spd, './data/') 57 | 58 | print(PC1.min(), PC1.max()) 59 | 60 | 61 | ######################################## 62 | # plot phase diagram 63 | datestrt = '2002-01-01' 64 | datelast = '2002-12-31' 65 | 66 | time = np.arange(datestrt,datelast, dtype='datetime64[D]') 67 | ntim = len(time) 68 | PC1 = PC1.sel(time=slice(datestrt,datelast)) 69 | PC2 = PC2.sel(time=slice(datestrt,datelast)) 70 | PC1 = PC1[0:ntim] 71 | PC2 = PC2[0:ntim] 72 | 73 | months = [] 74 | days = [] 75 | for idx, val in enumerate(time): 76 | date = pd.to_datetime(val).timetuple() 77 | month = date.tm_mon 78 | day = date.tm_mday 79 | months.append(month) 80 | days.append(day) 81 | 82 | # plot the PC phase diagram 83 | pmi.phase_diagram('RMM',PC1,PC2,time,months,days,'RMM_comp_phase','png') 84 | 85 | # plot PC time series 86 | pmi.pc_time_series('RMM',PC1,PC2,time,months,days,'RMM_time_series','png') -------------------------------------------------------------------------------- /metcalcpy/contributed/tropical_diagnostics/example_kelvin_activity.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | import numpy as np 12 | import xarray as xr 13 | """ 14 | local scripts, if loading from a different directory include that with a '.' between 15 | directory name and script name 16 | """ 17 | import ccew_activity as ccew 18 | 19 | """ 20 | Paths to plot and data directories. The annual EOF files for the waves are provided. The eofpath should point 21 | to the location of these files. 
The filenames need to match what is being read in in ccew_activity.waveact 22 | """ 23 | # plotpath = '../plots/' 24 | # eofpath = '../data/EOF/' 25 | # datapath = '../data/' 26 | plotpath = '/Users/fillmore/working/' 27 | eofpath = '/Volumes/d1/fillmore/Data/TropicalDiagnostics/' 28 | datapath = '/Volumes/d1/fillmore/Data/TropicalDiagnostics/' 29 | """ 30 | Parameters to set for plotting Kelvin activity index. 31 | """ 32 | wave = 'Kelvin' 33 | datestrt = '2015-12-01 00:00:00' 34 | datelast = '2016-03-31 13:00:00' 35 | 36 | 37 | print("reading ERAI data from file:") 38 | # spd = 1 39 | spd = 2 40 | ds = xr.open_dataset(datapath+'/precip.erai.sfc.1p0.'+str(spd)+'x.2014-2016.nc') 41 | A = ds.precip 42 | print("extracting time period:") 43 | A = A.sel(time=slice(datestrt, datelast)) 44 | A = A.squeeze() 45 | timeA = ds.time.sel(time=slice(datestrt, datelast)) 46 | ds.close() 47 | A = A * 1000/4 48 | A.attrs['units'] = 'mm/d' 49 | 50 | print("project data onto wave EOFs") 51 | waveactA = ccew.waveact(A, wave, eofpath, spd, '1p0', 180, 'annual') 52 | print(waveactA.min(), waveactA.max()) 53 | 54 | 55 | print("reading observed precipitation data from file:") 56 | # spd = 1 57 | spd = 2 58 | ds = xr.open_dataset(datapath+'/precip.trmm.'+str(spd)+'x.1p0.v7a.fillmiss.comp.2014-2016.nc') 59 | B = ds.precip 60 | print("extracting time period:") 61 | B = B.sel(time=slice(datestrt, datelast)) 62 | B = B.squeeze() 63 | timeB = ds.time.sel(time=slice(datestrt, datelast)) 64 | ds.close() 65 | B.attrs['units'] = 'mm/d' 66 | 67 | print("project data onto wave EOFs") 68 | waveactB = ccew.waveact(B, wave, eofpath, spd, '1p0', 180, 'annual') 69 | print(waveactB.min(), waveactB.max()) 70 | 71 | exps = [0, 1] 72 | explabels = ['trmm', 'erai'] 73 | nexps = len(exps) 74 | 75 | print("computing skill") 76 | skill = ccew.wave_skill(B) 77 | 78 | 79 | ##### maybe this next routine needs to be moved to METplotpy? ############ 80 | # ccew.plot_skill(skill, wave, explabels, plotpath) 81 | -------------------------------------------------------------------------------- /metcalcpy/contributed/tropical_diagnostics/phase_diagram_example.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | Read in OMI or RMM indices and plot phase diagram for specified dates. 
OMI values 13 | can be obtained from https://psl.noaa.gov/mjo/; RMM values can be obtained from 14 | http://www.bom.gov.au/climate/mjo/graphics/rmm.74toRealtime.txt 15 | """ 16 | 17 | import numpy as np 18 | import pandas as pd 19 | import datetime 20 | 21 | from plot_mjo_indices import phase_diagram 22 | 23 | # which index are we plotting 24 | indexname = 'RMM' # 'RMM' or 'OMI' 25 | 26 | # set dates to read and plot 27 | datestrt = 20120101 28 | datelast = 20120331 29 | 30 | # read data from text file 31 | if indexname=='OMI': 32 | data = pd.read_csv('./data/omi.1x.txt', header=None, delim_whitespace=True, names=['yyyy','mm','dd','hh','pc1','pc2','amp']) 33 | elif indexname=='RMM': 34 | data = pd.read_csv('./data/rmm.1x.txt', header=None, delim_whitespace=True, names=['yyyy','mm','dd', 'pc1','pc2','phase','amp','source']) 35 | 36 | DATES = data.yyyy.values*10000 + data.mm.values*100 + data.dd.values 37 | MONTHS = data.mm.values 38 | DAYS = data.dd.values 39 | #print(dates) 40 | 41 | istrt = np.where(DATES==datestrt)[0][0] 42 | ilast = np.where(DATES==datelast)[0][0] 43 | print(DATES[istrt], DATES[ilast]) 44 | #print(istrt, ilast) 45 | 46 | # subset data to only the dates we want to plot 47 | dates = DATES[istrt:ilast+1] 48 | months = MONTHS[istrt:ilast+1] 49 | days = DAYS[istrt:ilast+1] 50 | print(dates.min(), dates.max()) 51 | PC1 = data.pc1.values[istrt:ilast+1] 52 | PC2 = data.pc2.values[istrt:ilast+1] 53 | 54 | # plot the phase diagram 55 | phase_diagram(indexname,PC1,PC2,dates,months,days,indexname+'_phase','png') -------------------------------------------------------------------------------- /metcalcpy/contributed/tropical_diagnostics/readme.md: -------------------------------------------------------------------------------- 1 | # tropical-diagnostics 2 | Python scripts for tropical diagnostics of NWP forecasts. 3 | 4 | The diagnostics are meant to be applied to gridded forecast data and example scripts are provided to show how 5 | to apply the diagnostics at different lead times. 6 | 7 | Required model output is primarily precipitation. This is enough to compute Hovmoeller diagrams and 8 | compare to observations and to project onto the convectively coupled equatorial wave (CCEW) EOFs to 9 | analyze CCEW activity and skill in model forecasts. 10 | 11 | To compute cross-spectra between precipitation and dynamical variables, single level dynamical fields are also 12 | required. In the example, coherence spectra between divergence (850hPa or 200hPa) and precipitation are considered. 13 | 14 | ## tropical_diagnostics 15 | Contains the functions and modules necessary to compute the various diagnostics. The main diagnostics 16 | included are: 17 | 18 | ### Hovmoeller diagrams 19 | Functions to compute hovmoeller latitudinal averages and pattern correlation are included in 20 | **hovmoeller_calc.py**. Plotting routines are included in **hovmoeller_plotly.py**. The driver script is 21 | **example_hovmoeller.py**, which reads in data from the default data directory ../data, computes latitude 22 | averages, and calls the plotting routine. 23 | 24 | ### Space-time spectra 25 | Functions for computing 2D Fourier transforms and 2D power and cross-spectra are included in **spacetime.py**. 26 | To plot the spectra, **spacetime_plot.py** uses pyngl, which is based on NCL and provides similar control 27 | over plotting resources. The driver script is **example_cross_spectra.py**, which reads in data from the default 28 | data directory ../data and computes cross-spectral estimates.
The output is saved as netcdf and the output directory 29 | needs to be specified. The driver script for plotting is **example_cross_spectra_plot.py**, which reads the output 30 | files from **example_cross_spectra.py** and calls the plotting routine. 31 | 32 | ### CCEW activity and skill 33 | Functions to project precipitation (either from model output or observations) onto CCEW EOF patterns and 34 | compute wave activity and a CCEW skill score are included in **ccew_activity.py**. Also included are routines 35 | to plot the activity and the skill compared to observations. The EOFs are provided on a 1 degree lat-lon grid; the 36 | path to the location of the EOF files needs to be specified in the driver script **example_kelvin_activity.py**. This 37 | script calls the routines to compute the activity index and the plotting routines. -------------------------------------------------------------------------------- /metcalcpy/contributed/tropical_diagnostics/utils.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | This is a collection of utility functions. 13 | 14 | List of functions: 15 | 16 | save_Spectra: 17 | Save space-time spectra for plotting. 18 | 19 | lonFlip: 20 | Flip longitudes from -180:180 to 0:360 or vice versa. 21 | 22 | """ 23 | 24 | import numpy as np 25 | import xarray as xr 26 | from netCDF4 import Dataset 27 | 28 | 29 | def save_Spectra(STCin, freq_in, wnum_in, filename, filepath, opt=False): 30 | nc = Dataset(filepath + filename + '.nc', 'w', format='NETCDF4') 31 | 32 | nvar, nfrq, nwave = STCin.shape 33 | # dimensions 34 | nc.createDimension('freq', nfrq) 35 | nc.createDimension('wnum', nwave) 36 | nc.createDimension('var', nvar) 37 | 38 | # variables 39 | freq = nc.createVariable('freq', 'double', ('freq',)) 40 | wnum = nc.createVariable('wnum', 'int', ('wnum',)) 41 | var = nc.createVariable('var', 'int', ('var',)) 42 | STC = nc.createVariable('STC', 'double', ('var', 'freq', 'wnum',)) 43 | 44 | # attributes 45 | STC.varnames = ['PX', 'PY', 'CXY', 'QXY', 'COH2', 'PHA', 'V1', 'V2'] 46 | STC.long_name = "Space time spectra" 47 | freq.units = "cpd" 48 | freq.long_name = "frequency" 49 | wnum.units = "" 50 | wnum.long_name = "zonal wavenumber" 51 | var.long_name = "variable number" 52 | 53 | # data 54 | var[:] = np.linspace(0, nvar - 1, nvar) 55 | freq[:] = freq_in 56 | wnum[:] = wnum_in 57 | STC[:, :, :] = STCin 58 | 59 | nc.close() 60 | 61 | 62 | def lonFlip(data,lon): 63 | """ 64 | Change the longitude coordinates from -180:180 to 0:360 or vice versa. 65 | :param data: Input xarray data array (time x lat x lon). 66 | :param lon: Longitude array of the input data.
67 | :return: dataflip 68 | """ 69 | 70 | lonnew = lon.values.copy()  # copy so the assignments below do not mutate the input coordinate in place 71 | 72 | if lonnew.min() < 0: 73 | # change longitude to 0:360 74 | ilonneg = np.where(lon<0) 75 | nlonneg = len(ilonneg[0]) 76 | ilonpos = np.where(lon>=0) 77 | nlonpos = len(ilonpos[0]) 78 | 79 | lonnew[0:nlonpos] = lon[ilonpos[0]].values 80 | lonnew[nlonpos:] = lon[ilonneg[0]].values + 360 81 | 82 | dataflip = xr.DataArray(np.roll(data, nlonneg, axis=2), dims=data.dims, 83 | coords={'time': data['time'], 'lat': data['lat'], 'lon': lonnew}) 84 | 85 | else: 86 | # change longitude to -180:180 87 | ilonneg = np.where(lon >= 180) 88 | nlonneg = len(ilonneg[0]) 89 | ilonpos = np.where(lon < 180) 90 | nlonpos = len(ilonpos[0]) 91 | 92 | lonnew[0:nlonneg] = lon[ilonneg[0]].values - 360 93 | lonnew[nlonneg:] = lon[ilonpos[0]].values 94 | 95 | dataflip = xr.DataArray(np.roll(data, nlonpos, axis=2), dims=data.dims, 96 | coords={'time': data['time'], 'lat': data['lat'], 'lon': lonnew}) 97 | 98 | return dataflip 99 | -------------------------------------------------------------------------------- /metcalcpy/diagnostics/README: -------------------------------------------------------------------------------- 1 | This is where useful diagnostics modules are located. 2 | -------------------------------------------------------------------------------- /metcalcpy/diagnostics/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/diagnostics/__init__.py -------------------------------------------------------------------------------- /metcalcpy/diagnostics/land_surface.py: -------------------------------------------------------------------------------- 1 | """Diagnostics relevant to Land/Surface applications""" 2 | 3 | from xarray.core.dataarray import DataArray 4 | from pandas.core.series import Series 5 | 6 | def calc_tci(soil_data,sfc_flux_data,skipna=True): 7 | """ Function for computing the Terrestrial Coupling Index 8 | 9 | Args: 10 | soil_data (Xarray DataArray or Pandas Series): The moisture variable to use for computing TCI. 11 | sfc_flux_data (Xarray DataArray or Pandas Series): The latent heat flux variable to use for computing TCI. 12 | skipna (bool): Skip NA values. Passed to Pandas or Xarray. 13 | 14 | Returns: 15 | Xarray DataArray or float32: If Xarray DataArrays are passed, then an Xarray DataArray 16 | containing the gridded TCI is returned. If a Pandas Series is passed, then a single TCI 17 | value is returned. 18 | 19 | Raises: 20 | TypeError: If an unrecognized object type is passed, or the object types do not match. 21 | 22 | Reference: 23 | Dirmeyer, P. A., 2011: The terrestrial segment of soil moisture-climate coupling. *Geophys. Res. Lett.*, **38**, L16702, doi: 10.1029/2011GL048268.
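Example (illustrative only; the soil moisture and latent heat flux values below are made up):
    >>> import pandas as pd
    >>> soil = pd.Series([0.25, 0.30, 0.28, 0.33])
    >>> flux = pd.Series([110.0, 140.0, 125.0, 150.0])
    >>> tci = calc_tci(soil, flux)  # single value: covariance term divided by the soil standard deviation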
24 | 25 | """ 26 | 27 | # For Xarray objects, compute the mean, count, and standard deviation along the time dimension 28 | if isinstance(soil_data,DataArray) and isinstance(sfc_flux_data,DataArray): 29 | soil_mean = soil_data.mean(dim='time',skipna=skipna) 30 | soil_count = soil_data.count(dim='time') 31 | sfc_flux_mean = sfc_flux_data.mean(dim='time',skipna=skipna) 32 | soil_std = soil_data.std(dim='time',skipna=skipna) 33 | numer = ((soil_data-soil_mean) * (sfc_flux_data-sfc_flux_mean)).sum(dim='time',skipna=skipna) 34 | 35 | # For Pandas objects, compute the mean, count, and standard deviation 36 | elif isinstance(soil_data,Series) and isinstance(sfc_flux_data,Series): 37 | soil_mean = soil_data.mean(skipna=skipna) 38 | soil_count = soil_data.count() 39 | sfc_flux_mean = sfc_flux_data.mean(skipna=skipna) 40 | soil_std = soil_data.std(skipna=skipna) 41 | numer = ((soil_data-soil_mean) * (sfc_flux_data-sfc_flux_mean)).sum(skipna=skipna) 42 | 43 | # No other object types are supported 44 | else: 45 | raise TypeError("Only Xarray DataArray or Pandas Series objects are supported. Input objects must be of the same type. Got "+str(type(soil_data))+" for soil_data and "+str(type(sfc_flux_data))+" for sfc_flux_data") 46 | 47 | # Compute the covariance term 48 | covarTerm = numer / soil_count 49 | 50 | # Return the Terrestrial Coupling Index (TCI) 51 | return covarTerm/soil_std 52 | 53 | -------------------------------------------------------------------------------- /metcalcpy/event_equalize_against_values.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | Program Name: event_equalize_against_values.py 13 | """ 14 | 15 | import pandas as pd 16 | from metcalcpy.util.safe_log import safe_log 17 | __author__ = 'Tatiana Burek' 18 | __version__ = '0.1.0' 19 | 20 | 21 | def event_equalize_against_values(series_data, input_unique_cases, logger=None): 22 | """Performs event equalisation. 23 | 24 | event_equalize_against_values assumes that the input series_data contains data 25 | indexed by fcst_valid, series values and the independent variable values.
26 | It builds a new data frame which contains the same 27 | data except for records that don't have corresponding fcst_valid 28 | and fcst_lead values from ee_stats_equalize 29 | 30 | Args: 31 | series_data: data frame containing the records to equalize, including fcst_valid_beg, series 32 | values and independent variable values 33 | 34 | input_unique_cases: unique cases to equalize against 35 | Returns: 36 | A data frame that contains equalized records or empty frame 37 | """ 38 | 39 | warning_remove = "WARNING: event equalization removed {} rows" 40 | 41 | safe_log(logger, "info", "Starting event equalization.") 42 | 43 | column_names = list(series_data) 44 | 45 | if 'fcst_valid' in column_names: 46 | # always use fcst_valid for equalization 47 | # create a unique member to use for equalization 48 | series_data.insert(len(series_data.columns), 'equalize', 49 | series_data['fcst_valid'].astype(str) 50 | + ' ' 51 | + series_data['fcst_lead'].astype(str)) 52 | else: 53 | safe_log(logger, "warning", "WARNING: eventEqualize() did not run due to lack of valid time field.") 54 | print("WARNING: eventEqualize() did not run due to lack of valid time field") 55 | return pd.DataFrame() 56 | 57 | # create an equalized set of data for the minimal list of dates based on the input cases 58 | data_for_unique_cases = series_data[(series_data['equalize'].isin(input_unique_cases))] 59 | n_row_cases = len(data_for_unique_cases) 60 | if n_row_cases == 0: 61 | safe_log(logger, "warning", "WARNING: discarding all members. No matching cases found.") 62 | print(" WARNING: discarding all members") 63 | return pd.DataFrame() 64 | 65 | n_row_ = len(series_data) 66 | if n_row_cases != n_row_: 67 | safe_log(logger, "warning", warning_remove.format(n_row_ - n_row_cases)) 68 | print(warning_remove.format(n_row_ - n_row_cases)) 69 | 70 | # remove 'equalize' column 71 | data_for_unique_cases = data_for_unique_cases.drop(['equalize'], axis=1) 72 | safe_log(logger, "info", "Event equalization completed successfully.") 73 | return data_for_unique_cases 74 | -------------------------------------------------------------------------------- /metcalcpy/logging_config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import getpass 5 | import signal 6 | import time 7 | 8 | class UserIDFormatter(logging.Formatter): 9 | """ 10 | Custom formatter to add user_id in place of the logger name. 11 | """ 12 | def __init__(self, user_id, fmt=None, datefmt=None): 13 | super().__init__(fmt, datefmt) 14 | self.user_id = user_id 15 | 16 | def format(self, record): 17 | # Override the 'name' attribute with user_id 18 | record.name = self.user_id 19 | return super().format(record) 20 | 21 | def handle_signals(signum, frame): 22 | """ 23 | Handle signals to perform clean shutdown or other custom actions. 24 | """ 25 | logger = logging.getLogger() 26 | logger.warning(f'Received signal {signal.strsignal(signum)}. Shutting down.') 27 | sys.exit(0) 28 | 29 | def setup_logging(config_params): 30 | """ 31 | Set up logging based on the configuration from a YAML file. 32 | 33 | Args: 34 | config_params (dict): The dictionary containing logging configuration (log directory, filename, level). 35 | 36 | Returns: 37 | logger (logging.Logger): Configured logger. 
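Example (a minimal sketch; the keys mirror the YAML fields read below, and the values are placeholders):
    >>> logger = setup_logging({'log_dir': './logs',
    ...                         'log_filename': 'app.log',
    ...                         'log_level': 'INFO'})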
38 | """ 39 | # Get user ID and command line 40 | user_id = getpass.getuser() 41 | command_line = " ".join(sys.argv) 42 | # Create log directory if it doesn't exist, using the path from the config 43 | log_dir = config_params.get('log_dir') # No default here, expect it from YAML 44 | if not log_dir: 45 | log_dir = './logs' # Set default only if not provided 46 | if not os.path.exists(log_dir): 47 | os.makedirs(log_dir) 48 | 49 | # Set log filename, incorporating the log directory path from the config 50 | log_filename = config_params.get('log_filename') # No default here, expect it from YAML 51 | if not log_filename: 52 | #log_filename = 'application.log' # Set default only if not provided 53 | return None 54 | log_file = os.path.join(log_dir, log_filename) 55 | 56 | # Set log level from YAML or use default; convert to appropriate logging level 57 | log_level = config_params.get('log_level') # No default here, expect it from YAML 58 | if not log_level: 59 | log_level = 'WARNING' # Set default only if not provided 60 | log_level = log_level.upper() 61 | 62 | 63 | # Create a custom formatter that uses UTC for date and includes user_id instead of logger name 64 | # Add ' UTC' to the format string for the time 65 | formatter = UserIDFormatter( 66 | user_id=user_id, 67 | fmt='%(asctime)s UTC - %(name)s - %(levelname)s - %(message)s', 68 | datefmt='%Y-%m-%d %H:%M:%S' 69 | ) 70 | 71 | # Set up logging to write to a file 72 | file_handler = logging.FileHandler(log_file) 73 | file_handler.setLevel(getattr(logging, log_level, logging.INFO)) 74 | file_handler.setFormatter(formatter) 75 | 76 | logger = logging.getLogger() 77 | logger.setLevel(getattr(logging, log_level, logging.INFO)) 78 | logger.addHandler(file_handler) 79 | 80 | # Set logger to use UTC time 81 | logging.Formatter.converter = time.gmtime 82 | 83 | # Register signal handlers for graceful shutdown 84 | signal.signal(signal.SIGINT, handle_signals) 85 | signal.signal(signal.SIGTERM, handle_signals) 86 | 87 | logger.info(f"User: {user_id} has started the script with command: {command_line}") 88 | 89 | return logger 90 | 91 | -------------------------------------------------------------------------------- /metcalcpy/piecewise_linear.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | # -*- coding: utf-8 -*- 12 | """ 13 | Piecewise linear function class. 14 | 15 | """ 16 | 17 | import numpy as np 18 | from metcalcpy.util.safe_log import safe_log 19 | 20 | __author__ = 'Bill Campbell (NRL)' 21 | __version__ = '0.1.0' 22 | 23 | 24 | class IncompatibleLengths(Exception): 25 | """Custom exception for PiecewiseLinear input checking.""" 26 | 27 | 28 | class UnsortedArray(Exception): 29 | """Custom exception for PiecewiseLinear input checking.""" 30 | 31 | 32 | class PiecewiseLinear(): 33 | """ 34 | Defines a piecewise linear function with a given domain and range. 35 | 36 | Xdomain is a numpy array of knot locations. Yrange is a numpy array. 
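Example (a minimal sketch with hypothetical knots):
    >>> import numpy as np
    >>> env = PiecewiseLinear(np.array([0.0, 10.0]), np.array([1.0, 0.0]))
    >>> env.values([5.0])  # linear interpolation between the knots: array([0.5])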
37 | """ 38 | 39 | def __init__(self, x_domain, y_range, xunits='feet', 40 | left=np.nan, right=np.nan, name="", logger=None): 41 | 42 | self.logger = logger 43 | len_x = len(x_domain) 44 | if len_x < 2: 45 | safe_log(logger, "error", f'X_domain (in {xunits}) must contain at least 2 points.') 46 | raise IncompatibleLengths('Length of xdomain must be at least 2.') 47 | if np.any(np.diff(x_domain) < 0): 48 | safe_log(logger, "error", "X_domain (in {}) is {}".format(xunits, x_domain)) 49 | print("X_domain (in {}) is {}".format(xunits, x_domain)) 50 | raise UnsortedArray('Xdomain must be sorted in ascending order.') 51 | len_y = len(y_range) 52 | if len_x != len_y: 53 | safe_log(logger, "error", "X_domain and Y_range must have the same length.") 54 | raise IncompatibleLengths('X_domain and Y_range must have same ' + 55 | 'length.\n Use left and right to set ' + 56 | 'value for points outside the x_domain\n') 57 | self.x_domain = np.array(x_domain) 58 | self.y_range = np.array(y_range) 59 | self.xunits = xunits 60 | self.left = left 61 | self.right = right 62 | self.name = name 63 | 64 | def get_ymax(self): 65 | """Find maximum of envelope function""" 66 | return np.max(self.y_range) 67 | 68 | def values(self, xinput): 69 | """ 70 | Evaluate piecewise linear function for the set of points in xinput. 71 | 72 | xinput is a set of points inside xdomain. 73 | """ 74 | if not isinstance(xinput, np.ndarray): 75 | xinput = np.array(xinput) 76 | xin_shape = np.shape(xinput) 77 | # Treat xinput as one-dimensional for use by np.interp 78 | xview = xinput.ravel() 79 | yflat = np.interp(xview, self.x_domain, self.y_range, 80 | left=self.left, right=self.right) 81 | # Restore output to same shape as input 82 | youtput = np.reshape(yflat, xin_shape) 83 | 84 | return youtput 85 | 86 | if __name__ == "__main__": 87 | pass 88 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/README: -------------------------------------------------------------------------------- 1 | This is where pre-processing modules will reside.
2 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/metcalcpy/pre_processing/__init__.py -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/.gitignore: -------------------------------------------------------------------------------- 1 | workdir/ 2 | temp/ 3 | plots/ 4 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/config/config_agg_stat.yaml: -------------------------------------------------------------------------------- 1 | agg_stat_input: /path-to-METcalcpy-base/test/data/rrfs_ecnt_for_agg.data 2 | agg_stat_output: /path-to/ecnt_aggregated.data 3 | alpha: 0.05 4 | append_to_file: null 5 | circular_block_bootstrap: True 6 | derived_series_1: [] 7 | derived_series_2: [] 8 | event_equal: False 9 | fcst_var_val_1: 10 | TMP: 11 | - ECNT_RMSE 12 | - ECNT_SPREAD_PLUS_OERR 13 | fcst_var_val_2: {} 14 | indy_vals: 15 | - '30000' 16 | - '60000' 17 | - '90000' 18 | - '120000' 19 | - '150000' 20 | - '160000' 21 | - '170000' 22 | - '180000' 23 | - '200000' 24 | - '240000' 25 | - '270000' 26 | - '300000' 27 | - '330000' 28 | - '340000' 29 | - '360000' 30 | indy_var: fcst_lead 31 | line_type: ecnt 32 | list_stat_1: 33 | - ECNT_RMSE 34 | - ECNT_SPREAD_PLUS_OERR 35 | list_stat_2: [] 36 | method: perc 37 | num_iterations: 1 38 | num_threads: -1 39 | random_seed: null 40 | series_val_1: 41 | model: 42 | - RRFS_GEFS_GF.SPP.SPPT 43 | series_val_2: {} 44 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/config/config_aggregation_preprocessor.yaml: -------------------------------------------------------------------------------- 1 | prefix: "/scratch2/BMC/fv3lam/HIWT/expt_dirs/RRFS_GDAS_GF.SPP.SPPT_20220501-06/" 2 | suffix: "/metprd/grid_stat_cmn" 3 | dates: 4 | - '2022050100' 5 | - '2022050200' 6 | - '2022050300' 7 | - '2022050400' 8 | - '2022050500' 9 | - '2022050600' 10 | members: 11 | - 'mem01' 12 | - 'mem02' 13 | - 'mem03' 14 | - 'mem04' 15 | - 'mem05' 16 | - 'mem06' 17 | - 'mem07' 18 | - 'mem08' 19 | - 'mem09' 20 | - 'mem10' 21 | group_members: False 22 | group_name: "RRFS_GDAS_GF.SPP_agg" 23 | output_xml_file: "point_stat.xml" 24 | output_yaml_file: "point_stat.yaml" 25 | output_reformatted_file: "grid_stat_reformatted.txt" 26 | output_aggregate_file: "grid_stat_reformatted.agg.txt" 27 | metdataio_dir: "/home/Vanderlei.Vargas/Packages/METdataio" 28 | fcst_var: 29 | - APCP_03 30 | fcst_thresh: 31 | - ">0.0" 32 | list_stat: 33 | - GSS 34 | log_file: log.agg_wflow 35 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/config/config_plot_cmn.yaml: -------------------------------------------------------------------------------- 1 | # General plot appearance and settings 2 | alpha: 0.05 3 | colors: 4 | - '#32cd32' # LimeGreen 5 | - '#ff6347' # Tomato 6 | - '#4682b4' # SteelBlue 7 | - '#ffa500' # Orange 8 | - '#6a5acd' # SlateBlue 9 | - '#20b2aa' # LightSeaGreen 10 | - '#4682b4' # SteelBlue 11 | - '#ffa500' # Orange 12 | - '#6a5acd' # SlateBlue 13 | - '#20b2aa' # LightSeaGreen 14 | plot_height: 8.5 15 | plot_res: 72 16 | plot_units: in 17 | plot_width: 11.0 18 | plot_caption: '' 19 | 
plot_filename: ./test.png 20 | title: Performance Diagram (APCP3h - May 1st to 6th, 2022) 21 | title_align: 0.5 22 | title_offset: -2 23 | title_size: 1.8 24 | title_weight: 2.0 25 | 26 | # Grid configuration 27 | grid_col: '#cccccc' 28 | grid_lty: 3 29 | grid_lwd: 1 30 | grid_on: 'True' 31 | 32 | # Axis labels and tick configuration 33 | xaxis: Success Ratio 34 | xlab_offset: 0 35 | xlab_size: 1.75 36 | xlab_weight: 2 37 | xtlab_horiz: 0.5 38 | xtlab_orient: 2 39 | xtlab_size: 1.2 40 | yaxis_1: Probability of Detection (PODY) 41 | ylab_offset: 0 42 | ylab_size: 2.5 43 | ylab_weight: 1 44 | ytlab_orient: 1 45 | ytlab_size: 0.9 46 | 47 | # Caption settings 48 | caption_align: 0.0 49 | caption_col: '#333333' 50 | caption_offset: 30.0 51 | caption_size: 0.8 52 | caption_weight: 1 53 | 54 | # Legend configuration 55 | legend_box: o # 'o' likely stands for the shape or border style of the legend box 56 | legend_inset: 57 | x: 0.25 58 | y: -0.27 59 | legend_ncol: 3 60 | legend_size: 1.0 61 | user_legend: 62 | - 'mem01' 63 | - 'mem02' 64 | - 'mem03' 65 | - 'mem04' 66 | - 'mem05' 67 | - 'mem06' 68 | - 'mem07' 69 | - 'mem08' 70 | - 'mem09' 71 | - 'mem10' 72 | 73 | # Series plot settings 74 | plot_ci: 75 | - none 76 | - none 77 | - none 78 | - none 79 | - none 80 | - none 81 | - none 82 | - none 83 | - none 84 | - none 85 | plot_disp: 86 | - 'True' 87 | - 'True' 88 | - 'True' 89 | - 'True' 90 | - 'True' 91 | - 'True' 92 | - 'True' 93 | - 'True' 94 | - 'True' 95 | - 'True' 96 | series_order: 97 | - 1 98 | - 2 99 | - 3 100 | - 4 101 | - 5 102 | - 6 103 | - 7 104 | - 8 105 | - 9 106 | - 10 107 | series_line_style: 108 | - -- 109 | - -. 110 | - ':' 111 | - -- 112 | - ':' 113 | - -. 114 | - ':' 115 | - '-' 116 | - -. 117 | - ':' 118 | series_line_width: 119 | - 1 120 | - 1 121 | - 1 122 | - 1 123 | - 1 124 | - 1 125 | - 1 126 | - 1 127 | - 1 128 | - 1 129 | series_symbols: 130 | - o 131 | - ^ 132 | - s 133 | - d 134 | - o 135 | - ^ 136 | - . 
137 | - d 138 | - o 139 | - ^ 140 | series_type: 141 | - joined lines 142 | 143 | # Model series values 144 | series_val_1: 145 | model: 146 | - RRFS_GDAS_GF.SPP.SPPT_mem01 147 | - RRFS_GDAS_GF.SPP.SPPT_mem02 148 | - RRFS_GDAS_GF.SPP.SPPT_mem03 149 | - RRFS_GDAS_GF.SPP.SPPT_mem04 150 | - RRFS_GDAS_GF.SPP.SPPT_mem05 151 | - RRFS_GDAS_GF.SPP.SPPT_mem06 152 | - RRFS_GDAS_GF.SPP.SPPT_mem07 153 | - RRFS_GDAS_GF.SPP.SPPT_mem08 154 | - RRFS_GDAS_GF.SPP.SPPT_mem09 155 | - RRFS_GDAS_GF.SPP.SPPT_mem10 156 | 157 | # Data input and derived series settings 158 | stat_input: ./output.txt 159 | fcst_var_val_1: 160 | APCP_03: 161 | - FBIAS 162 | derived_series_1: [] 163 | derived_series_2: [] 164 | series_val_2: {} 165 | stat_curve: None 166 | 167 | # Bootstrap and data dumping settings for METviewer compatibility 168 | circular_block_bootstrap: 'True' 169 | dump_points_1: 'False' 170 | add_point_thresholds: false 171 | create_html: 'False' 172 | 173 | # Independent variable settings for plotting 174 | indy_vals: 175 | - '30000' 176 | - '60000' 177 | - '90000' 178 | - '120000' 179 | - '150000' 180 | - '180000' 181 | - '210000' 182 | - '240000' 183 | - '270000' 184 | - '300000' 185 | - '330000' 186 | - '360000' 187 | indy_var: fcst_lead 188 | 189 | # Miscellaneous settings 190 | ensss_pts: -1 191 | ensss_pts_disp: 'True' 192 | equalize_by_indep: 'False' 193 | event_equal: false 194 | roc_ctc: true 195 | roc_pct: false 196 | show_nstats: 'False' 197 | show_signif: 198 | - 'False' 199 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/config/custom_line.yaml: -------------------------------------------------------------------------------- 1 | indy_label: 2 | - '3' 3 | - '6' 4 | - '9' 5 | - '12' 6 | - '15' 7 | - '18' 8 | - '21' 9 | - '24' 10 | - '27' 11 | - '30' 12 | - '33' 13 | - '36' 14 | con_series: 15 | - 1 16 | - 1 17 | - 1 18 | - 1 19 | - 1 20 | - 1 21 | - 1 22 | - 1 23 | - 1 24 | - 1 25 | fcst_var_val_1: 26 | APCP_03: 27 | - GSS 28 | list_stat_1: 29 | - GSS 30 | series_type: 31 | - b 32 | - b 33 | - b 34 | - b 35 | - b 36 | - b 37 | - b 38 | - b 39 | - b 40 | - b 41 | stat_input: ./output.txt 42 | title: 'Frequency Bias: 3h Accumulated Precipitation > 0 mm (04 May 2022)' 43 | # Axis labels and tick configuration 44 | xaxis: Forecast Lead Time (h) 45 | xlab_offset: -20. 46 | xlab_size: 8 47 | xlab_weight: 1 48 | xtlab_horiz: 0.5 49 | xtlab_orient: 1 50 | xtlab_size: 0.9 51 | yaxis_1: Frequency Bias (mm) 52 | ylab_offset: -20. 
53 | ylab_size: 8 54 | ylab_weight: 1 55 | ytlab_orient: 1 56 | ytlab_size: 0.9 57 | plot_filename: plot_FBIAS_APCP3h.png 58 | 59 | # Legend configuration 60 | legend_box: o # 'o' likely stands for the shape or border style of the legend box 61 | legend_inset: 62 | x: 0 63 | y: -0.2 64 | legend_ncol: 3 65 | legend_size: 1.0 66 | user_legend: 67 | - 'mem01' 68 | - 'mem02' 69 | - 'mem03' 70 | - 'mem04' 71 | - 'mem05' 72 | - 'mem06' 73 | - 'mem07' 74 | - 'mem08' 75 | - 'mem09' 76 | - 'mem10' 77 | ylim: [0.8,2.0] 78 | 79 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/config/custom_performance_diagram.yaml: -------------------------------------------------------------------------------- 1 | title: "Performance Diagram" 2 | # Axis labels and tick configuration 3 | xaxis: Success Ratio 4 | xlab_offset: -0.1 5 | xlab_size: 1.5 6 | xlab_weight: 1 7 | xtlab_horiz: 0.5 8 | xtlab_orient: 1 9 | xtlab_size: 0.9 10 | yaxis_1: Probability of Detection (PODY) 11 | ylab_offset: -0.1 12 | ylab_size: 1.5 13 | ylab_weight: 1 14 | ytlab_orient: 1 15 | ytlab_size: 0.9 16 | plot_filename: plot_performance_diagram_APCP3h.png -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/config/custom_taylor_diagram.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # custom config file to override some of the settings in the 3 | # default config file taylor_diagram_defaults.yaml 4 | stat_input: ./plot_dlwr_sample.data 5 | plot_filename: ./test_neg_and_pos_corr_plot.png 6 | taylor_show_gamma: True 7 | # Show only positive values of correlation 8 | taylor_voc: False 9 | legend_inset: 10 | x: 0.1 11 | y: -0.25 -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/src/yaml_preprocessor.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import yaml 3 | 4 | def read_yaml_file(file_path): 5 | """Reads a YAML file and returns its contents as a dictionary.""" 6 | with open(file_path, 'r') as file: 7 | return yaml.safe_load(file) 8 | 9 | def write_yaml_file(data, file_path): 10 | """Writes a dictionary to a YAML file.""" 11 | with open(file_path, 'w') as file: 12 | yaml.dump(data, file, default_flow_style=False) 13 | 14 | def combine_configs(base_config_path, override_config_path): 15 | """Combines two configurations, giving preference to the override config.""" 16 | base_config = read_yaml_file(base_config_path) 17 | override_config = read_yaml_file(override_config_path) 18 | combined = base_config.copy() # Start with the base config 19 | combined.update(override_config) # Update with override config, overwriting base settings 20 | return combined 21 | 22 | def main(): 23 | parser = argparse.ArgumentParser(description="Combine two YAML configuration files.") 24 | parser.add_argument("base_config", help="The path to the base YAML configuration file.") 25 | parser.add_argument("override_config", help="The path to the YAML configuration file that will override the base config.") 26 | parser.add_argument("-o", "--output", default="combined_config.yaml", 27 | help="Output path for the combined YAML configuration file.") 28 | 29 | args = parser.parse_args() 30 | 31 | # Combine both configurations using provided arguments 32 | combined_config = combine_configs(args.base_config, args.override_config) 33 | 34 | # Write the combined 
configuration to the specified output file 35 | write_yaml_file(combined_config, args.output) 36 | print(f"Combined configuration written to {args.output}") 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /metcalcpy/pre_processing/aggregation/wrapper/environment.yaml: -------------------------------------------------------------------------------- 1 | 2 | METCALCPY: "/path/to/METcalcpy" 3 | METPLOTPY: "/path/to/METplotpy" 4 | AGG_WFLOW: "/path/to/METcalcpy/metcalcpy/pre_processing/aggregation" 5 | 6 | AGG_STAT_YAML: "../config/config_agg_stat.yaml" 7 | 8 | WORKDIR: "./workdir" 9 | 10 | AGG_STAT: True 11 | AGG_PREP: True 12 | 13 | # Plots 14 | PERFORMANCE_DIAGRAM: False 15 | LINE: True 16 | TAYLOR: False 17 | 18 | 19 |
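Usage sketch (not part of the repository) for the yaml_preprocessor script above; the input file names are the repository's own sample configs and the output name is arbitrary:

    python yaml_preprocessor.py config_plot_cmn.yaml custom_line.yaml -o combined_config.yaml

Top-level keys in custom_line.yaml overwrite the matching keys from config_plot_cmn.yaml; all other base settings are carried through unchanged.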
-------------------------------------------------------------------------------- /metcalcpy/pre_processing/directional_means.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** CIRES, Regents of the University of Colorado 7 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 8 | # ============================* 9 | 10 | 11 | 12 | import numpy as np 13 | import xarray as xr 14 | 15 | 16 | def zonal_mean(dat,dimvar='longitude'): 17 | """Compute the zonal mean. 18 | Parameters 19 | ---------- 20 | dat : `xarray.DataArray` or `xarray.Dataset` 21 | data containing a dimension that you want to compute a zonal mean on 22 | dimvar: Name of the dimension to compute the zonal mean. Longitude is the 23 | default if it's not specified 24 | Returns 25 | ------- 26 | `xarray.DataArray` or `xarray.Dataset` 27 | the mean across the zonal dimension 28 | """ 29 | return dat.mean(dimvar) 30 | 31 | 32 | def meridional_mean(dat, lat1, lat2, dimvar='latitude'): 33 | """Compute the cos(lat) weighted mean of a quantity between two latitudes. 34 | Parameters 35 | ---------- 36 | dat : `xarray.DataArray` or `xarray.Dataset` 37 | data containing a dimension that you want to compute a meridional mean 38 | on that spans lat1 and lat2 39 | lat1 : float 40 | The beginning latitude limit of the band average. This should always be less 41 | than lat2 42 | lat2 : float 43 | The ending latitude limit of the band average. This should always be greater 44 | than lat1 45 | dimvar: Name of the dimension to compute the meridional mean. Latitude is the 46 | default if it's not specified 47 | Returns 48 | ------- 49 | `xarray.DataArray` or `xarray.Dataset` 50 | the weighted mean across the latitude dimension limited 51 | by lat1 and lat2 52 | """ 53 | 54 | # Check inputs 55 | if lat1 > lat2: 56 | raise ValueError('lat1 is greater than lat2, but it must be less than lat2') 57 | elif lat1 == lat2: 58 | raise ValueError('lat1 is equal to lat2, but it must be less than lat2') 59 | 60 | wgts = np.cos(np.deg2rad(dat[dimvar].where((dat[dimvar] >= lat1) & (dat[dimvar] <= lat2),drop=True))) 61 | 62 | return dat.where((dat[dimvar] >= lat1) & (dat[dimvar] <= lat2),drop=True).weighted(wgts).mean(dimvar) 63 | -------------------------------------------------------------------------------- /metcalcpy/util/README: -------------------------------------------------------------------------------- 1 | This directory contains all the utility code/code that 2 | is commonly used by multiple modules or packages, or 3 | can be used stand-alone. 4 | -------------------------------------------------------------------------------- /metcalcpy/util/README_util.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | Module that contains other utilities that can be used for 'odds and ends' 3 | ========================================================================== 4 | Input: 5 | a list or numpy array (floats or ints) of longitudes from the range 0 to 360 6 | 7 | Output: 8 | a numpy array (float or integer, depending on input) of longitudes from -180 to 180 9 | 10 | 11 | 12 | convert_lons_indices() 13 | Input: 14 | lons_in: a list of longitudes to convert 15 | 16 | minlon_in: The minimum value/start value for converted longitudes 17 | 18 | 19 | 20 | Returns: 21 | 22 | reordered_lons: sorted array of longitudes 23 | 24 | lonsortlocs: sorted array indices 25 | 26 | -------------------------------------------------------------------------------- /metcalcpy/util/__init__.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /metcalcpy/util/eclv_statistics.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | Program Name: eclv_statistics.py 13 | """ 14 | from typing import Union 15 | import warnings 16 | import numpy as np 17 | 18 | from metcalcpy.util.ctc_statistics import calculate_economic_value 19 | from metcalcpy.util.utils import sum_column_data_by_name 20 | from metcalcpy.util.safe_log import safe_log 21 | __author__ = 'Tatiana Burek' 22 | 23 | 24 | def calculate_eclv(input_data: np.array, columns_names: np.array, 25 | thresh: Union[float, None], line_type: str, cl_pts: list, add_base_rate: int = 0, logger=None) \ 26 | -> Union[dict, None]: 27 | """Performs calculation of ECLV - 
The Economic Cost Loss Value 28 | 29 | Args: 30 | input_data: 2-dimensional numpy array with data for the calculation 31 | 1st dimension - the row of data frame 32 | 2nd dimension - the column of data frame 33 | columns_names: names of the columns for the 2nd dimension as Numpy array 34 | thresh: threshold value for 'pct' line type 35 | cl_pts: Cost loss ratio. The relative value of being unprepared 36 | and taking a loss to that of un-necessarily preparing. For example, 37 | cl = 0.1 indicates it would cost $ 1 to prevent a $10 loss. 38 | This defaults to the sequence 0.05 to 0.95 by 0.05. 39 | line_type: line type of the data 'ctc' or 'pct' 40 | add_base_rate: add Base rate point to cl or not (1 = add, 0 = don't add) 41 | 42 | Returns: 43 | 44 | If assigned to an object, the following values are reported in the dictionary: 45 | vmax - Maximum value 46 | V - Vector of values for each cl value 47 | F - Conditional false alarm rate. 48 | H - Conditional hit rate 49 | cl - Vector of cost loss ratios. 50 | s - Base rate 51 | or None if some of the data values are missing or invalid 52 | """ 53 | warnings.filterwarnings('error') 54 | 55 | 56 | # some validation 57 | if line_type != 'ctc' and line_type != 'pct': 58 | safe_log(logger, "error", f"Incorrect line type {line_type} for calculating ECLV.") 59 | print(f'ERROR: incorrect line type {line_type} for calculating ECLV') 60 | return None 61 | if line_type == 'pct' and thresh is None: 62 | safe_log(logger, "error", "Threshold is required for line type 'pct' in calculating ECLV.") 63 | print('ERROR: provide thresh for calculating ECLV') 64 | return None 65 | 66 | try: 67 | if line_type == 'pct': 68 | index_thresh_i = np.where(columns_names == 'thresh_i')[0] 69 | index_oy_i = np.where(columns_names == 'oy_i')[0] 70 | index_on_i = np.where(columns_names == 'on_i')[0] 71 | thresh_i_more = input_data[:, index_thresh_i] > thresh 72 | thresh_i_less = input_data[:, index_thresh_i] <= thresh 73 | 74 | n11 = np.nansum(input_data[:, index_oy_i][thresh_i_more].astype(float)) 75 | n10 = np.nansum(input_data[:, index_on_i][thresh_i_more].astype(float)) 76 | n01 = np.nansum(input_data[:, index_oy_i][thresh_i_less].astype(float)) 77 | n00 = np.nansum(input_data[:, index_on_i][thresh_i_less].astype(float)) 78 | else: 79 | n11 = sum_column_data_by_name(input_data, columns_names, 'fy_oy') 80 | n10 = sum_column_data_by_name(input_data, columns_names, 'fy_on') 81 | n01 = sum_column_data_by_name(input_data, columns_names, 'fn_oy') 82 | n00 = sum_column_data_by_name(input_data, columns_names, 'fn_on') 83 | safe_log(logger, "debug", f"n11: {n11}, n10: {n10}, n01: {n01}, n00: {n00}") 84 | result = calculate_economic_value(np.array([n11, n10, n01, n00]), cl_pts, add_base_rate == 1) 85 | safe_log(logger, "info", "ECLV calculation completed successfully.") 86 | except (TypeError, ZeroDivisionError, Warning, ValueError) as e: 87 | safe_log(logger, "error", f"ECLV calculation failed due to an error: {e}") 88 | result = None 89 | warnings.filterwarnings('ignore') 90 | return result 91 | -------------------------------------------------------------------------------- /metcalcpy/util/mcts_statistics.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 
80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | Program Name: mcts_statistics.py 13 | """ 14 | import warnings 15 | import numpy as np 16 | from metcalcpy.util.utils import round_half_up, sum_column_data_by_name, PRECISION 17 | from metcalcpy.util.safe_log import safe_log 18 | 19 | __author__ = 'Tatiana Burek' 20 | 21 | 22 | def calculate_mcts_hss_ec(input_data, columns_names, logger=None): 23 | """Performs calculation of HSS_EC - a skill score based on accuracy. 24 | 25 | Args: 26 | input_data: 2-dimensional numpy array with data for the calculation 27 | 1st dimension - the row of data frame 28 | 2nd dimension - the column of data frame 29 | columns_names: names of the columns for the 2nd dimension as Numpy array 30 | 31 | Returns: 32 | calculated HSS_EC as float 33 | or None if some of the data values are missing or invalid 34 | """ 35 | warnings.filterwarnings('error') 36 | 37 | try: 38 | row = input_data[0].copy() 39 | n_cat = row[np.where(columns_names == 'n_cat')[0][0]] 40 | ec_value = row[np.where(columns_names == 'ec_value')[0][0]] 41 | 42 | safe_log(logger, "debug", f"Number of categories (n_cat): {n_cat}") 43 | safe_log(logger, "debug", f"Expected correct (ec_value): {ec_value}") 44 | 45 | # Aggregate all fi_oj in one row 46 | for index in range(n_cat * n_cat): 47 | column_name = 'fi_oj_' + str(index) 48 | row[np.where(columns_names == column_name)[0][0]] = \ 49 | sum_column_data_by_name(input_data, columns_names, column_name) 50 | 51 | # Initialize contingency table 52 | cont_table = [[0] * n_cat for _ in range(n_cat)] 53 | 54 | # Fill contingency table 55 | for index in range(n_cat * n_cat): 56 | i_value = row[np.where(columns_names == 'i_value_' + str(index))[0][0]] 57 | j_value = row[np.where(columns_names == 'j_value_' + str(index))[0][0]] 58 | fi_oj = row[np.where(columns_names == 'fi_oj_' + str(index))[0][0]] 59 | cont_table[i_value - 1][j_value - 1] = fi_oj 60 | 61 | safe_log(logger, "debug", f"Contingency table: {cont_table}") 62 | 63 | # Calculate the sum of the counts on the diagonal and the sum of the counts across the whole MCTC table 64 | diag_count = sum([cont_table[i][j] for i in range(n_cat) for j in range(n_cat) if i == j]) 65 | sum_all = sum(sum(cont_table, [])) 66 | result = (diag_count - (ec_value * sum_all)) / (sum_all - (ec_value * sum_all)) 67 | result = round_half_up(result, PRECISION) 68 | 69 | safe_log(logger, "debug", f"Calculated HSS_EC: {result}") 70 | 71 | except (TypeError, ZeroDivisionError, Warning, ValueError) as e: 72 | safe_log(logger, "error", f"Error encountered during calculation: {str(e)}") 73 | result = None 74 | 75 | warnings.filterwarnings('ignore') 76 | return result 77 |
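Usage sketch (not part of the repository): a hypothetical 2x2 multi-category contingency input for calculate_mcts_hss_ec above. The column layout mirrors what the function reads; dtype=object is assumed to keep the category indices as Python ints, and the column summation is delegated to sum_column_data_by_name:

    import numpy as np

    cols = np.array(['n_cat', 'ec_value',
                     'i_value_0', 'j_value_0', 'fi_oj_0',
                     'i_value_1', 'j_value_1', 'fi_oj_1',
                     'i_value_2', 'j_value_2', 'fi_oj_2',
                     'i_value_3', 'j_value_3', 'fi_oj_3'])
    row = np.array([2, 0.5,          # 2 categories, expected-correct value 0.5
                    1, 1, 30,        # forecast category 1, observed category 1
                    1, 2, 10,
                    2, 1, 5,
                    2, 2, 55], dtype=object)
    hss_ec = calculate_mcts_hss_ec(np.array([row]), cols)
    # HSS_EC = (diagonal - ec*total) / (total - ec*total) = (85 - 50) / (100 - 50) = 0.7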
-------------------------------------------------------------------------------- /metcalcpy/util/met_stats.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | """ 12 | Program Name: met_stats.py 13 | """ 14 | import math 15 | import numpy as np 16 | from metcalcpy.util.safe_log import safe_log 17 | 18 | __author__ = 'Tatiana Burek' 19 | __version__ = '0.1.0' 20 | 21 | 22 | def get_column_index_by_name(columns, column_name, logger=None): 23 | """Finds the index of the specified column in the array 24 | 25 | Args: 26 | columns: names of the columns as Numpy array 27 | column_name: the name of the column 28 | 29 | Returns: 30 | the index of the column 31 | or None if the column name does not exist in the array 32 | """ 33 | index_array = np.where(columns == column_name)[0] 34 | 35 | if index_array.size == 0: 36 | safe_log(logger, "warning", f"Column '{column_name}' not found in the array.") 37 | return None 38 | 39 | column_index = index_array[0] 40 | safe_log(logger, "debug", f"Column '{column_name}' found at index {column_index}.") 41 | return column_index 42 | 43 | 44 | def calc_direction(u_comp, v_comp, logger=None): 45 | """Calculates the direction of the wind from its u and v components, in degrees 46 | Args: 47 | u_comp: u wind component 48 | v_comp: v wind component 49 | 50 | Returns: 51 | direction of the wind in degrees, or None if both components are below the tolerance 52 | """ 53 | tolerance = 1e-5 54 | 55 | if abs(u_comp) < tolerance and abs(v_comp) < tolerance: 56 | safe_log(logger, "warning", "Both u and v components are below tolerance, returning None.") 57 | return None 58 | 59 | direction = np.arctan2(u_comp, v_comp) 60 | # Convert to [0, 360] 61 | direction = direction - 360 * math.floor(direction / 360) 62 | safe_log(logger, "debug", f"Calculated wind direction: {direction} degrees.") 63 | 64 | return direction 65 | 66 | 67 | def calc_speed(u_comp, v_comp, logger=None): 68 | """Calculates the speed of the wind from its u and v components 69 | Args: 70 | u_comp: u wind component 71 | v_comp: v wind component 72 | 73 | Returns: 74 | speed of the wind or None 75 | """ 76 | try: 77 | result = np.sqrt(u_comp * u_comp + v_comp * v_comp) 78 | safe_log(logger, "debug", f"Calculated wind speed: {result}.") 79 | except (TypeError, Warning) as e: 80 | result = None 81 | safe_log(logger, "warning", f"Failed to calculate wind speed: {str(e)}.") 82 | 83 | return result 84 | -------------------------------------------------------------------------------- /metcalcpy/util/read_env_vars_in_config.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | import os 12 | import re 13 | import yaml 14 | from metcalcpy.util.safe_log import safe_log 15 | 16 | def parse_config(path=None, data=None, tag='!ENV',logger=None): 17 | """ 18 | Load a yaml configuration file and resolve any environment variables 19 | The environment variables must have !ENV before them and be in this format 20 | to be parsed: ${VAR_NAME}. 21 | E.g.: 22 | 23 | database: 24 | host: !ENV ${HOST} 25 | port: !ENV ${PORT} 26 | app: 27 | log_path: !ENV '/var/${LOG_PATH}' 28 | something_else: !ENV '${AWESOME_ENV_VAR}/var/${A_SECOND_AWESOME_VAR}' 29 | 30 | :param str path: the path to the yaml file 31 | :param str data: the yaml data itself as a stream 32 | :param str tag: the tag to look for 33 | """ 34 | # pattern for global vars: look for ${word} 35 | pattern = re.compile(r'.*?\${(\w+)}.*?') 36 | loader = yaml.SafeLoader 37 | 38 | # the tag will be used to mark where to start searching for the pattern 39 | # e.g.
somekey: !ENV somestring${MYENVVAR}blah blah blah 40 | loader.add_implicit_resolver(tag, pattern, None) 41 | 42 | def constructor_env_variables(loader, node): 43 | """ 44 | Extracts the environment variable from the node's value 45 | :param yaml.Loader loader: the yaml loader 46 | :param node: the current node in the yaml 47 | :return: the parsed string that contains the value of the environment 48 | variable 49 | """ 50 | value = loader.construct_scalar(node) 51 | safe_log(logger, "debug", f"Processing value: {value}") 52 | match = pattern.findall(value) # to find all env variables in line 53 | if match: 54 | full_value = value 55 | for g in match: 56 | full_value = full_value.replace( 57 | f'${{{g}}}', os.environ.get(g, g) 58 | ) 59 | safe_log(logger, "debug", f"Replaced {g} with {full_value}") 60 | return full_value 61 | return value 62 | 63 | loader.add_constructor(tag, constructor_env_variables) 64 | 65 | if path: 66 | safe_log(logger, "debug", f"Loading YAML configuration from path: {path}") 67 | with open(path) as conf_data: 68 | config = yaml.load(conf_data, Loader=loader) 69 | elif data: 70 | safe_log(logger, "debug", "Loading YAML configuration from provided data stream.") 71 | config = yaml.load(data, Loader=loader) 72 | else: 73 | raise ValueError('Either a path or data should be defined as input') 74 | 75 | safe_log(logger, "debug", "YAML configuration loaded and processed successfully.") 76 | return config -------------------------------------------------------------------------------- /metcalcpy/util/safe_log.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | def safe_log(logger, log_level, message): 11 | """ 12 | Safely logs a message using the provided logger and log level. 13 | 14 | Args: 15 | logger (logging.Logger): The logger object. If None, the message will not be logged. 16 | log_level (str): The logging level to use (e.g., "info", "debug"). 17 | message (str): The message to log. 18 | """ 19 | if logger: 20 | log_method = getattr(logger, log_level, None) 21 | if callable(log_method): 22 | log_method(message)
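Usage sketch (not part of the repository): safe_log degrades to a no-op when no logger is configured, so library code can call it unconditionally:

    import logging
    from metcalcpy.util.safe_log import safe_log

    logger = logging.getLogger('demo')
    safe_log(logger, 'info', 'forwarded to logger.info')
    safe_log(None, 'info', 'silently dropped - no logger was configured')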
-------------------------------------------------------------------------------- /metcalcpy/util/wald_wolfowitz_runs_test.py: -------------------------------------------------------------------------------- 1 | # ============================* 2 | # ** Copyright UCAR (c) 2020 3 | # ** University Corporation for Atmospheric Research (UCAR) 4 | # ** National Center for Atmospheric Research (NCAR) 5 | # ** Research Applications Lab (RAL) 6 | # ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA 7 | # ============================* 8 | 9 | 10 | 11 | from typing import Union 12 | import math 13 | 14 | from statistics import mean, median 15 | import numpy as np 16 | from itertools import groupby 17 | from scipy.stats import norm 18 | 19 | from metcalcpy.util.correlation import remove_none 20 | 21 | 22 | def runs_test(x, alternative="two.sided", threshold='median') -> dict: 23 | """ 24 | Wald-Wolfowitz Runs Test. 25 | Performs the Wald-Wolfowitz runs test of randomness for continuous data. 26 | The p-value is computed using the normal approximation. 27 | Mimics the runs.test R function. 28 | 29 | :param x: a numeric vector containing the observations 30 | :param alternative: a character string with the alternative hypothesis. 31 | Must be one of "two.sided" (default), "left.sided" or "right.sided". 32 | :param threshold: the cut-point to transform the data into a dichotomous vector 33 | Must be one of "median" (default) or "mean". 34 | :return: Wald-Wolfowitz Runs Test results as a dictionary 35 | statistic - the value of the normalized statistic test 36 | p_value - the p-value of the test 37 | runs - the total number of runs 38 | mean_value - the mean value of the statistic test 39 | variance - the variance of the statistic test 40 | """ 41 | result = { 42 | 'statistic': None, 43 | 'p_value': None, 44 | 'runs': None, 45 | 'mean_value': None, 46 | 'variance': None 47 | } 48 | if alternative != "two.sided" and alternative != "left.sided" and alternative != "right.sided": 49 | print("runs_test must get a valid alternative") 50 | return result 51 | if len(x) == 0: 52 | return result 53 | 54 | x = remove_none(x) 55 | if threshold == 'median': 56 | x_threshold = median(x) 57 | elif threshold == "mean": 58 | x_threshold = mean(x) 59 | else: 60 | print('ERROR: incorrect threshold') 61 | return result 62 | x = [elem for elem in x if elem != x_threshold] 63 | res = [i - x_threshold for i in x] 64 | s = np.sign(res) 65 | n1 = 0 66 | n2 = 0 67 | for num in s: 68 | if num > 0: 69 | n1 += 1 70 | elif num < 0: 71 | n2 += 1 72 | runs = [(k, sum(1 for i in g)) for k, g in groupby(s)] 73 | r1 = 0 74 | r2 = 0 75 | for run in runs: 76 | if run[0] == 1: 77 | r1 += 1 78 | elif run[0] == -1: 79 | r2 += 1 80 | n = n1 + n2 81 | mean_value = 1 + 2 * n1 * n2 / (n1 + n2) 82 | variance = 2 * n1 * n2 * (2 * n1 * n2 - n1 - n2) / (n * n * (n - 1)) 83 | rr = r1 + r2 84 | pv = 0 85 | pv0 = norm.cdf((rr - mean_value) / math.sqrt(variance)) 86 | if alternative == "two.sided": 87 | pv = 2 * min(pv0, 1 - pv0) 88 | if alternative == "left.sided": 89 | pv = pv0 90 | if alternative == "right.sided": 91 | pv = 1 - pv0 92 | 93 | result['statistic'] = (rr - mean_value) / math.sqrt(variance) 94 | result['p_value'] = pv 95 | result['runs'] = rr 96 | result['mean_value'] = mean_value 97 | result['variance'] = variance 98 | 99 | return result 100 | -------------------------------------------------------------------------------- /nco_requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.24.2 2 | netcdf4>=1.6.2 3 | pytest>=7.2.1 4 | pyyaml>=6.0 5 | pandas>=1.5.2 6 | xarray>=2023.1.0 7 | scipy>=1.11.1 8 | metpy>=1.4.0 9 | pint>=0.20.1 10 | pip>=22.2.2 11 | python-dateutil>=2.8.2 12 | 13 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | imageio==2.25.0 2 | imutils==0.5.4 3 | metpy==1.4.0 4 | netCDF4==1.6.2 5 | numpy==1.24.2 6 | opencv-python>=4.7.0.72 7 | pandas==1.5.2 8 | pint==0.20.1 9 | pytest==7.2.1 10 | PyYAML==6.0 11 | scikit-image==0.19.3 12 | scipy>=1.11.1 13 | xarray==2023.1.0 14 | scikit-learn>=1.2.1 15 | eofs>=1.4.0 16 | 17 |
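Usage sketch (not part of the repository) for runs_test from metcalcpy/util/wald_wolfowitz_runs_test.py above, with made-up data:

    from metcalcpy.util.wald_wolfowitz_runs_test import runs_test

    sample = [0.2, 1.7, 0.4, 1.9, 0.1, 1.5, 0.3, 1.8]
    res = runs_test(sample, alternative='two.sided', threshold='median')
    print(res['runs'], res['p_value'])  # total number of runs and the two-sided p-value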
-------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | from setuptools import setup, find_packages 3 | from distutils.util import convert_path 4 | 5 | with open("README.md", "r") as fh: 6 | long_description = fh.read() 7 | 8 | main_ns = {} 9 | version_path = convert_path('docs/version') 10 | with open(version_path) as version_file: 11 | exec(version_file.read(), main_ns) 12 | 13 | setuptools.setup( 14 | name="metcalcpy", 15 | version=main_ns['__version__'], 16 | author="METplus", 17 | author_email="met-help@ucar.edu", 18 | description="statistics and util package for METplus", 19 | long_description=long_description, 20 | long_description_content_type="text/markdown", 21 | url="https://github.com/dtcenter/METcalcpy", 22 | packages=setuptools.find_packages(), 23 | classifiers=[ 24 | "Programming Language :: Python :: 3", 25 | "License :: OSI Approved :: Apache Software License", 26 | "Operating System :: OS Independent", 27 | ], 28 | python_requires='>=3.6', 29 | ) 30 | -------------------------------------------------------------------------------- /test/README: -------------------------------------------------------------------------------- 1 | This directory contains the tests (unit and others) for the source code in 2 | the metcalcpy/ directory. 3 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | r"""This module contains a variety of statistical calculations.""" 2 | 3 | -------------------------------------------------------------------------------- /test/convert_headers.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import argparse 3 | ''' 4 | Converts the uppercase MET headers into lower case for command line usage of 5 | agg_stat.py and other statistics modules. 6 | 7 | Requires the full path to the input file and a full path to the output file. 8 | 9 | ''' 10 | 11 | def change_header_case(input_args): 12 | ''' 13 | 14 | Args: 15 | input_args: the argparse argument object containing the input and output filenames 16 | 17 | Returns: 18 | None. Creates the MET input file with lower case headers.
Saved at the location specified 19 | by the command line arguments. 20 | ''' 21 | 22 | print(f"reading MET input with upper case headers: {input_args.input_file}") 23 | df = pd.read_csv(input_args.input_file, sep=r"\s+") 24 | uc_cols = df.columns.to_list() 25 | lc_cols = [col.lower() for col in uc_cols] 26 | df.columns = lc_cols 27 | 28 | df.to_csv(input_args.output_file, sep="\t", index=False) 29 | 30 | 31 | print(f"saving MET output with lower case headers: {input_args.output_file}") 32 | 33 | 34 | 35 | if __name__ == "__main__": 36 | parser = argparse.ArgumentParser(description="Changes upper case MET headers into lower case") 37 | 38 | parser.add_argument('-i', type=str, dest='input_file', required=True) 39 | parser.add_argument('-o', type=str, dest='output_file', required=True) 40 | 41 | input_args = parser.parse_args() 42 | change_header_case(input_args) -------------------------------------------------------------------------------- /test/data/.gitignore: -------------------------------------------------------------------------------- 1 | agg_eclv_data_output.data 2 | agg_ratio_data_output.data 3 | ee_av_output_py.data 4 | pstd.data 5 | scorecard_output.data 6 | -------------------------------------------------------------------------------- /test/data/ROC_CTC_thresh.data: -------------------------------------------------------------------------------- 1 | fcst_thresh fy_oy fy_on fn_oy fn_on fcst_valid_beg fcst_lead 2 | >0.635 151 1134 117 25222 2012-04-09 12:00:00 150000 3 | >0.635 149 1179 119 25177 2012-04-09 12:00:00 150000 4 | >0.635 151 1226 117 25130 2012-04-09 12:00:00 150000 5 | >0.635 151 1247 117 25109 2012-04-09 12:00:00 150000 6 | >0.635 152 1270 116 25086 2012-04-09 12:00:00 150000 7 | >0.635 146 1253 122 25103 2012-04-09 12:00:00 150000 8 | >0.635 151 1235 117 25121 2012-04-09 12:00:00 150000 9 | >0.635 119 821 149 25535 2012-04-09 12:00:00 120000 10 | >0.635 118 831 150 25525 2012-04-09 12:00:00 120000 11 | >0.635 122 876 146 25480 2012-04-09 12:00:00 120000 12 | >0.635 124 881 144 25475 2012-04-09 12:00:00 120000 13 | >0.635 122 900 146 25456 2012-04-09 12:00:00 120000 14 | >0.635 127 921 141 25435 2012-04-09 12:00:00 120000 15 | >0.635 122 872 146 25484 2012-04-09 12:00:00 120000 16 | -------------------------------------------------------------------------------- /test/data/agg_eclv_data.data: -------------------------------------------------------------------------------- 1 | model fcst_lev fcst_init_beg fcst_valid_beg fcst_lead interp_mthd fcst_var stat_name stat_value total fy_oy fy_on fn_oy fn_on 2 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 5558 497 187 685 4189 3 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 5558 497 187 685 4189 4 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 5558 24 14 232 5288 5 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 5558 24 14 232 5288 6 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 6862 1618 640 562 4042 7 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 6862 1618 640 562 4042 8 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 6862 255 165 155 6287 9 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 6862 255 165 155 6287 10 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 5479 785 330 662 3702 11 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00
120000 NEAREST VGRD ECLV NA 5479 785 330 662 3702 12 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 5479 23 20 232 5204 13 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 5479 23 20 232 5204 14 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 524 6 35 12 471 15 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 524 6 35 12 471 16 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 524 0 0 0 524 17 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 524 0 0 0 524 18 | WRF Z10 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 1 0 0 0 1 19 | WRF Z10 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 80 0 0 1 79 20 | WRF Z10 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 4474 40 40 11 4383 21 | WRF Z10 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 2511 37 37 4 2433 22 | WRF Z10 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 1963 3 3 7 1950 23 | WRF P850-700 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 666 112 25 33 496 24 | WRF P850-700 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 350 73 9 22 246 25 | WRF P850-700 2012-04-09 00:00:00 2012-04-09 12:00:00 120000 NEAREST VGRD ECLV NA 316 39 16 11 250 26 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 14162 2316 941 1329 9576 27 | WRF Z10 2005-08-07 00:00:00 2005-08-07 12:00:00 120000 NEAREST VGRD ECLV NA 14162 350 192 407 13213 28 | -------------------------------------------------------------------------------- /test/data/calc_tci_jja_xarray_input.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/test/data/calc_tci_jja_xarray_input.nc -------------------------------------------------------------------------------- /test/data/calc_tci_jja_xarray_output.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/test/data/calc_tci_jja_xarray_output.nc -------------------------------------------------------------------------------- /test/data/img_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/test/data/img_1.png -------------------------------------------------------------------------------- /test/data/img_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/test/data/img_2.png -------------------------------------------------------------------------------- /test/data/img_diff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dtcenter/METcalcpy/b83da573f6fe46b1c4cfcefa51409a30e23996fb/test/data/img_diff.png -------------------------------------------------------------------------------- /test/data/perf_diagram.data: -------------------------------------------------------------------------------- 1 | model fcst_lead stat_name fcst_var stat_value stat_btcl stat_btcu nstats 2 | HRRR_mem0_ctrl_hrconus 180000 FAR APCP_03 0.6731129 NA NA 90 3 | 
HRRR_mem0_ctrl_hrconus 180000 PODY APCP_03 0.3730645 NA NA 90 4 | HRRR_mem1_hrconus 180000 FAR APCP_03 0.7007665 NA NA 90 5 | HRRR_mem1_hrconus 180000 PODY APCP_03 0.336271 NA NA 90 6 | HRRR_mem2_hrconus 180000 FAR APCP_03 0.7000867 NA NA 90 7 | HRRR_mem2_hrconus 180000 PODY APCP_03 0.337225 NA NA 90 8 | HRRR_mem3_hrconus 180000 FAR APCP_03 0.702622 NA NA 90 9 | HRRR_mem3_hrconus 180000 PODY APCP_03 0.3385542 NA NA 90 10 | HRRR_mem4_hrconus 180000 FAR APCP_03 0.7011272 NA NA 90 11 | HRRR_mem4_hrconus 180000 PODY APCP_03 0.3389978 NA NA 90 12 | HRRR_mem5_hrconus 180000 FAR APCP_03 0.7012277 NA NA 90 13 | HRRR_mem5_hrconus 180000 PODY APCP_03 0.3368767 NA NA 90 14 | HRRR_mem6_hrconus 180000 FAR APCP_03 0.6970877 NA NA 90 15 | HRRR_mem6_hrconus 180000 PODY APCP_03 0.3376367 NA NA 90 16 | HRRR_mem7_hrconus 180000 FAR APCP_03 0.7037681 NA NA 90 17 | HRRR_mem7_hrconus 180000 PODY APCP_03 0.3330137 NA NA 90 -------------------------------------------------------------------------------- /test/data/point_stat/point_stat_GRIB1_NAM_GDAS_MASK_SID_120000L_20120409_120000V_val1l2.txt: -------------------------------------------------------------------------------- 1 | VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFABAR VFABAR UOABAR VOABAR UVFOABAR UVFFABAR UVOOABAR FA_SPEED_BAR OA_SPEED_BAR TOTAL_DIR DIRA_ME DIRA_MAE DIRA_MSE 2 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC KDEN NEAREST 1 NA NA NA NA VAL1L2 1 0.028193 2.82935 3.975 1.80126 5.20847 8.00603 19.04515 2.82949 4.36408 1 -65.05155 65.05155 4231.70355 3 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC SID_CO NEAREST 1 NA NA NA NA VAL1L2 80 0.15363 -0.18967 0.42924 -0.67093 2.82714 4.33289 12.74486 1.86527 3.0602 80 -9.33664 68.33537 7105.99676 4 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC ALLLATLON NEAREST 1 NA NA NA NA VAL1L2 4474 0.022949 -0.049517 -0.22407 0.099405 1.44868 2.48646 6.53755 1.36863 2.21133 4474 11.82836 65.55116 6780.64383 5 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC LATGE39 NEAREST 1 NA NA NA NA VAL1L2 2511 0.15753 -0.12932 -0.2034 0.067257 1.44861 2.75812 7.45429 1.42761 2.35083 2511 11.46784 69.61895 7486.54656 6 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC LATLT39 NEAREST 1 NA NA NA NA VAL1L2 1963 -0.14921 0.05256 -0.25051 0.14053 1.44876 2.13897 5.36487 1.29318 2.03288 1963 12.28952 60.34779 5877.67808 7 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s P850-700 UGRD_VGRD NA P850-700 ADPUPA ALLLATLON NEAREST 1 NA NA NA NA VAL1L2 666 -0.0061872 -0.088203 0.12263 -0.052885 4.33928 7.35944 14.22557 2.29765 3.2832 666 4.04997 65.97315 6716.40647 8 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s P850-700 UGRD_VGRD NA P850-700 ADPUPA LATGE39 NEAREST 1 NA NA NA NA VAL1L2 350 -0.15854 0.040702 0.19073 0.28343 5.93602 8.57598 16.59683 2.4931 3.54764 350 
0.0076746 59.78505 5673.48854 9 | V12.0.0 FCST NA 120000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s P850-700 UGRD_VGRD NA P850-700 ADPUPA LATLT39 NEAREST 1 NA NA NA NA VAL1L2 316 0.16256 -0.23098 0.047209 -0.42538 2.57074 6.012 11.59919 2.08116 2.99031 316 8.52719 72.82706 7871.53709 10 | -------------------------------------------------------------------------------- /test/data/point_stat/point_stat_GRIB2_SREF_GDAS_150000L_20120409_120000V_vl1l2.txt: -------------------------------------------------------------------------------- 1 | VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL UFBAR VFBAR UOBAR VOBAR UVFOBAR UVFFBAR UVOOBAR F_SPEED_BAR O_SPEED_BAR TOTAL_DIR DIR_ME DIR_MAE DIR_MSE 2 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC DTC165 NEAREST 1 NA NA NA NA VL1L2 975 -0.46545 0.17491 0.17569 -0.34882 3.81343 8.59213 8.64609 2.63981 2.03047 668 22.84654 60.61709 6129.41176 3 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC DTC166 NEAREST 1 NA NA NA NA VL1L2 3167 2.29778 -0.93596 1.53745 -0.48039 12.12615 18.28487 12.47356 3.72012 2.51923 2138 13.00804 26.14698 1477.83177 4 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC LMV NEAREST 1 NA NA NA NA VL1L2 398 0.61447 0.45488 0.075879 0.12864 1.70128 4.12033 2.52065 1.79043 0.94038 160 21.39089 40.07857 3510.11816 5 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s Z10 UGRD_VGRD NA Z10 ADPSFC TMP_Z2<280 NEAREST 1 NA NA NA NA VL1L2 2439 2.08769 -0.84956 1.66216 -0.628 15.65831 22.90417 17.44308 4.38431 3.28098 2000 14.04923 30.71628 2166.27565 6 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s P850-700 UGRD_VGRD NA P850-700 ADPUPA DTC165 NEAREST 1 NA NA NA NA VL1L2 195 0.82373 4.53786 1.01897 4.17487 64.52684 64.87804 79.44585 7.24105 7.60994 192 1.52159 26.3225 1996.26142 7 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s P850-700 UGRD_VGRD NA P850-700 ADPUPA DTC166 NEAREST 1 NA NA NA NA VL1L2 359 7.54661 -5.96296 8.05738 -5.95042 194.93784 189.36022 218.38941 11.87017 12.90323 358 4.02219 16.12332 774.11779 8 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s P850-700 UGRD_VGRD NA P850-700 ADPUPA LMV NEAREST 1 NA NA NA NA VL1L2 38 7.42465 -5.05512 7.24474 -5.86053 97.78354 92.97952 113.31 9.20198 10.0743 38 -5.24855 11.81144 253.41991 9 | V12.0.0 FCST NA 150000 20120409_120000 20120409_120000 000000 20120409_113000 20120409_123000 UGRD_VGRD m/s P850-700 UGRD_VGRD NA P850-700 ADPUPA TMP_Z2<280 NEAREST 1 NA NA NA NA VL1L2 338 4.83509 -1.651 4.98935 -1.34793 191.60558 187.94454 214.95556 12.26955 13.04549 337 2.63748 15.27596 679.74357 10 | -------------------------------------------------------------------------------- /test/data/roc_sample.data: -------------------------------------------------------------------------------- 1 | model i_value thresh_i oy_i on_i fcst_valid_beg fcst_lead 2 | WRF 1 0 351 
1027 2005-08-08 00:00:00 1080000 3 | WRF 2 0.1 336 522 2005-08-08 00:00:00 1080000 4 | WRF 3 0.2 225 124 2005-08-08 00:00:00 1080000 5 | WRF 4 0.3 97 16 2005-08-08 00:00:00 1080000 6 | WRF 5 0.4 0 0 2005-08-08 00:00:00 1080000 7 | WRF 6 0.5 0 0 2005-08-08 00:00:00 1080000 8 | WRF 7 0.6 0 0 2005-08-08 00:00:00 1080000 9 | WRF 8 0.7 0 0 2005-08-08 00:00:00 1080000 10 | WRF 9 0.8 0 0 2005-08-08 00:00:00 1080000 11 | WRF 10 0.9 0 0 2005-08-08 00:00:00 1080000 12 | WRF 1 0 29 212 2005-08-08 00:00:00 1080000 13 | WRF 2 0.1 615 1382 2005-08-08 00:00:00 1080000 14 | WRF 3 0.2 544 634 2005-08-08 00:00:00 1080000 15 | WRF 4 0.3 246 140 2005-08-08 00:00:00 1080000 16 | WRF 5 0.4 1 4 2005-08-08 00:00:00 1080000 17 | WRF 6 0.5 0 0 2005-08-08 00:00:00 1080000 18 | WRF 7 0.6 0 0 2005-08-08 00:00:00 1080000 19 | WRF 8 0.7 0 0 2005-08-08 00:00:00 1080000 20 | WRF 9 0.8 0 0 2005-08-08 00:00:00 1080000 21 | WRF 10 0.9 0 0 2005-08-08 00:00:00 1080000 22 | WRF 1 0 351 1027 2005-08-08 00:00:00 1080000 23 | WRF 2 0.1 336 522 2005-08-08 00:00:00 1080000 24 | WRF 3 0.2 225 124 2005-08-08 00:00:00 1080000 25 | WRF 4 0.3 97 16 2005-08-08 00:00:00 1080000 26 | WRF 5 0.4 0 0 2005-08-08 00:00:00 1080000 27 | WRF 6 0.5 0 0 2005-08-08 00:00:00 1080000 28 | WRF 7 0.6 0 0 2005-08-08 00:00:00 1080000 29 | WRF 8 0.7 0 0 2005-08-08 00:00:00 1080000 30 | WRF 9 0.8 0 0 2005-08-08 00:00:00 1080000 31 | WRF 10 0.9 0 0 2005-08-08 00:00:00 1080000 32 | WRF 1 0 29 212 2005-08-08 00:00:00 1080000 33 | WRF 2 0.1 615 1382 2005-08-08 00:00:00 1080000 34 | WRF 3 0.2 544 634 2005-08-08 00:00:00 1080000 35 | WRF 4 0.3 246 140 2005-08-08 00:00:00 1080000 36 | WRF 5 0.4 1 4 2005-08-08 00:00:00 1080000 37 | WRF 6 0.5 0 0 2005-08-08 00:00:00 1080000 38 | WRF 7 0.6 0 0 2005-08-08 00:00:00 1080000 39 | WRF 8 0.7 0 0 2005-08-08 00:00:00 1080000 40 | WRF 9 0.8 0 0 2005-08-08 00:00:00 1080000 41 | WRF 10 0.9 0 0 2005-08-08 00:00:00 1080000 42 | WRF 1 0 351 1027 2005-08-08 00:00:00 1080000 43 | WRF 2 0.1 336 522 2005-08-08 00:00:00 1080000 44 | WRF 3 0.2 225 124 2005-08-08 00:00:00 1080000 45 | WRF 4 0.3 97 16 2005-08-08 00:00:00 1080000 46 | WRF 5 0.4 0 0 2005-08-08 00:00:00 1080000 47 | WRF 6 0.5 0 0 2005-08-08 00:00:00 1080000 48 | WRF 7 0.6 0 0 2005-08-08 00:00:00 1080000 49 | WRF 8 0.7 0 0 2005-08-08 00:00:00 1080000 50 | WRF 9 0.8 0 0 2005-08-08 00:00:00 1080000 51 | WRF 10 0.9 0 0 2005-08-08 00:00:00 1080000 52 | WRF 1 0 29 212 2005-08-08 00:00:00 1080000 53 | WRF 2 0.1 615 1382 2005-08-08 00:00:00 1080000 54 | WRF 3 0.2 544 634 2005-08-08 00:00:00 1080000 55 | WRF 4 0.3 246 140 2005-08-08 00:00:00 1080000 56 | WRF 5 0.4 1 4 2005-08-08 00:00:00 1080000 57 | WRF 6 0.5 0 0 2005-08-08 00:00:00 1080000 58 | WRF 7 0.6 0 0 2005-08-08 00:00:00 1080000 59 | WRF 8 0.7 0 0 2005-08-08 00:00:00 1080000 60 | WRF 9 0.8 0 0 2005-08-08 00:00:00 1080000 61 | WRF 10 0.9 0 0 2005-08-08 00:00:00 1080000 -------------------------------------------------------------------------------- /test/data/stat_analysis/met_ecnt_agg.txt: -------------------------------------------------------------------------------- 1 | JOB_LIST: -job aggregate -line_type ECNT 2 | COL_NAME: TOTAL N_ENS CRPS CRPSS IGN ME RMSE SPREAD ME_OERR RMSE_OERR SPREAD_OERR SPREAD_PLUS_OERR CRPSCL CRPS_EMP CRPSCL_EMP CRPSS_EMP CRPS_EMP_FAIR SPREAD_MD MAE MAE_OERR BIAS_RATIO N_GE_OBS ME_GE_OBS N_LT_OBS ME_LT_OBS IGN_CONV_OERR IGN_CORR_OERR 3 | ECNT: 18 5 1.79694 NA 13.23697 0.70583 4.36199 3.04271 1.69983 6.25053 4.61729 4.61991 NA 1.79168 NA NA 1.60754 1.84139 2.38669 3.63251 1.1678 174613 2.68118 
114952 -2.29593 60.11017 24.60137 4 | 5 | -------------------------------------------------------------------------------- /test/data/stat_analysis/met_val1l2_aggregated.txt: -------------------------------------------------------------------------------- 1 | JOB_LIST: -job aggregate -line_type VAL1L2 2 | COL_NAME: TOTAL UFABAR VFABAR UOABAR VOABAR UVFOABAR UVFFABAR UVOOABAR FA_SPEED_BAR OA_SPEED_BAR TOTAL_DIR DIRA_ME DIRA_MAE DIRA_MSE 3 | VAL1L2: 10361 0.020211 -0.055296 -0.17405 0.074044 1.8313 3.12772 7.57505 1.49204 2.35589 10361 10.65754 65.62686 6774.65166 4 | 5 | -------------------------------------------------------------------------------- /test/data/stat_analysis/met_vcnt_from_vl1l2.txt: -------------------------------------------------------------------------------- 1 | JOB_LIST: -job aggregate_stat -line_type VL1L2 -out_line_type VCNT 2 | COL_NAME: TOTAL FBAR FBAR_BCL FBAR_BCU OBAR OBAR_BCL OBAR_BCU FS_RMS FS_RMS_BCL FS_RMS_BCU OS_RMS OS_RMS_BCL OS_RMS_BCU MSVE MSVE_BCL MSVE_BCU RMSVE RMSVE_BCL RMSVE_BCU FSTDEV FSTDEV_BCL FSTDEV_BCU OSTDEV OSTDEV_BCL OSTDEV_BCU FDIR FDIR_BCL FDIR_BCU ODIR ODIR_BCL ODIR_BCU FBAR_SPEED FBAR_SPEED_BCL FBAR_SPEED_BCU OBAR_SPEED OBAR_SPEED_BCL OBAR_SPEED_BCU VDIFF_SPEED VDIFF_SPEED_BCL VDIFF_SPEED_BCU VDIFF_DIR VDIFF_DIR_BCL VDIFF_DIR_BCU SPEED_ERR SPEED_ERR_BCL SPEED_ERR_BCU SPEED_ABSERR SPEED_ABSERR_BCL SPEED_ABSERR_BCU DIR_ERR DIR_ERR_BCL DIR_ERR_BCU DIR_ABSERR DIR_ABSERR_BCL DIR_ABSERR_BCU ANOM_CORR ANOM_CORR_NCL ANOM_CORR_NCU ANOM_CORR_BCL ANOM_CORR_BCU ANOM_CORR_UNCNTR ANOM_CORR_UNCNTR_BCL ANOM_CORR_UNCNTR_BCU TOTAL_DIR DIR_ME DIR_ME_BCL DIR_ME_BCU DIR_MAE DIR_MAE_BCL DIR_MAE_BCU DIR_MSE DIR_MSE_BCL DIR_MSE_BCU DIR_RMSE DIR_RMSE_BCL DIR_RMSE_BCU 3 | VCNT: 7909 4.54312 NA NA 3.69744 NA NA 5.85878 NA NA 5.75927 NA NA 8.81873 NA NA 2.96963 NA NA 3.6996 NA NA 4.41594 NA NA 291.5452 NA NA 290.64735 NA NA 2.30357 NA NA 1.91564 NA NA 0.38933 NA NA 295.96716 NA NA 0.38793 NA NA 0.38793 NA NA -0.89785 NA NA 0.89785 NA NA NA NA NA NA NA NA NA NA 5891 13.07336 NA NA 30.66754 NA NA 2214.79231 NA NA 47.06158 NA NA 4 | 5 | -------------------------------------------------------------------------------- /test/data/stat_analysis/met_vl1l2_aggregated.txt: -------------------------------------------------------------------------------- 1 | JOB_LIST: -job aggregate -line_type VL1L2 2 | COL_NAME: TOTAL UFBAR VFBAR UOBAR VOBAR UVFOBAR UVFFBAR UVOOBAR F_SPEED_BAR O_SPEED_BAR TOTAL_DIR DIR_ME DIR_MAE DIR_MSE 3 | VL1L2: 7909 2.14262 -0.84595 1.79259 -0.67548 29.33787 34.32529 33.16919 4.54312 3.69744 5891 13.07336 30.66754 2214.79231 4 | 5 | -------------------------------------------------------------------------------- /test/data/threshold.csv: -------------------------------------------------------------------------------- 1 | fcst_thresh,x,Y 2 | nan 3 | 0,1,0 4 | >=0,-0.1,-0.2 5 | >0.01,1,3 6 | >=0,2,4 7 | <5,3,9 8 | <=1,4,16 9 | >35,5,7 10 | >35&&<100.0,15,17 11 | ==3,6,18 12 | >=1&<=22.2,7,9 13 | >=1,17,19 14 | 100,8,24 15 | 20,9,21 16 | >=0,0.1,1.1 17 | 18 | 19 | -------------------------------------------------------------------------------- /test/ecnt_agg_stat.yaml: -------------------------------------------------------------------------------- 1 | agg_stat_input: !ENV "${TEST_DIR}/data/stat_analysis/ensemble_stat_OBSERR_20120410_120000V_ecnt.txt" 2 | agg_stat_output: !ENV "${TEST_DIR}/calcpy_ecnt_agg.txt" 3 | alpha: 0.05 4 | append_to_file: null 5 | circular_block_bootstrap: True 6 | derived_series_1: [] 7 | derived_series_2: [] 8 | event_equal: False 9 | 
fcst_var_val_1: 10 | APCP_24: 11 | - ECNT_IGN_CONV_OERR 12 | - ECNT_IGN_CORR_OERR 13 | fcst_var_val_2: {} 14 | indy_vals: 15 | - '240000' 16 | indy_var: fcst_lead 17 | line_type: ecnt 18 | list_stat_1: 19 | - ECNT_IGN_CONV_OERR 20 | - ECNT_IGN_CORR_OERR 21 | list_stat_2: [] 22 | method: perc 23 | num_iterations: 1 24 | num_threads: -1 25 | random_seed: null 26 | series_val_1: 27 | model: 28 | - FCST 29 | series_val_2: {} 30 | log_dir: !ENV "${TEST_DIR}/logs" 31 | log_filename: log_agg_stat_ecnt.txt 32 | log_level: WARNING 33 | -------------------------------------------------------------------------------- /test/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | # Deprecation warnings suppressed. 3 | 4 | 5 | # DeprecationWarning in xarray backend to netCDF4: 6 | # netCDF4_.py: 405: DeprecationWarning: 7 | # 8 | # tostring() is deprecated.Use 9 | # tobytes() 10 | # instead. 11 | filterwarnings = 12 | ignore 13 | default:::netCDF4.* 14 | ignore 15 | default:::numpy.* 16 | -------------------------------------------------------------------------------- /test/rrfs_ecnt_config_agg_stat.yaml: -------------------------------------------------------------------------------- 1 | agg_stat_input: !ENV "${TEST_DIR}/data/rrfs_ecnt_for_agg.data" 2 | agg_stat_output: !ENV "${TEST_DIR}/rrfs_ecnt_aggregated.data" 3 | alpha: 0.05 4 | append_to_file: null 5 | circular_block_bootstrap: True 6 | derived_series_1: [] 7 | derived_series_2: [] 8 | event_equal: False 9 | fcst_var_val_1: 10 | TMP: 11 | - ECNT_RMSE 12 | - ECNT_SPREAD_PLUS_OERR 13 | fcst_var_val_2: {} 14 | indy_vals: 15 | - '30000' 16 | - '60000' 17 | - '90000' 18 | - '120000' 19 | - '150000' 20 | - '160000' 21 | - '170000' 22 | - '180000' 23 | - '200000' 24 | - '240000' 25 | - '270000' 26 | - '300000' 27 | - '330000' 28 | - '340000' 29 | - '360000' 30 | indy_var: fcst_lead 31 | line_type: ecnt 32 | list_stat_1: 33 | - ECNT_RMSE 34 | - ECNT_SPREAD_PLUS_OERR 35 | list_stat_2: [] 36 | method: perc 37 | num_iterations: 1 38 | num_threads: -1 39 | random_seed: null 40 | series_val_1: 41 | model: 42 | - RRFS_GEFS_GF.SPP.SPPT 43 | series_val_2: {} 44 | log_dir: !ENV "${TEST_DIR}/logs" 45 | log_filename: log_agg_stat_rrfs_ecnt.txt 46 | log_level: DEBUG 47 | -------------------------------------------------------------------------------- /test/run_all_nco.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python3 -m pytest test_agg_eclv.py 4 | python3 -m pytest test_agg_ratio.py 5 | python3 -m pytest test_agg_stats_and_boot.py 6 | python3 -m pytest test_agg_stats_with_groups.py 7 | python3 -m pytest test_calc_difficulty_index.py 8 | python3 -m pytest test_convert_lon_indices.py 9 | python3 -m pytest test_ctc_statistics.py 10 | python3 -m pytest test_event_equalize_against_values.py 11 | python3 -m pytest test_event_equalize.py 12 | python3 -m pytest test_grid_diag.py 13 | python3 -m pytest test_lon_360_to_180.py 14 | python3 -m pytest test_scorecard.py 15 | python3 -m pytest test_spacetime.py 16 | python3 -m pytest test_statistics.py 17 | python3 -m pytest test_tost_paired.py 18 | python3 -m pytest test_agg_stat.py 19 | python3 -m pytest test_utils.py 20 | python3 -m pytest test_reformatted_for_agg.py 21 | -------------------------------------------------------------------------------- /test/test_agg_eclv.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import pytest 
4 | 5 | from metcalcpy.agg_eclv import AggEclv, pd 6 | 7 | cwd = os.path.dirname(__file__) 8 | 9 | def test_calculate_value_and_ci(settings): 10 | agg_eclv = settings['agg_stat'] 11 | agg_eclv.calculate_stats_and_ci() 12 | result_frame = pd.read_csv( 13 | agg_eclv.params['agg_stat_output'], 14 | header=[0], 15 | sep='\t' 16 | ) 17 | assert result_frame.size == 304 18 | assert result_frame.shape == (38, 8) 19 | assert np.allclose(result_frame['y_pnt_i'][2], 0.5285295) 20 | assert np.allclose(result_frame['y_pnt_i'][26], 0.65747) 21 | assert np.allclose(result_frame['nstats'][0], 23) 22 | assert result_frame['stat_btcl'][9] <= result_frame['y_pnt_i'][9] <= result_frame['stat_btcu'][9] 23 | assert result_frame['stat_btcl'][24] <= result_frame['y_pnt_i'][24] <= result_frame['stat_btcu'][24] 24 | 25 | 26 | @pytest.fixture 27 | def settings(): 28 | """Initialise values for testing. 29 | 30 | Returns: 31 | dictionary with values of different type 32 | """ 33 | params = {'random_seed': 1, 'indy_var': '', 34 | 'method': 'perc', 35 | 'num_iterations': 100, 'event_equal': 'False', 36 | 'agg_stat_input': f'{cwd}/data/agg_eclv_data.data', 37 | 'agg_stat_output': f'{cwd}/data/agg_eclv_data_output.data', 38 | 'fixed_vars_vals_input': {}, 39 | 'series_val_1': {'model': ['WRF'], 'fcst_lev': ['Z10', 'P850-700']}, 40 | 'alpha': 0.05, 'line_type': 'ctc', 41 | 'num_threads': -1, 42 | 'indy_vals': [], 43 | 'agg_stat1': ['ECLV'], 44 | 'circular_block_bootstrap': True, 45 | 'equalize_by_indep': 'True', 46 | 'cl_step': 0.05, 47 | 'log_dir': f'{cwd}/logs/', 48 | 'log_filename': 'log_agg_eclv.txt', 49 | 'log_level': 'WARNING' 50 | } 51 | agg_stat = AggEclv(params) 52 | settings_dict = dict() 53 | settings_dict['agg_stat'] = agg_stat 54 | return settings_dict 55 | -------------------------------------------------------------------------------- /test/test_agg_ratio.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import pytest 4 | 5 | from metcalcpy.agg_stat import AggStat, pd 6 | 7 | cwd = os.path.dirname(__file__) 8 | 9 | def test_agg_ratio(settings): 10 | agg_stat = settings['agg_stat'] 11 | agg_stat.calculate_stats_and_ci() 12 | result_frame = pd.read_csv( 13 | agg_stat.params['agg_stat_output'], 14 | header=[0], 15 | sep='\t' 16 | ) 17 | assert result_frame.size == 24 18 | assert result_frame.shape == (3, 8) 19 | assert np.allclose(result_frame['stat_value'][0], 3.15696) 20 | assert np.allclose(result_frame['stat_value'][1], 3.52230) 21 | assert np.allclose(result_frame['stat_value'][2], 0.89628) 22 | 23 | 24 | 25 | @pytest.fixture 26 | def settings(): 27 | """Initialise values for testing. 
28 | 29 | Returns: 30 | dictionary with values of different type 31 | """ 32 | params = {'random_seed': 1, 'indy_var': 'fcst_lead', 33 | 'method': 'perc', 34 | 'num_iterations': 10, 'event_equal': 'True', 35 | 'agg_stat_input': f'{cwd}/data/agg_ratio.data', 36 | 'agg_stat_output': f'{cwd}/data/agg_ratio_data_output.data', 37 | 'fixed_vars_vals_input': { 38 | 'obtype': { 39 | 'obtype_0' : ['CCPA'] 40 | }, 41 | 'interp_mthd': { 42 | 'interp_mthd_3': ['NEAREST'] 43 | }, 44 | 'vx_mask': { 45 | 'vx_mask_1': ['CONUS'] 46 | }, 47 | 'fcst_init_beg': { 48 | 'fcst_init_beg_2': ['2022-04-30 00:00:00', '2022-05-01 00:00:00', '2022-05-02 00:00:00','2022-05-03 00:00:00', '2022-05-04 00:00:00', 49 | '2022-05-05 00:00:00', '2022-05-06 00:00:00', '2022-05-07 00:00:00', '2022-05-08 00:00:00', '2022-05-09 00:00:00', 50 | '2022-05-10 00:00:00','2022-05-11 00:00:00', '2022-05-12 00:00:00' ] 51 | } 52 | }, 53 | 'series_val_1': {'model': ['RRFSE_CONUS_ICperts_nostoch.rrfs_conuscompact_3km']}, 54 | 'series_val_2': {}, 55 | 'alpha': 0.05, 'line_type': 'ecnt', 56 | 'num_threads': -1, 57 | 'indy_vals': ['30000'], 58 | 'circular_block_bootstrap': True, 59 | 'equalize_by_indep': 'True', 60 | 'cl_step': 0.05, 61 | 'derived_series_1':[ 62 | ['RRFSE_CONUS_ICperts_nostoch.rrfs_conuscompact_3km APCP_03 ECNT_RMSE', 63 | 'RRFSE_CONUS_ICperts_nostoch.rrfs_conuscompact_3km APCP_03 ECNT_SPREAD', 64 | 'RATIO'] 65 | ], 66 | 'fcst_var_val_1':{ 67 | 'APCP_03': ['ECNT_RMSE','ECNT_SPREAD'] 68 | }, 69 | 'list_stat_1':['ECNT_RMSE', 'ECNT_SPREAD'], 70 | 'list_stat_2':[], 71 | 'log_dir': f'{cwd}/logs/', 72 | 'log_filename': 'log_agg_stat.txt', 73 | 'log_level': 'WARNING' 74 | } 75 | agg_stat = AggStat(params) 76 | settings_dict = dict() 77 | settings_dict['agg_stat'] = agg_stat 78 | return settings_dict 79 | -------------------------------------------------------------------------------- /test/test_agg_stats_with_groups.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import os 3 | 4 | from metcalcpy import GROUP_SEPARATOR 5 | from metcalcpy.agg_stat import AggStat 6 | 7 | cwd = os.path.dirname(__file__) 8 | 9 | def test_groups(): 10 | # prepare parameters 11 | params = {'random_seed': 1, 'indy_var': 'fcst_lead', 12 | 'method': 'perc', 13 | 'num_iterations': 1, 'event_equal': 'True', 14 | 'derived_series_1': [ 15 | ['Group_y1_1 TMP ME', 'Group_y1_2 TMP ME', 'DIFF']], 16 | 'derived_series_2': [], 17 | 'agg_stat_input': f'{cwd}/data/agg_stat_with_groups_data.data', 18 | 'fcst_var_val_1': {'TMP': ['ME']}, 19 | 'fcst_var_val_2': {}, 20 | 'agg_stat_output': f'{cwd}/data/agg_stat_with_groups_output.data', 21 | 'fixed_vars_vals_input': {'fcst_lev': {'fcst_lev_0': ['Z02']}}, 22 | 'series_val_1': {'model': ['GTS+RAIN3mm' + GROUP_SEPARATOR + 'GTS+RAIN4mm', 23 | 'GTS+RAIN50p' + GROUP_SEPARATOR + 'GTS+RAIN5mm']}, 24 | 'series_val_2': {}, 25 | 'alpha': 0.05, 'line_type': 'sl1l2', 26 | 'num_threads': -1, 27 | 'indy_vals': ['0', '120000', '240000'], 28 | 'list_stat_1': ['ME'], 29 | 'list_stat_2': []} 30 | # start aggregation logic 31 | AGG_STAT = AggStat(params) 32 | AGG_STAT.calculate_stats_and_ci() 33 | 34 | # read the output 35 | input_data = pd.read_csv( 36 | params['agg_stat_output'], 37 | header=[0], 38 | sep='\t' 39 | ) 40 | assert len(input_data) == 9 41 | assert input_data.loc[(input_data['model'] == 'GTS+RAIN3mm' + GROUP_SEPARATOR + 'GTS+RAIN4mm') 42 | & (input_data['fcst_lead'] == 120000)]['stat_value'].item() == -1.4384707 43 | assert input_data.loc[(input_data['model'] == 
'GTS+RAIN50p' + GROUP_SEPARATOR + 'GTS+RAIN5mm') 44 | & (input_data['fcst_lead'] == 240000)]['stat_value'].item() == -1.5292608 45 | assert input_data.loc[(input_data['model'] == 'DIFF(Group_y1_1 TMP ME-Group_y1_2 TMP ME)') 46 | & (input_data['fcst_lead'] == 0)]['stat_value'].item() == -0.02839 47 | # remove the output 48 | os.remove(params['agg_stat_output']) 49 | -------------------------------------------------------------------------------- /test/test_calc_difficulty_index.py: -------------------------------------------------------------------------------- 1 | """Tests the operation of calc_difficulty_index.py""" 2 | 3 | import numpy as np 4 | import pytest 5 | from metcalcpy.calc_difficulty_index import forecast_difficulty 6 | 7 | __author__ = "Lindsay Blank (NCAR)" 8 | 9 | def test_forecast_difficulty(): 10 | """ 11 | Test that the output of forecast_difficulty function is correct. 12 | 13 | Returns 14 | ------- 15 | None. 16 | """ 17 | 18 | #Settings 19 | EPS = np.finfo(np.float32).eps 20 | nlon = 18 21 | nlat = 9 22 | nmembers = 10 23 | np.random.seed(12345) 24 | 25 | # Wave heights and wind speeds generally follow the Rayleigh distribution, 26 | # which is a Weibull distribution with a scale factor of 2, 27 | # and any shape parameter. 28 | # If the shape parameter is 1, then it is the same as a chi-square 29 | # distribution with 2 dofs. 30 | # Expected value E[x] = sigma * sqrt(pi/2) 31 | # mean_windspeed = 6.64 # meters/second 32 | xunits = 'feet' 33 | mean_height = 11.0 #mean wave height in feet 34 | mode_height = np.sqrt(2.0 / np.pi) * mean_height 35 | 36 | fieldijn = np.random.rayleigh(scale=mode_height, 37 | size=(nlat, nlon, nmembers)) 38 | muij = np.mean(fieldijn, axis=-1) 39 | pertijn = fieldijn - np.dstack([muij] * nmembers) 40 | sigmaij = np.sqrt(np.mean(pertijn * pertijn, axis=-1)) 41 | 42 | threshold = 9.0 43 | regularize = 0.01 44 | smax = 9.0 45 | sigma_max = smax + np.zeros_like(sigmaij) 46 | thresh_eps = 2.0 47 | kwargs = {'thresh_eps': thresh_eps, 'threshold_type': 'proximity'} 48 | 49 | assert 0.9095608641027515 == forecast_difficulty(sigmaij, muij, threshold, fieldijn, 50 | Aplin=None, sigma_over_mu_ref=EPS)[0][0] 51 | assert 0.8191620255148825 == forecast_difficulty(sigmaij, muij, threshold, fieldijn, 52 | Aplin=None, sigma_over_mu_ref=EPS)[8][17] 53 | assert 1.227707670365556 == forecast_difficulty(sigmaij, muij, threshold, fieldijn, 54 | Aplin=None, sigma_over_mu_ref=EPS)[4][9] 55 | 56 | 57 | if __name__ == "__main__": 58 | test_forecast_difficulty() 59 | 60 | -------------------------------------------------------------------------------- /test/test_compare_images.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | 4 | from metcalcpy.compare_images import CompareImages 5 | 6 | cwd = os.path.dirname(__file__) 7 | 8 | @pytest.fixture 9 | def settings(): 10 | compare_diff = CompareImages(f'{cwd}/data/img_1.png', f'{cwd}/data/img_2.png') 11 | compare_same = CompareImages(f'{cwd}/data/img_1.png', f'{cwd}/data/img_1.png') 12 | settings_dict = dict() 13 | settings_dict['compare_diff'] = compare_diff 14 | settings_dict['compare_same'] = compare_same 15 | return settings_dict 16 | 17 | 18 | def test_get_ssim(settings): 19 | assert settings['compare_diff'].get_mssim() != 1.0 20 | assert settings['compare_same'].get_mssim() == 1.0 21 | 22 | 23 | def test_save_difference_image(settings): 24 | settings['compare_diff'].save_difference_image('data/img_diff.png') 25 | 
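26 | # Note: test_save_difference_image above is a smoke test: it only verifies that 27 | # save_difference_image() completes without raising; the 'data/img_diff.png' path 28 | # is resolved relative to the pytest working directory.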
-------------------------------------------------------------------------------- /test/test_convert_lon_indices.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | import metcalcpy.util.utils as utils 4 | 5 | def test_convert_lon_indices_working(): 6 | 7 | # Verify that some values were correctly converted 8 | # and that the ordering in the array is from negative to positive 9 | np_lon = np.linspace(0, 359, 360) 10 | minlon_in = -180 11 | range_in = 360 12 | west_east, west_east_indices = utils.convert_lons_indices(np_lon, minlon_in, range_in) 13 | assert west_east[0] == -180.0 14 | assert west_east[359] == 179.0 15 | 16 | 17 | if __name__ == "__main__": 18 | test_convert_lon_indices_working() 19 | -------------------------------------------------------------------------------- /test/test_diagnostics_land_surface.py: -------------------------------------------------------------------------------- 1 | """Tests the functions in diagnostics/land_surface.py""" 2 | 3 | from metcalcpy.diagnostics.land_surface import calc_tci 4 | from xarray.testing import assert_equal 5 | import xarray as xr 6 | import pandas as pd 7 | import numpy as np 8 | 9 | import os 10 | 11 | __author__ = "Daniel Adriaansen (NCAR)" 12 | 13 | cwd = os.path.dirname(__file__) 14 | 15 | def test_calc_tci(): 16 | """ 17 | Test that the output of the calc_tci function is correct. 18 | 19 | Returns 20 | ------- 21 | None. 22 | """ 23 | 24 | doXarray = True 25 | doPandas = True 26 | 27 | if doXarray: 28 | ###### Xarray DataArray case 29 | # Input data for Xarray case 30 | xr_input = xr.open_dataset(f'{cwd}/data/calc_tci_jja_xarray_input.nc') 31 | 32 | # Output data for Xarray case 33 | xr_truth_var = '__xarray_dataarray_variable__' 34 | xr_truth = xr.open_dataset(f'{cwd}/data/calc_tci_jja_xarray_output.nc') 35 | 36 | # Compute TCI 37 | xr_test = calc_tci(xr_input['SOILWATER_10CM'],xr_input['LHFLX']) 38 | 39 | # Validate Xarray case 40 | assert_equal(xr_truth[xr_truth_var],xr_test) 41 | 42 | if doPandas: 43 | ###### Pandas DataFrame case 44 | # Input data for Pandas case 45 | pd_input = pd.read_csv(f'{cwd}/data/calc_tci_jja_pandas_input.csv') 46 | 47 | # There are three sites in the test data, each should have its own TCI value 48 | pd_test = np.array([]) 49 | for name,site in pd_input.groupby('station_id'): 50 | pd_test = np.append(pd_test,calc_tci(site['SWC_F_MDS_1'],site['LE_F_MDS'])) 51 | 52 | # The truth values 53 | pd_truth = np.array([-1.851168960504201,11.861239905560712,-2.0781980819945076]) 54 | 55 | # Validate Pandas case 56 | for test,truth in tuple(zip(pd_test,pd_truth)): 57 | assert test==truth 58 | 59 | if __name__ == "__main__": 60 | test_calc_tci() 61 | -------------------------------------------------------------------------------- /test/test_event_equalize_against_values.py: -------------------------------------------------------------------------------- 1 | """Tests the operation of METcalcpy's event_equalize_against_values code.""" 2 | 3 | import os 4 | import pandas as pd 5 | 6 | from metcalcpy.event_equalize_against_values import event_equalize_against_values 7 | 8 | cwd = os.path.dirname(__file__) 9 | 10 | def test_event_equalize_against_values(): 11 | """Tests event equalization against values.""" 12 | 13 | indy_var = "fcst_lead" 14 | series_val = dict({'model': ["AFWAOCv3.5.1_d01", "NoahMPv3.5.1_d01"]}) 15 | 16 | fcst_var_val = dict({'APCP_03': ["RATIO_FSA_ASA"]}) 17 | input_data_file = f'{cwd}/data/ee_av_input.data' 18 | 
stats_input_data_file = f'{cwd}/data/stats_ee_av_input.data' 19 | output_data_file = f'{cwd}/data/ee_av_output_py.data' 20 | 21 | # read the input data file into a data frame 22 | input_data = pd.read_csv(input_data_file, header=[0], sep='\t') 23 | stats_data = pd.read_csv(stats_input_data_file, header=[0], sep='\t') 24 | output_data = pd.DataFrame() 25 | 26 | for fcst_var, fcst_var_stats in fcst_var_val.items(): 27 | for series_var, series_var_vals in series_val.items(): 28 | # ungroup series value 29 | series_var_vals_no_group = [] 30 | for val in series_var_vals: 31 | split_val = val.split(',') 32 | series_var_vals_no_group.extend(split_val) 33 | 34 | ee_stats_equalize = input_data[ 35 | (input_data['fcst_var'] == fcst_var) 36 | & (input_data[series_var].isin(series_var_vals_no_group)) 37 | ] 38 | f_plot = stats_data[(stats_data['fcst_var'] == fcst_var) 39 | & (stats_data[series_var].isin(series_var_vals_no_group)) 40 | ] 41 | ee_stats_equalize_unique = (list(set(ee_stats_equalize['equalize']))) 42 | f_plot = event_equalize_against_values(f_plot, ee_stats_equalize_unique) 43 | 44 | # append EE data to result (pd.concat with reassignment; DataFrame.append returned a copy and was removed in pandas 2.0) 45 | if output_data.empty: 46 | output_data = f_plot 47 | else: 48 | output_data = pd.concat([output_data, f_plot]) 49 | # save to file 50 | output_data.to_csv(index=False, sep='\t', path_or_buf=output_data_file) 51 | 52 | 53 | if __name__ == "__main__": 54 | test_event_equalize_against_values() 55 | -------------------------------------------------------------------------------- /test/test_grid_diag.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import math 3 | import numpy as np 4 | 5 | if __name__ == '__main__': 6 | 7 | nx_default = 10 8 | ny_default = 10 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('--nx', type=int, 12 | default=nx_default, 13 | help='Number of x grid points') 14 | parser.add_argument('--ny', type=int, 15 | default=ny_default, 16 | help='Number of y grid points') 17 | parser.add_argument('--x_min', type=float, 18 | default=0, 19 | help='Domain left x-coordinate') 20 | parser.add_argument('--x_max', type=float, 21 | default=nx_default, 22 | help='Domain right x-coordinate') 23 | parser.add_argument('--y_min', type=float, 24 | default=0, 25 | help='Domain bottom y-coordinate') 26 | parser.add_argument('--y_max', type=float, 27 | default=ny_default, 28 | help='Domain top y-coordinate') 29 | parser.add_argument('--sigma', type=float, 30 | default=1, 31 | help='Normal distribution width') 32 | parser.add_argument('--mu_x', type=float, 33 | default=nx_default/2, 34 | help='Normal distribution x-mean') 35 | parser.add_argument('--mu_y', type=float, 36 | default=ny_default/2, 37 | help='Normal distribution y-mean') 38 | parser.add_argument('--n_bin', type=int, 39 | default=10, 40 | help='Number of bins') 41 | parser.add_argument('--min', type=float, 42 | default=0, 43 | help='Bin minimum') 44 | parser.add_argument('--max', type=float, 45 | default=0.001, 46 | help='Bin maximum') 47 | args = parser.parse_args() 48 | 49 | x_coords = np.linspace(args.x_min, args.x_max, args.nx + 1) 50 | y_coords = np.linspace(args.y_min, args.y_max, args.ny + 1) 51 | x_mesh, y_mesh = np.meshgrid(x_coords, y_coords) 52 | 53 | r2 = (x_mesh - args.mu_x) * (x_mesh - args.mu_x) \ 54 | + (y_mesh - args.mu_y) * (y_mesh - args.mu_y) 55 | sigma2 = args.sigma * args.sigma 56 | values = np.exp(- r2 / (2 * sigma2)) / math.sqrt(2 * math.pi * sigma2) 57 | 58 | print(values) 59 | 60 | pdf = np.histogram(values, bins=args.n_bin,
range=(args.min, args.max)) 61 | print(pdf) 62 | 63 | -------------------------------------------------------------------------------- /test/test_lon_360_to_180.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | import metcalcpy.util.utils as utils 4 | 5 | def test_lon_from_360_to_180(): 6 | 7 | print("running test") 8 | # Verify that longitude type is maintained, ie an int isn't converted to 9 | # a float when converting coord values. 10 | i_lon = [i_lon for i_lon in range (0, 359)] 11 | np_lon = np.linspace(0, 359, 360) 12 | 13 | i_west_east = utils.convert_lon_360_to_180(i_lon) 14 | np_west_east = utils.convert_lon_360_to_180(np_lon) 15 | 16 | 17 | if ( isinstance(i_west_east[0], int) or isinstance(i_west_east[0], np.int64) or isinstance(i_west_east[0], np.int32) ) and (isinstance(np_west_east[0], np.float64) or not isinstance(np_west_east[0], float)): 18 | assert True 19 | 20 | 21 | # Verify that some values were correctly converted 22 | # and that the ordering in the array is from negative to positive 23 | assert np_west_east[0] == -180.0 24 | assert np_west_east[359] == 179.0 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /test/test_no_ARIMA_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import pandas as pd 4 | import metcalcpy.util.utils as utils 5 | 6 | cwd = os.path.dirname(__file__) 7 | 8 | def test_no_arima(): 9 | data_file = f"{cwd}/data/scorecard.csv" 10 | df = pd.read_csv(data_file) 11 | 12 | stat_values:pd.Series = df['stat_value'] 13 | # convert dataframe to numpy array 14 | np_data:np.array = stat_values.to_numpy() 15 | size_data_from_file = np_data.size 16 | 17 | try: 18 | std_err, ratio_flag, ar_1, size_data =utils.compute_std_err_from_median_variance_inflation_factor(np_data) 19 | assert size_data_from_file == size_data 20 | except NameError: 21 | # if ARIMA is still present, expect "NameError: name 'ARIMA' is not defined 22 | assert False 23 | 24 | -------------------------------------------------------------------------------- /test/test_reformatted_for_agg.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pandas as pd 3 | import pytest 4 | import yaml 5 | from metcalcpy.agg_stat import AggStat 6 | from metcalcpy.util.read_env_vars_in_config import parse_config 7 | 8 | cwd = os.path.dirname(__file__) 9 | os.environ['TEST_DIR'] = cwd 10 | 11 | def test_reformatted_input(): 12 | ''' 13 | Test that the reformatted input generates output that doesn't have NA or none for 14 | the RMSE or SPREAD_PLUS_OERR statistics. 15 | 16 | The reformatted input was generated by METdataio's METreformat module for the ECNT linetype. 17 | 18 | :return: 19 | ''' 20 | 21 | # Read in the YAML config file 22 | config_file = f"{cwd}/rrfs_ecnt_config_agg_stat.yaml" 23 | parms = parse_config(config_file) 24 | 25 | # Calculate the aggregation statistics using the specified YAML config file and reformatted 26 | # ECNT linetype data (reformatted via the METdataio METreformat module) 27 | AGG_STAT = AggStat(parms) 28 | AGG_STAT.calculate_stats_and_ci() 29 | 30 | output_file = parms['agg_stat_output'] 31 | df:pd.DataFrame = pd.read_csv(output_file, sep="\t") 32 | 33 | # Verify that the ECNT_RMSE and ECNT_SPREAD_PLUS_OERR stat_values are NOT 34 | # NA (NaN) values. 
35 | rmse_only:pd.DataFrame = df.loc[ df['stat_name'] == 'ECNT_RMSE' ] 36 | spread_plus_oerr_only:pd.DataFrame = df.loc[ df['stat_name'] == 'ECNT_SPREAD_PLUS_OERR' ] 37 | 38 | num_rmse_rows = rmse_only.shape[0] 39 | num_spread_plus_oerr_rows = spread_plus_oerr_only.shape[0] 40 | 41 | # Remove NA (NaN) from and stat_name columns in the rmse_only and spread_plus_oerr_only 42 | # dataframes. If any exist, the total number of rows will be reduced and the test fails, as this 43 | # input data should only produce valid stat_name values. 44 | clean_rmse = rmse_only[rmse_only['stat_value'].notna()] 45 | clean_spread_plus_oerr = spread_plus_oerr_only[spread_plus_oerr_only['stat_value'].notna()] 46 | num_cleaned_rmse = clean_rmse.shape[0] 47 | num_cleaned_spread_plus_oerr = clean_spread_plus_oerr.shape[0] 48 | 49 | assert num_rmse_rows == num_cleaned_rmse 50 | assert num_spread_plus_oerr_rows == num_cleaned_spread_plus_oerr 51 | 52 | # Clean up the aggregation output file 53 | os.remove(output_file) 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /test/test_scorecard.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os 3 | import numpy as np 4 | 5 | from metcalcpy.scorecard import Scorecard, pd 6 | 7 | cwd = os.path.dirname(__file__) 8 | 9 | def test_calculate_scorecard_data(settings): 10 | scorecard = settings['scorecard'] 11 | scorecard.calculate_scorecard_data() 12 | result_frame = pd.read_csv( 13 | scorecard.params['sum_stat_output'], 14 | header=[0], 15 | sep='\t' 16 | ) 17 | assert result_frame.size == 72 18 | assert result_frame.shape == (9, 8) 19 | assert np.allclose( 20 | result_frame[(result_frame['model'] == 'DIFF(P200 AFWAOCv3.5.1_d01 120000:240000 HGT BCMSE-P200 NoahMPv3.5.1_d01 120000:240000 HGT BCMSE)') 21 | &(result_frame['vx_mask'] == 'EAST') 22 | & (result_frame['fcst_lead'] == '120000:240000')]['stat_value'],-6.29828) 23 | 24 | assert np.allclose( 25 | result_frame[(result_frame[ 26 | 'model'] == 'DIFF_SIG(P200 AFWAOCv3.5.1_d01 360000 HGT BCMSE-P200 NoahMPv3.5.1_d01 360000 HGT BCMSE)') 27 | & (result_frame['vx_mask'] == 'NMT') 28 | & (result_frame['fcst_lead'] == '120000:240000')]['stat_value'], -0.76703) 29 | 30 | assert np.allclose( 31 | result_frame[(result_frame[ 32 | 'model'] == 'SINGLE(P200 AFWAOCv3.5.1_d01 480000 HGT BCMSE-P200 NoahMPv3.5.1_d01 480000 HGT BCMSE)') 33 | & (result_frame['vx_mask'] == 'EAST') 34 | & (result_frame['fcst_lead'] == '480000')]['stat_value'], 439.42705) 35 | 36 | 37 | 38 | 39 | @pytest.fixture 40 | def settings(): 41 | """Initialise values for testing. 
42 | 43 | Returns: 44 | dictionary with values of different type 45 | """ 46 | params = {'append_to_file': False, 47 | 'derived_series_1': 48 | [['P200 AFWAOCv3.5.1_d01 120000:240000 HGT BCMSE', 49 | 'P200 NoahMPv3.5.1_d01 120000:240000 HGT BCMSE', 50 | 'DIFF'], 51 | ['P200 AFWAOCv3.5.1_d01 360000 HGT BCMSE', 52 | 'P200 NoahMPv3.5.1_d01 360000 HGT BCMSE', 53 | 'DIFF_SIG'], 54 | ['P200 AFWAOCv3.5.1_d01 480000 HGT BCMSE', 55 | 'P200 NoahMPv3.5.1_d01 480000 HGT BCMSE', 56 | 'SINGLE'] 57 | ], 58 | 'equalize_by_indep': True, 59 | 'event_equal': False, 60 | 'fcst_var_val_1': { 61 | 'HGT': 62 | ['BCMSE']}, 63 | 'fix_val_list_eq': [], 64 | 'fixed_vars_vals_input': {}, 65 | 'indy_plot_val': [], 66 | 'indy_vals': ['EAST', 67 | 'NMT', 68 | 'WEST', 69 | 'G2/TRO'], 70 | 'indy_var': 'vx_mask', 71 | 'line_type': 'sl1l2', 72 | 'list_stat_1': 73 | ['BCMSE'], 74 | 'ndays': 10, 75 | 'series_val_1': { 76 | 'fcst_lead': [ 77 | '120000:240000', 78 | '360000', 79 | '480000'], 80 | 'fcst_lev': 81 | ['P200'], 82 | 'model': 83 | ['AFWAOCv3.5.1_d01', 84 | 'NoahMPv3.5.1_d01']}, 85 | 'stat_flag': 'NCAR', 86 | 'sum_stat_input': f'{cwd}/data/scorecard.data', 87 | 'sum_stat_output': f'{cwd}/data/scorecard_output.data', 88 | 'log_dir': f'{cwd}/logs/', 89 | 'log_filename': 'log_scorecard.txt', 90 | 'log_level': 'WARNING' 91 | } 92 | scorecard = Scorecard(params) 93 | settings_dict = dict() 94 | settings_dict['scorecard'] = scorecard 95 | return settings_dict 96 | -------------------------------------------------------------------------------- /test/test_sl1l2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from metcalcpy.util import sl1l2_statistics as sl1l2 5 | 6 | def test_calculate_bcmse(): 7 | # Test that negative BCMSE values are no longer returned. 8 | input_data_list = [] 9 | 10 | # These data produce negative BCMSE values. Test that the modified code no longer returns negative values 11 | # for the BCMSE. 12 | input_data_list.append(np.array([[4.37978400e+01, 4.70115800e+01, 1.91825108e+03, 2.21008843e+03, 2.05900571e+03, 1.00000000e+00]])) 13 | input_data_list.append(np.array([[8.66233900e+01, 4.83037900e+01, 7.50361146e+03, 2.33325660e+03, 4.18423840e+03, 1.00000000e+00]])) 14 | input_data_list.append(np.array([[3.68089000e+01, 1.64253370e+02, 1.35489535e+03, 2.69791703e+04, 6.04598647e+03, 1.00000000e+00]])) 15 | columns_names = np.array(['fbar', 'obar', 'ffbar', 'oobar', 'fobar', 'total'], dtype='<U5') 16 | 17 | # BCMSE is a bias-corrected variance term, so each case must be non-negative 18 | for input_data in input_data_list: 19 | assert sl1l2.calculate_bcmse(input_data, columns_names) >= 0. 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /test/test_statistics.py: -------------------------------------------------------------------------------- 1 | """Tests the operation of METcalcpy's statistics code.""" 2 | import numpy as np 3 | import pytest 4 | from metcalcpy.util.met_stats import get_column_index_by_name 5 | from metcalcpy.util.correlation import corr, acf 6 | from metcalcpy.util.utils import round_half_up 7 | from metcalcpy.util.wald_wolfowitz_runs_test import runs_test 8 | from metcalcpy.util.eclv_statistics import calculate_eclv 9 | 10 | 11 | def test_get_column_index_by_name(settings): 12 | column_name = 'fobar' 13 | assert 11 == get_column_index_by_name(settings['columns'], column_name) 14 | 15 | column_name = 'not_in_array' 16 | assert not get_column_index_by_name(settings['columns'], column_name) 17 | 18 | 19 | @pytest.fixture 20 | def settings(): 21 | """Initialise values for testing.
22 | 23 | Returns: 24 | dictionary with values of different type 25 | """ 26 | settings_dict = dict() 27 | columns = np.array(['model', 'fcst_init_beg', 'fcst_valid_beg', 'fcst_lead', 'vx_mask', 'fcst_var', 28 | 'stat_name', 'stat_value', 'total', 'fbar', 'obar', 'fobar', 'ffbar', 'oobar', 29 | 'mae']) 30 | settings_dict['columns'] = columns 31 | 32 | return settings_dict 33 | 34 | 35 | def test_corr(): 36 | x = [103.4, 59.92, 68.17, 94.54, 69.48, 72.17, 74.37, 84.44, 96.74, 94.26, 48.52, 95.68] 37 | y = [90.11, 77.71, 77.71, 97.51, 58.21, 101.3, 79.84, 96.06, 89.3, 97.22, 61.62, 85.8] 38 | 39 | corr_val = corr(x=x, y=y)['r'].tolist()[0] 40 | assert round_half_up(corr_val, 2) == 0.67 41 | 42 | 43 | def test_acf(): 44 | x = [2.4, 2.4, 2.4, 2.2, 2.1, 1.5, 2.3, 2.3, 2.5, 2.0, 1.9, 1.7, 2.2, 1.8, 3.2, 3.2, 2.7, 2.2, 2.2, 1.9, 1.9, 1.8, 45 | 2.7, 3.0, 2.3, 2.0, 2.0, 2.9, 2.9, 2.7, 2.7, 2.3, 2.6, 2.4, 1.8, 1.7, 1.5, 1.4, 2.1, 3.3, 3.5, 3.5, 3.1, 2.6, 46 | 2.1, 3.4, 3.0, 2.9] 47 | acf_val = acf(x, 'correlation', lag_max=10) 48 | assert -0.154 == round_half_up(acf_val[10], 3) 49 | acf_val = acf(x, 'correlation') 50 | assert 0.151 == round_half_up(acf_val[16], 3) 51 | 52 | 53 | def test_eclv(): 54 | x = np.array([[666, 112, 25, 33, 496], [350, 73, 9, 22, 246], [316, 39, 16, 11, 250]]) 55 | columns_names = np.array(['total', 'fy_oy', 'fy_on', 'fn_oy', 'fn_on']) 56 | cl_pts = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95] 57 | thresh = 0 58 | line_type = 'ctc' 59 | add_base_rate = 1 60 | eclv = calculate_eclv(x, columns_names, thresh, line_type, cl_pts, add_base_rate) 61 | assert 0.7244291 == eclv['vmax'] 62 | assert 0.0479846 == eclv['F'] 63 | assert 0.7724138 == eclv['H'] 64 | assert 0.2177177 == eclv['s'] 65 | assert 20 == len(eclv['V']) 66 | assert -0.2514395393474087 == eclv['V'][0] 67 | 68 | 69 | 70 | def test_runs_test(): 71 | x = [2.4, 2.4, 2.4, 2.2, 2.1, 1.5, 2.3, 2.3, 2.5, 2.0, 1.9, 1.7, 2.2, 1.8, 3.2, 3.2, 2.7, 2.2, 2.2, 1.9, 1.9, 1.8, 72 | 2.7, 3.0, 2.3, 2.0, 2.0, 2.9, 2.9, 2.7, 2.7, 2.3, 2.6, 2.4, 1.8, 1.7, 1.5, 1.4, 2.1, 3.3, 3.5, 3.5, 3.1, 2.6, 73 | 2.1, 3.4, 3.0, 2.9] 74 | 75 | ww_run = runs_test(x, 'left.sided', 'median') 76 | assert 0.00117 == round_half_up(ww_run['p_value'], 5) 77 | 78 | 79 | if __name__ == "__main__": 80 | test_get_column_index_by_name() 81 | test_corr() 82 | test_acf() 83 | test_runs_test() 84 | test_eclv() 85 | -------------------------------------------------------------------------------- /test/test_tost_paired.py: -------------------------------------------------------------------------------- 1 | """Tests the operation of METcalcpy's tost_paired code.""" 2 | import statistics 3 | import metcalcpy.util.correlation as pg 4 | 5 | from metcalcpy.util.utils import tost_paired 6 | 7 | 8 | def test_tost_paired(): 9 | x = [103.4, 59.92, 68.17, 94.54, 69.48, 72.17, 74.37, 84.44, 96.74, 94.26, 48.52, 95.68] 10 | y = [90.11, 77.71, 77.71, 97.51, 58.21, 101.3, 79.84, 96.06, 89.3, 97.22, 61.62, 85.8] 11 | 12 | corr = pg.corr(x=x, y=y)['r'].tolist()[0] 13 | 14 | result = tost_paired(len(x), statistics.mean(x), statistics.mean(y), 15 | statistics.stdev(x), statistics.stdev(y), corr, 16 | -0.001, 0.001) 17 | assert result['dif'] == -4.225 18 | assert result['t'] == (-1.1243156, -1.1312438) 19 | assert result['p'] == (0.8575922, 0.1410063) 20 | assert result['degrees_of_freedom'] == 11 21 | assert result['ci_tost'] == (-10.9529216, 2.5029216) 22 | assert result['ci_ttest'] == (-12.4705487, 4.0205487) 23 | assert 
result['eqbound'] == (-0.0129776, 0.0129776) 24 | assert result['xlim'] == (-12.298506, 3.848506) 25 | assert result['combined_outcome'] == 'no_diff_no_eqv' 26 | assert result['test_outcome'] == 'non-significant' 27 | assert result['tost_outcome'] == 'non-significant' 28 | 29 | 30 | if __name__ == "__main__": 31 | test_tost_paired() -------------------------------------------------------------------------------- /test/test_validate_mv_python.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | import pytest 3 | 4 | from metcalcpy.validate_mv_python import get_testing_period, replace_name 5 | 6 | 7 | def test_get_testing_period(): 8 | param = {'start_date': '2020-01-01', 'end_date': '2020-01-16'} 9 | (start, end) = get_testing_period(param) 10 | assert start == dt.datetime.strptime('2020-01-01', '%Y-%m-%d').date() 11 | assert end == dt.datetime.strptime('2020-01-16', '%Y-%m-%d').date() 12 | 13 | param = {} 14 | (start, end) = get_testing_period(param) 15 | assert start == (dt.datetime.now() - dt.timedelta(1)).date() 16 | assert end == dt.datetime.now().date() 17 | 18 | param = {'start_date': '2020-01-01'} 19 | (start, end) = get_testing_period(param) 20 | assert start == dt.datetime.strptime('2020-01-01', '%Y-%m-%d').date() 21 | assert end == dt.datetime.now().date() 22 | 23 | param = {'end_date': '2020-01-01'} 24 | with pytest.raises(Exception): 25 | get_testing_period(param) 26 | 27 | 28 | def test_replace_name(): 29 | assert replace_name('plot_20200115_135714.xml', 'py') == 'plot_20200115_135714_py.xml' 30 | -------------------------------------------------------------------------------- /test/val1l2_agg_stat.yaml: -------------------------------------------------------------------------------- 1 | agg_stat_input: !ENV "${TEST_DIR}/data/point_stat/point_stat_GRIB1_NAM_GDAS_MASK_SID_120000L_20120409_120000V_val1l2.txt" 2 | agg_stat_output: !ENV "${TEST_DIR}/calcpy_val1l2_agg.txt" 3 | alpha: 0.05 4 | append_to_file: null 5 | circular_block_bootstrap: True 6 | derived_series_1: [] 7 | derived_series_2: [] 8 | event_equal: False 9 | fcst_var_val_1: 10 | UGRD_VGRD: 11 | - VAL1L2_DIRA_ME 12 | - VAL1L2_DIRA_MAE 13 | - VAL1L2_DIRA_MSE 14 | fcst_var_val_2: {} 15 | indy_vals: 16 | #- '30000' 17 | #- '40000' 18 | #- '60000' 19 | #- '90000' 20 | - '120000' 21 | #- '150000' 22 | #- '160000' 23 | #- '170000' 24 | #- '180000' 25 | #- '200000' 26 | #- '240000' 27 | #- '270000' 28 | 29 | indy_var: fcst_lead 30 | line_type: val1l2 31 | list_stat_1: 32 | - VAL1L2_DIRA_ME 33 | - VAL1L2_DIRA_MAE 34 | - VAL1L2_DIRA_MSE 35 | list_stat_2: [] 36 | method: perc 37 | num_iterations: 1 38 | num_threads: -1 39 | random_seed: null 40 | series_val_1: 41 | model: 42 | - FCST 43 | series_val_2: {} 44 | log_dir: !ENV "${TEST_DIR}/logs" 45 | log_filename: log_agg_stat_val1l2.txt 46 | log_level: WARNING -------------------------------------------------------------------------------- /test/vcnt_agg_stat.yaml: -------------------------------------------------------------------------------- 1 | agg_stat_input: !ENV "${TEST_DIR}/data/point_stat/point_stat_GRIB2_SREF_GDAS_150000L_20120409_120000V_vcnt.txt" 2 | agg_stat_output: !ENV "${TEST_DIR}/calcpy_vcnt_agg.txt" 3 | alpha: 0.05 4 | append_to_file: null 5 | circular_block_bootstrap: True 6 | derived_series_1: [] 7 | derived_series_2: [] 8 | event_equal: False 9 | fcst_var_val_1: 10 | UGRD_VGRD: 11 | - VCNT_DIR_ME 12 | - VCNT_DIR_MAE 13 | - VCNT_DIR_MSE 14 | - VCNT_DIR_RMSE 15 | fcst_var_val_2: {} 16 | indy_vals: 17 
| #- '30000' 18 | #- '40000' 19 | #- '60000' 20 | #- '90000' 21 | #- '120000' 22 | - '150000' 23 | #- '160000' 24 | #- '170000' 25 | #- '180000' 26 | #- '200000' 27 | #- '240000' 28 | #- '270000' 29 | 30 | indy_var: fcst_lead 31 | line_type: vcnt 32 | list_stat_1: 33 | - VCNT_DIR_ME 34 | - VCNT_DIR_MAE 35 | - VCNT_DIR_MSE 36 | - VCNT_DIR_RMSE 37 | list_stat_2: [] 38 | method: perc 39 | num_iterations: 1 40 | num_threads: -1 41 | random_seed: null 42 | series_val_1: 43 | model: 44 | - FCST 45 | series_val_2: {} 46 | log_dir: !ENV "${TEST_DIR}/logs" 47 | log_filename: log_agg_stat_vcnt.txt 48 | log_level: WARNING 49 | -------------------------------------------------------------------------------- /test/vl1l2_agg_stat_met_v12.yaml: -------------------------------------------------------------------------------- 1 | agg_stat_input: !ENV "${TEST_DIR}/data/point_stat/point_stat_GRIB2_SREF_GDAS_150000L_20120409_120000V_vl1l2.txt" 2 | agg_stat_output: !ENV "${TEST_DIR}/met_v12_v1l1l2_agg.txt" 3 | alpha: 0.05 4 | append_to_file: null 5 | circular_block_bootstrap: True 6 | derived_series_1: [] 7 | derived_series_2: [] 8 | event_equal: False 9 | fcst_var_val_1: 10 | UGRD_VGRD: 11 | - VL1L2_DIR_ME 12 | - VL1L2_DIR_MAE 13 | - VL1L2_DIR_MSE 14 | fcst_var_val_2: {} 15 | indy_vals: 16 | #- '30000' 17 | #- '40000' 18 | #- '60000' 19 | #- '90000' 20 | #- '120000' 21 | - '150000' 22 | #- '160000' 23 | #- '170000' 24 | #- '180000' 25 | #- '200000' 26 | #- '240000' 27 | #- '270000' 28 | 29 | indy_var: fcst_lead 30 | line_type: vl1l2 31 | list_stat_1: 32 | - VL1L2_DIR_ME 33 | - VL1L2_DIR_MAE 34 | - VL1L2_DIR_MSE 35 | list_stat_2: [] 36 | method: perc 37 | num_iterations: 1 38 | num_threads: -1 39 | random_seed: null 40 | series_val_1: 41 | model: 42 | - FCST 43 | series_val_2: {} 44 | log_dir: !ENV "${TEST_DIR}/logs" 45 | log_filename: log_agg_stat_vl1l2.txt 46 | log_level: WARNING 47 | --------------------------------------------------------------------------------
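A minimal usage sketch (not a file in the repository; it mirrors the driver pattern in test_reformatted_for_agg.py above): each of the agg_stat YAML configs in test/ resolves its !ENV "${TEST_DIR}" paths from the environment and can drive AggStat directly. The sketch assumes METcalcpy is installed and is run from the repository root:

import os
import pandas as pd

from metcalcpy.agg_stat import AggStat
from metcalcpy.util.read_env_vars_in_config import parse_config

# resolve the !ENV ${TEST_DIR} references used inside the YAML configs
os.environ['TEST_DIR'] = os.path.abspath('test')

# load one of the configs shown above and run the aggregation
params = parse_config(os.path.join(os.environ['TEST_DIR'], 'ecnt_agg_stat.yaml'))
AggStat(params).calculate_stats_and_ci()

# the result is a tab-separated table written to agg_stat_output
print(pd.read_csv(params['agg_stat_output'], sep='\t').head())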