├── .github ├── ci-config.yml ├── ci-hpc-config.yml └── workflows │ ├── cd-pypi.yml │ ├── ci.yml │ ├── label-public-pr.yml │ └── weekly.yml ├── .gitignore ├── ADVANCED_USAGE.rst ├── CHANGELOG.rst ├── CONTRIBUTING.rst ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── appveyor.yml ├── cf2cdm ├── __init__.py ├── cfcoords.py ├── cfunits.py └── datamodels.py ├── cfgrib ├── __init__.py ├── __main__.py ├── abc.py ├── cfmessage.py ├── dataset.py ├── messages.py ├── xarray_plugin.py ├── xarray_store.py └── xarray_to_grib.py ├── ci ├── install_python.ps1 ├── requirements-dev.txt ├── requirements-docs.in ├── requirements-docs.txt ├── requirements-docs.yml ├── requirements-py37.yml ├── requirements-tests.in └── requirements-tests.txt ├── docs ├── _static │ └── .gitkeep ├── cfmessage.rst ├── conf.py ├── dataset.rst ├── index.rst ├── messages.rst ├── xarray_store.rst └── xarray_to_grib.rst ├── environment-minimal.in.yml ├── environment-minver.in.yml ├── environment.in.yml ├── era5-levels-members.grib ├── nam.t00z.awp21100.tm00.grib2 ├── pyproject.toml ├── setup.cfg ├── setup.py ├── tests ├── cds_test_00_sync_sample_data.py ├── cds_test_10_era5.py ├── cds_test_20_sf_ecmwf.py ├── cds_test_20_sf_meteo_france.py ├── cds_test_20_sf_ukmo.py ├── cdscommon.py ├── environment-macos-3.8.yml ├── environment-ubuntu-3.10.yml ├── environment-ubuntu-3.7.yml ├── environment-ubuntu-3.8.yml ├── environment-ubuntu-3.9-minimal.yml ├── environment-windows-3.8.yml ├── sample-data │ ├── alternate-scanning.grib │ ├── cams-egg4-monthly.grib │ ├── cfrzr_and_cprat.grib │ ├── cfrzr_and_cprat_0s.grib │ ├── ds.waveh.5.grib │ ├── era5-levels-corrupted.grib │ ├── era5-levels-members.grib │ ├── era5-levels-members.nc │ ├── era5-single-level-scalar-time.grib │ ├── fields_with_missing_values.grib │ ├── forecast_monthly_ukmo.grib │ ├── hpa_and_pa.grib │ ├── lambert_grid.grib │ ├── multi_param_on_multi_dims.grib │ ├── ncep-seasonal-monthly.grib │ ├── reduced_gg.grib │ ├── regular_gg_ml.grib │ ├── regular_gg_ml_g2.grib │ ├── regular_gg_pl.grib │ ├── regular_gg_sfc.grib │ ├── regular_gg_wrong_increment.grib │ ├── regular_ll_msl.grib │ ├── regular_ll_sfc.grib │ ├── regular_ll_wrong_increment.grib │ ├── scanning_mode_64.grib │ ├── single_gridpoint.grib │ ├── soil-surface-level-mix.grib │ ├── spherical_harmonics.grib │ ├── step_60m.grib │ ├── t_analysis_and_fc_0.grib │ ├── t_on_different_level_types.grib │ ├── tp_on_different_grid_resolutions.grib │ └── uv_on_different_levels.grib ├── test_10_cfunits.py ├── test_20_cfcoords.py ├── test_20_main.py ├── test_20_messages.py ├── test_25_cfmessage.py ├── test_30_dataset.py ├── test_40_xarray_store.ipynb ├── test_40_xarray_store.py ├── test_40_xarray_to_grib_regular_ll.py ├── test_50_datamodels.py ├── test_50_sample_data.py ├── test_50_xarray_getitem.py ├── test_50_xarray_plugin.py └── test_60_main_commands.py └── tox.ini /.github/ci-config.yml: -------------------------------------------------------------------------------- 1 | dependencies: | 2 | ecmwf/ecbuild 3 | MathisRosenhauer/libaec@refs/tags/v1.1.3 4 | ecmwf/eccodes 5 | dependency_branch: develop 6 | parallelism_factor: 8 7 | self_build: false 8 | -------------------------------------------------------------------------------- /.github/ci-hpc-config.yml: -------------------------------------------------------------------------------- 1 | build: 2 | python: '3.10' 3 | modules: 4 | - ninja 5 | dependencies: 6 | - ecmwf/ecbuild@develop 7 | - ecmwf/eccodes@develop 8 | python_dependencies: 9 | - ecmwf/eccodes-python@develop 10 | env: 11 | 
- ECCODES_SAMPLES_PATH=$ECCODES_DIR/share/eccodes/samples 12 | - ECCODES_DEFINITION_PATH=$ECCODES_DIR/share/eccodes/definitions 13 | parallel: 64 14 | -------------------------------------------------------------------------------- /.github/workflows/cd-pypi.yml: -------------------------------------------------------------------------------- 1 | name: cd 2 | 3 | on: 4 | push: 5 | tags: 6 | - '**' 7 | 8 | jobs: 9 | deploy: 10 | uses: ecmwf/reusable-workflows/.github/workflows/cd-pypi.yml@v2 11 | secrets: inherit -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | # Trigger the workflow on push to master or develop, except tag creation 5 | push: 6 | branches: 7 | - 'master' 8 | - 'develop' 9 | tags-ignore: 10 | - '**' 11 | 12 | # Trigger the workflow on pull request 13 | pull_request: ~ 14 | 15 | # Trigger the workflow manually 16 | workflow_dispatch: ~ 17 | 18 | # Trigger after public PR approved for CI 19 | pull_request_target: 20 | types: [labeled] 21 | 22 | jobs: 23 | # Run CI including downstream packages on self-hosted runners 24 | downstream-ci: 25 | name: downstream-ci 26 | if: ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }} 27 | uses: ecmwf/downstream-ci/.github/workflows/downstream-ci.yml@main 28 | with: 29 | cfgrib: ecmwf/cfgrib@${{ github.event.pull_request.head.sha || github.sha }} 30 | codecov_upload: true 31 | secrets: inherit 32 | 33 | # Build downstream packages on HPC 34 | downstream-ci-hpc: 35 | name: downstream-ci-hpc 36 | if: ${{ !github.event.pull_request.head.repo.fork && github.event.action != 'labeled' || github.event.label.name == 'approved-for-ci' }} 37 | uses: ecmwf/downstream-ci/.github/workflows/downstream-ci-hpc.yml@main 38 | with: 39 | cfgrib: ecmwf/cfgrib@${{ github.event.pull_request.head.sha || github.sha }} 40 | secrets: inherit 41 | -------------------------------------------------------------------------------- /.github/workflows/label-public-pr.yml: -------------------------------------------------------------------------------- 1 | # Manage labels of pull requests that originate from forks 2 | name: label-public-pr 3 | 4 | on: 5 | pull_request_target: 6 | types: [opened, synchronize] 7 | 8 | jobs: 9 | label: 10 | uses: ecmwf/reusable-workflows/.github/workflows/label-pr.yml@v2 11 | -------------------------------------------------------------------------------- /.github/workflows/weekly.yml: -------------------------------------------------------------------------------- 1 | name: weekly 2 | 3 | on: 4 | schedule: 5 | - cron: '0 9 * * 1' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | weekly: 10 | runs-on: ${{ matrix.os }}-latest 11 | strategy: 12 | max-parallel: 5 13 | fail-fast: false 14 | matrix: 15 | os: [ubuntu] 16 | python: [3.7, 3.8] 17 | extras: [''] 18 | include: 19 | - os: macos 20 | python: 3.8 21 | - os: windows 22 | python: 3.8 23 | - os: ubuntu 24 | python: 3.9 25 | extras: -minimal 26 | 27 | steps: 28 | - uses: actions/checkout@v2 29 | - uses: conda-incubator/setup-miniconda@v2 30 | with: 31 | auto-update-conda: true 32 | python-version: ${{ matrix.python }} 33 | activate-environment: ${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }} 34 | environment-file: environment${{ matrix.extras }}.in.yml 35 | - name: Export concrete dependencies 36 | shell: bash -l {0} 37 | run: | 38 | conda env 
export --no-build -f tests/environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml 39 | git diff 40 | - name: Archive environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml 41 | uses: actions/upload-artifact@v4 42 | with: 43 | name: environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml 44 | path: tests/environment-${{ matrix.os }}-${{ matrix.python }}${{ matrix.extras }}.yml 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # Installer logs 29 | pip-log.txt 30 | pip-delete-this-directory.txt 31 | 32 | # Unit test / coverage reports 33 | htmlcov/ 34 | .tox/ 35 | .coverage 36 | .coverage.* 37 | .cache 38 | nosetests.xml 39 | coverage.xml 40 | *.cover 41 | .hypothesis/ 42 | .pytest_cache/ 43 | 44 | # Sphinx documentation 45 | docs/_build/ 46 | 47 | # Jupyter Notebook 48 | .ipynb_checkpoints 49 | 50 | # pyenv 51 | .python-version 52 | 53 | # Environments 54 | .env 55 | .venv 56 | env/ 57 | venv/ 58 | ENV/ 59 | env.bak/ 60 | venv.bak/ 61 | 62 | # Spyder project settings 63 | .spyderproject 64 | .spyproject 65 | 66 | # Rope project settings 67 | .ropeproject 68 | 69 | # mkdocs documentation 70 | /site 71 | 72 | # mypy 73 | .mypy_cache/ 74 | 75 | # editors 76 | .vscode 77 | 78 | # local ignores 79 | tests/sample-data/cds*.grib 80 | tests/sample-data/cds*.nc 81 | *.idx 82 | _dev 83 | 84 | # mac 85 | .DS_Store -------------------------------------------------------------------------------- /ADVANCED_USAGE.rst: -------------------------------------------------------------------------------- 1 | 2 | ECMWF’s MARS and Metview software introduced the notion of a *Fieldset*, which is an 3 | ordered collection of GRIB messages. The *Fieldset* is an abstract concept and can be 4 | implemented in many ways. In the case of MARS and Metview, a *Fieldset* is implemented 5 | internally as an array of *Field*, each *Field* being represented by a file path, an offset and a 6 | length where the actual GRIB message can be found. Thus, a *Fieldset* can represent an 7 | ordered collection of *Field* which are at various locations in several files. 8 | 9 | *cfgrib* now provides the definition of the ``Field`` and ``Fieldset`` types in the ``cfgrib.abc`` module 10 | and additionally a ``MappingFieldset`` for specialised use. 11 | The implementations are based on simple Python sequences and mappings, so that *cfgrib* 12 | can build a Dataset from, for example, something as simple as a list of dicts. 13 | 14 | Classes that implement the ``Fieldset`` and the ``MappingFieldset`` interface 15 | can use the low-level interface ``cfgrib.open_fieldset`` to obtain a ``cfgrib.Dataset``, 16 | or they can be passed directly to *Xarray*. 17 | 18 | The simplest *Fieldset* is a list of dictionaries: 19 | 20 | .. code-block:: python 21 | 22 | >>> import xarray as xr 23 | >>> fieldset = [ 24 | ... { 25 | ... "gridType": "regular_ll", 26 | ... "Nx": 2, 27 | ... "Ny": 3, 28 | ... "distinctLatitudes": [-10.0, 0.0, 10.0], 29 | ... "distinctLongitudes": [0.0, 10.0],
"paramId": 130, 31 | ... "shortName": "t", 32 | ... "values": [[1, 2], [3, 4], [5, 6]], 33 | ... "dataDate": 20211216, 34 | ... "dataTime": 1200, 35 | ... } 36 | ... ] 37 | >>> ds = xr.open_dataset(fieldset, engine="cfgrib") 38 | >>> ds 39 | 40 | Dimensions: (latitude: 3, longitude: 2) 41 | Coordinates: 42 | time datetime64[ns] ... 43 | * latitude (latitude) float64 -10.0 0.0 10.0 44 | * longitude (longitude) float64 0.0 10.0 45 | Data variables: 46 | t (latitude, longitude) float32 ... 47 | Attributes: 48 | Conventions: CF-1.7 49 | history: ... 50 | >>> ds.mean() 51 | 52 | Dimensions: () 53 | Coordinates: 54 | time datetime64[ns] ... 55 | Data variables: 56 | t float32 3.5 57 | 58 | 59 | For example you can implement a dedicated ``Fieldset`` class following this pattern: 60 | 61 | .. code-block:: python 62 | 63 | from typing import Iterator 64 | 65 | from cfgrib import abc 66 | 67 | class MyFieldset(abc.Fieldset): 68 | def __len__(self) -> int: # not used by cfgrib 69 | ... 70 | def __getitem__(self, item: int) -> abc.Field: 71 | ... 72 | def __iter__(self) -> Iterator[abc.Field]: 73 | ... 74 | 75 | 76 | If ``__getitem__`` and ``__iter__`` implement lazy loading of GRIB fields *cfgrib* and 77 | *Xarray* will be able to access larger-than-memory files. 78 | 79 | In the event a ``Field`` is identified by a more complex *key* than just an sequence *index* 80 | developers may implemnt a ``MappingFieldset`` class following this pattern: 81 | 82 | .. code-block:: python 83 | 84 | from typing import ItemsView, Iterator 85 | 86 | from cfgrib import abc 87 | 88 | class MyFieldset(abc.MappingFieldset[T.Any, abc.Field]): 89 | def __len__(self) -> int: # not used by cfgrib 90 | ... 91 | def __getitem__(self, item: int) -> abc.Field: 92 | ... 93 | def __iter__(self) -> Iterator[abc.Field]: # not used by cfgrib 94 | ... 95 | def items() -> ItemsView[T.Any, abc.Field]: 96 | ... 97 | 98 | 99 | Again if ``__getitem__`` and ``items`` implement lazy loading of GRIB fields *cfgrib* and 100 | *Xarray* will be able to access larger-than-memory files. 101 | 102 | An example of the ``MappingFieldset`` use is ``cfgrib.messages.FileStream`` that 103 | uses the *file offset* as the *key*. 104 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | 2 | .. highlight: console 3 | 4 | ============ 5 | Contributing 6 | ============ 7 | 8 | Contributions are welcome, and they are greatly appreciated! Every 9 | little bit helps, and credit will always be given. 10 | 11 | Please note, that we have hooked a CLA assistant to this GitHub Repo. Please accept the contributors license agreement to allow us to keep a legal track of contributions and keep this package open source for the future. 12 | 13 | You can contribute in many ways: 14 | 15 | Types of Contributions 16 | ---------------------- 17 | 18 | Report Bugs 19 | ~~~~~~~~~~~ 20 | 21 | Report bugs at https://github.com/ecmwf/cfgrib/issues 22 | 23 | If you are reporting a bug, please include: 24 | 25 | * Your operating system name and version. 26 | * Installation method and version of all dependencies. 27 | * Any details about your local setup that might be helpful in troubleshooting. 28 | * Detailed steps to reproduce the bug, including a sample file. 29 | 30 | Fix Bugs 31 | ~~~~~~~~ 32 | 33 | Look through the GitHub issues for bugs. Anything tagged with "bug" 34 | and "help wanted" is open to whoever wants to implement a fix for it. 
35 | 36 | Implement Features 37 | ~~~~~~~~~~~~~~~~~~ 38 | 39 | Look through the GitHub issues for features. Anything tagged with "enhancement" 40 | and "help wanted" is open to whoever wants to implement it. 41 | 42 | Get Started! 43 | ------------ 44 | 45 | Ready to contribute? Here's how to set up `cfgrib` for local development. Please note this documentation assumes 46 | you already have `virtualenv` and `Git` installed and ready to go. 47 | 48 | 1. Fork the `cfgrib` repo on GitHub. 49 | 2. Clone your fork locally:: 50 | 51 | $ cd path_for_the_repo 52 | $ git clone https://github.com/YOUR_NAME/cfgrib.git 53 | $ cd cfgrib 54 | 55 | 3. Assuming you have virtualenv installed, you can create a new environment for your local development by typing:: 56 | 57 | $ virtualenv ../cfgrib-env 58 | $ source ../cfgrib-env/bin/activate 59 | 60 | This should change the shell prompt to look something like 61 | (cfgrib-env) $ 62 | 63 | 4. Install system dependencies as described in the README.rst file, then install a known-good set of Python dependencies and your local copy with:: 64 | 65 | $ pip install -r ci/requirements-tests.txt 66 | $ pip install -e . 67 | 68 | 5. Create a branch for local development:: 69 | 70 | $ git checkout -b name-of-your-bugfix-or-feature 71 | 72 | Now you can make your changes locally. 73 | 74 | 6. The next step is to run the test cases. `cfgrib` uses pytest; before you run it, ensure all test dependencies are installed:: 75 | 76 | $ pip install -r ci/requirements-dev.txt 77 | $ pytest -v --flakes 78 | 79 | 7. Before raising a pull request you should also run tox. This will run the tests across different versions of Python:: 80 | 81 | $ tox 82 | 83 | 8. If your contribution is a bug fix or new feature, you should add a test to the existing test suite. 84 | 85 | 9. Commit your changes and push your branch to GitHub:: 86 | 87 | $ git add . 88 | $ git commit -m "Your detailed description of your changes." 89 | $ git push origin name-of-your-bugfix-or-feature 90 | 91 | 10. Submit a pull request through the GitHub website. 92 | 93 | Pull Request Guidelines 94 | ----------------------- 95 | 96 | Before you submit a pull request, check that it meets these guidelines: 97 | 98 | 1. The pull request should include tests. 99 | 100 | 2. If the pull request adds functionality, the docs should be updated. Put 101 | your new functionality into a function with a docstring, and add the 102 | feature to the list in README.rst. 103 | 104 | 3. The pull request should work for all supported versions of Python, including PyPy3. Check 105 | the tox results and make sure that the tests pass for all supported Python versions. 106 | 107 | 108 | Testing CDS data 109 | ---------------- 110 | 111 | You can test the CF-GRIB driver on a set of products downloaded from the Climate Data Store 112 | of the `Copernicus Climate Change Service `_. 113 | If you are not registered on the CDS portal, register at: 114 | 115 | https://cds.climate.copernicus.eu/user/register 116 | 117 | In order to automatically download and test the GRIB files, install and configure the `cdsapi` package:: 118 | 119 | $ pip install cdsapi 120 | $ pip install netcdf4 121 | 122 | Then log into the CDS portal and set up the CDS API key as described in: 123 | 124 | https://cds.climate.copernicus.eu/api-how-to 125 | 126 | Then you can run:: 127 | 128 | $ pytest -vv tests/cds_test_*.py 129 | 130 | 131 | .. cfgrib: https://github.com/ecmwf/cfgrib 132 | .. 
virtualenv: https://virtualenv.pypa.io/en/stable/installation 133 | .. git: https://git-scm.com/book/en/v2/Getting-Started-Installing-Git 134 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.grib 2 | include *.grib2 3 | include *.rst 4 | include *.yml 5 | include LICENSE 6 | include Makefile 7 | include tox.ini 8 | recursive-include cfgrib *.h 9 | recursive-include ci *.in 10 | recursive-include ci *.ps1 11 | recursive-include ci *.txt 12 | recursive-include ci *.yml 13 | recursive-include docs *.gitkeep 14 | recursive-include docs *.py 15 | recursive-include docs *.rst 16 | recursive-include tests *.grib 17 | recursive-include tests *.ipynb 18 | recursive-include tests *.py 19 | recursive-include tests *.yml 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | PACKAGE := cfgrib 3 | IMAGE := $(PACKAGE)-image 4 | MODULE := $(PACKAGE) 5 | 6 | COV_REPORT := html 7 | 8 | default: fix-code-style test 9 | 10 | fix-code-style: 11 | black . 12 | isort . 13 | 14 | unit-test: testclean 15 | python -m pytest -v --cov=. --cov-report=$(COV_REPORT) tests/ 16 | 17 | doc-test: testclean 18 | python -m pytest -v *.rst 19 | python -m pytest -v --doctest-modules cfgrib 20 | 21 | test: unit-test doc-test 22 | 23 | code-quality: 24 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics 25 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 26 | mypy --strict cfgrib tests/test_*py 27 | 28 | code-style: 29 | black --check . 30 | isort --check . 
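# NOTE: aggregate targets: "make test" runs unit-test and doc-test, while "make qc" (below) is shorthand for the flake8 and mypy checks in code-quality.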
31 | 32 | qc: code-quality 33 | 34 | testclean: 35 | $(RM) -r */__pycache__ .coverage .cache tests/.ipynb_checkpoints *.idx tests/sample-data/*.idx out*.grib 36 | 37 | distclean: testclean 38 | $(RM) -r */*.pyc htmlcov dist build .eggs *.egg-info 39 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | # CI on Windows via appveyor 2 | 3 | environment: 4 | matrix: 5 | - PYTHON: "C:\\Python37-conda64" 6 | PYTHON_VERSION: "3.7" 7 | PYTHON_ARCH: "64" 8 | CONDA_ENV: "py37" 9 | 10 | install: 11 | # Install miniconda Python 12 | - "powershell ./ci/install_python.ps1" 13 | 14 | build: false 15 | 16 | test_script: 17 | - "echo Pass" -------------------------------------------------------------------------------- /cf2cdm/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | from .cfcoords import translate_coords 17 | from .datamodels import CDS, ECMWF 18 | 19 | __all__ = ["CDS", "ECMWF", "translate_coords"] 20 | -------------------------------------------------------------------------------- /cf2cdm/cfcoords.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Authors: 17 | # Alessandro Amici - B-Open - https://bopen.eu 18 | # 19 | 20 | import functools 21 | import logging 22 | import typing as T 23 | 24 | import xarray as xr 25 | 26 | from . 
import cfunits 27 | 28 | CoordModelType = T.Dict[str, T.Dict[str, str]] 29 | CoordTranslatorType = T.Callable[[str, xr.Dataset, CoordModelType], xr.Dataset] 30 | 31 | COORD_MODEL: CoordModelType = {} 32 | COORD_TRANSLATORS: T.Dict[str, CoordTranslatorType] = {} 33 | LOG = logging.getLogger(__name__) 34 | 35 | 36 | def match_values(match_value_func, mapping): 37 | # type: (T.Callable[[T.Any], bool], T.Mapping[T.Hashable, T.Any]) -> T.List[str] 38 | matched_names = [] 39 | for name, value in mapping.items(): 40 | if match_value_func(value): 41 | matched_names.append(str(name)) 42 | return matched_names 43 | 44 | 45 | def translate_coord_direction(data, coord_name, stored_direction="increasing"): 46 | # type: (xr.Dataset, str, str) -> xr.Dataset 47 | if stored_direction not in ("increasing", "decreasing"): 48 | raise ValueError("unknown stored_direction %r" % stored_direction) 49 | if len(data.coords[coord_name].shape) == 0: 50 | return data 51 | values = data.coords[coord_name].values 52 | if values[0] < values[-1] and stored_direction == "decreasing": 53 | data = data.isel({coord_name: slice(None, None, -1)}) 54 | elif values[0] > values[-1] and stored_direction == "increasing": 55 | data = data.isel({coord_name: slice(None, None, -1)}) 56 | return data 57 | 58 | 59 | def coord_translator( 60 | default_out_name: str, 61 | default_units: str, 62 | default_direction: str, 63 | is_cf_type: T.Callable[[xr.IndexVariable], bool], 64 | cf_type: str, 65 | data: xr.Dataset, 66 | coord_model: CoordModelType = COORD_MODEL, 67 | ) -> xr.Dataset: 68 | out_name = coord_model.get(cf_type, {}).get("out_name", default_out_name) 69 | units = coord_model.get(cf_type, {}).get("units", default_units) 70 | stored_direction = coord_model.get(cf_type, {}).get("stored_direction", default_direction) 71 | matches = match_values(is_cf_type, data.coords) 72 | if len(matches) > 1: 73 | raise ValueError("found more than one CF coordinate with type %r." % cf_type) 74 | if not matches: 75 | return data 76 | match = matches[0] 77 | for name in data.coords: 78 | if name == out_name and name != match: 79 | raise ValueError("found non CF compliant coordinate with type %r." 
% cf_type) 80 | data = data.rename({match: out_name}) 81 | coord = data.coords[out_name] 82 | if "units" in coord.attrs: 83 | data.coords[out_name] = cfunits.convert_units(coord, units, coord.attrs["units"]) 84 | data.coords[out_name].attrs.update(coord.attrs) 85 | data.coords[out_name].attrs["units"] = units 86 | if out_name in data.dims: 87 | data = translate_coord_direction(data, out_name, stored_direction) 88 | return data 89 | 90 | 91 | VALID_LAT_UNITS = ["degrees_north", "degree_north", "degree_N", "degrees_N", "degreeN", "degreesN"] 92 | 93 | 94 | def is_latitude(coord: xr.IndexVariable) -> bool: 95 | return coord.attrs.get("units") in VALID_LAT_UNITS 96 | 97 | 98 | COORD_TRANSLATORS["latitude"] = functools.partial( 99 | coord_translator, "latitude", "degrees_north", "decreasing", is_latitude 100 | ) 101 | 102 | 103 | VALID_LON_UNITS = ["degrees_east", "degree_east", "degree_E", "degrees_E", "degreeE", "degreesE"] 104 | 105 | 106 | def is_longitude(coord: xr.IndexVariable) -> bool: 107 | return coord.attrs.get("units") in VALID_LON_UNITS 108 | 109 | 110 | COORD_TRANSLATORS["longitude"] = functools.partial( 111 | coord_translator, "longitude", "degrees_east", "increasing", is_longitude 112 | ) 113 | 114 | 115 | def is_time(coord: xr.IndexVariable) -> bool: 116 | return coord.attrs.get("standard_name") == "forecast_reference_time" 117 | 118 | 119 | TIME_CF_UNITS = "seconds since 1970-01-01T00:00:00+00:00" 120 | 121 | 122 | COORD_TRANSLATORS["time"] = functools.partial( 123 | coord_translator, "time", TIME_CF_UNITS, "increasing", is_time 124 | ) 125 | 126 | 127 | def is_step(coord: xr.IndexVariable) -> bool: 128 | return coord.attrs.get("standard_name") == "forecast_period" 129 | 130 | 131 | COORD_TRANSLATORS["step"] = functools.partial(coord_translator, "step", "h", "increasing", is_step) 132 | 133 | 134 | def is_valid_time(coord: xr.IndexVariable) -> bool: 135 | if coord.attrs.get("standard_name") == "time": 136 | return True 137 | elif str(coord.dtype) == "datetime64[ns]" and "standard_name" not in coord.attrs: 138 | return True 139 | return False 140 | 141 | 142 | COORD_TRANSLATORS["valid_time"] = functools.partial( 143 | coord_translator, "valid_time", TIME_CF_UNITS, "increasing", is_valid_time 144 | ) 145 | 146 | 147 | def is_depth(coord: xr.IndexVariable) -> bool: 148 | return coord.attrs.get("standard_name") == "depth" 149 | 150 | 151 | COORD_TRANSLATORS["depthBelowLand"] = functools.partial( 152 | coord_translator, "depthBelowLand", "m", "decreasing", is_depth 153 | ) 154 | 155 | 156 | def is_isobaric(coord: xr.IndexVariable) -> bool: 157 | return cfunits.are_convertible(coord.attrs.get("units", ""), "Pa") 158 | 159 | 160 | COORD_TRANSLATORS["isobaricInhPa"] = functools.partial( 161 | coord_translator, "isobaricInhPa", "hPa", "decreasing", is_isobaric 162 | ) 163 | 164 | 165 | def is_number(coord: xr.IndexVariable) -> bool: 166 | return coord.attrs.get("standard_name") == "realization" 167 | 168 | 169 | COORD_TRANSLATORS["number"] = functools.partial( 170 | coord_translator, "number", "1", "increasing", is_number 171 | ) 172 | 173 | 174 | # CF-Conventions have no concept of leadtime expressed in months 175 | def is_forecast_month(coord: xr.IndexVariable) -> bool: 176 | return coord.attrs.get("long_name") == "months since forecast_reference_time" 177 | 178 | 179 | COORD_TRANSLATORS["forecastMonth"] = functools.partial( 180 | coord_translator, "forecastMonth", "1", "increasing", is_forecast_month 181 | ) 182 | 183 | 184 | def translate_coords( 185 | data, 
coord_model=COORD_MODEL, errors="warn", coord_translators=COORD_TRANSLATORS 186 | ): 187 | # type: (xr.Dataset, CoordModelType, str, T.Dict[str, CoordTranslatorType]) -> xr.Dataset 188 | for cf_name, translator in coord_translators.items(): 189 | try: 190 | data = translator(cf_name, data, coord_model) 191 | except Exception: 192 | if errors == "ignore": 193 | pass 194 | elif errors == "raise": 195 | raise RuntimeError("error while translating coordinate: %r" % cf_name) 196 | else: 197 | LOG.warning("error while translating coordinate: %r", cf_name) 198 | return data 199 | -------------------------------------------------------------------------------- /cf2cdm/cfunits.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Authors: 17 | # Alessandro Amici - B-Open - https://bopen.eu 18 | # 19 | 20 | import typing as T 21 | 22 | PRESSURE_CONVERSION_RULES: T.Dict[T.Tuple[str, ...], float] = { 23 | ("Pa", "pascal", "pascals"): 1.0, 24 | ("hPa", "hectopascal", "hectopascals", "hpascal", "millibar", "millibars", "mbar"): 100.0, 25 | ("decibar", "dbar"): 10000.0, 26 | ("bar", "bars"): 100000.0, 27 | ("atmosphere", "atmospheres", "atm"): 101325.0, 28 | } 29 | 30 | LENGTH_CONVERSION_RULES: T.Dict[T.Tuple[str, ...], float] = { 31 | ("m", "meter", "meters"): 1.0, 32 | ("cm", "centimeter", "centimeters"): 0.01, 33 | ("km", "kilometer", "kilometers"): 1000.0, 34 | } 35 | 36 | 37 | class ConversionError(Exception): 38 | pass 39 | 40 | 41 | def simple_conversion_factor(source_units, target_units, rules): 42 | # type: (str, str, T.Dict[T.Tuple[str, ...], float]) -> float 43 | conversion_factor = 1.0 44 | seen = 0 45 | for pressure_units, factor in rules.items(): 46 | if source_units in pressure_units: 47 | conversion_factor /= factor 48 | seen += 1 49 | if target_units in pressure_units: 50 | conversion_factor *= factor 51 | seen += 1 52 | if seen != 2: 53 | raise ConversionError("cannot convert from %r to %r." % (source_units, target_units)) 54 | return conversion_factor 55 | 56 | 57 | def convert_units(data: T.Any, target_units: str, source_units: str) -> T.Any: 58 | if target_units == source_units: 59 | return data 60 | for rules in [PRESSURE_CONVERSION_RULES, LENGTH_CONVERSION_RULES]: 61 | try: 62 | return data * simple_conversion_factor(target_units, source_units, rules) 63 | except ConversionError: 64 | pass 65 | raise ConversionError("cannot convert from %r to %r."
% (source_units, target_units)) 66 | 67 | 68 | def are_convertible(source_units: str, target_units: str) -> bool: 69 | try: 70 | convert_units(1, source_units, target_units) 71 | except ConversionError: 72 | return False 73 | return True 74 | -------------------------------------------------------------------------------- /cf2cdm/datamodels.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Authors: 17 | # Alessandro Amici - B-Open - https://bopen.eu 18 | # 19 | 20 | CDS = { 21 | # geography 22 | "latitude": {"out_name": "lat", "stored_direction": "increasing"}, 23 | "longitude": {"out_name": "lon", "stored_direction": "increasing"}, 24 | # vertical 25 | "depthBelowLand": {"out_name": "depth", "units": "m", "stored_direction": "increasing"}, 26 | "isobaricInhPa": {"out_name": "plev", "units": "Pa", "stored_direction": "decreasing"}, 27 | # ensemble 28 | "number": {"out_name": "realization", "stored_direction": "increasing"}, 29 | # time 30 | "time": {"out_name": "forecast_reference_time", "stored_direction": "increasing"}, 31 | "valid_time": {"out_name": "time", "stored_direction": "increasing"}, 32 | "step": {"out_name": "leadtime", "stored_direction": "increasing"}, 33 | "forecastMonth": {"out_name": "leadtime_month", "stored_direction": "increasing"}, 34 | } 35 | 36 | 37 | ECMWF = { 38 | "depthBelowLand": {"out_name": "level", "units": "m", "stored_direction": "increasing"}, 39 | "isobaricInhPa": {"out_name": "level", "units": "hPa", "stored_direction": "decreasing"}, 40 | "isobaricInPa": {"out_name": "level", "units": "hPa", "stored_direction": "decreasing"}, 41 | "hybrid": {"out_name": "level", "stored_direction": "increasing"}, 42 | } 43 | -------------------------------------------------------------------------------- /cfgrib/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
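# NOTE: open_file() below is the low-level entry point returning a cfgrib.Dataset; most users go through xarray's engine="cfgrib" instead.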
15 | 16 | __version__ = "0.9.15.1" 17 | 18 | # cfgrib core API depends on the ECMWF ecCodes C-library only 19 | from .abc import Field, Fieldset, Index, MappingFieldset 20 | from .cfmessage import COMPUTED_KEYS 21 | from .dataset import ( 22 | Dataset, 23 | DatasetBuildError, 24 | compute_index_keys, 25 | open_fieldset, 26 | open_file, 27 | open_from_index, 28 | ) 29 | from .messages import FieldsetIndex, FileStream, Message 30 | 31 | # NOTE: xarray is not a hard dependency, but let's provide helpers if it is available. 32 | try: 33 | from .xarray_store import open_dataset, open_datasets 34 | except ImportError: 35 | pass 36 | -------------------------------------------------------------------------------- /cfgrib/__main__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Authors: 17 | # Alessandro Amici - B-Open - https://bopen.eu 18 | # 19 | 20 | import json 21 | import os.path 22 | import typing as T 23 | 24 | import click 25 | 26 | # NOTE: imports are executed inside functions so missing dependencies don't break all commands 27 | 28 | 29 | def handle_json(in_json): 30 | """ 31 | Handle input JSON, which can be a JSON format string or a path to a JSON format file. 32 | Returns a dictionary of the JSON contents. 33 | """ 34 | try: 35 | # Assume a json format string 36 | out_json = json.loads(in_json) 37 | except json.JSONDecodeError: 38 | # Then a json file 39 | with open(in_json, "r") as f: 40 | out_json = json.load(f) 41 | return out_json 42 | 43 | 44 | @click.group() 45 | def cfgrib_cli() -> None: 46 | pass 47 | 48 | 49 | @cfgrib_cli.command("selfcheck") 50 | def selfcheck() -> None: 51 | from .messages import eccodes_version 52 | 53 | print("Found: ecCodes v%s." % eccodes_version) 54 | print("Your system is ready.") 55 | 56 | 57 | @cfgrib_cli.command("to_netcdf") 58 | @click.argument("inpaths", nargs=-1) 59 | @click.option("--outpath", "-o", default=None, help="Filename of the output netcdf file.") 60 | @click.option( 61 | "--cdm", "-c", default=None, help="Coordinate model to translate the grib coordinates to." 62 | ) 63 | @click.option( 64 | "--engine", "-e", default="cfgrib", help="xarray engine to use in xarray.open_dataset." 65 | ) 66 | @click.option( 67 | "--backend-kwargs-json", 68 | "-b", 69 | default=None, 70 | help=( 71 | "Backend kwargs used in xarray.open_dataset. " 72 | "Can either be a JSON format string or " 73 | "the path to a JSON file" 74 | ), 75 | ) 76 | @click.option( 77 | "--netcdf-kwargs-json", 78 | "-n", 79 | default=None, 80 | help=( 81 | "kwargs used by xarray.to_netcdf when creating the netCDF file. " 82 | "Can either be a JSON format string or the path to a JSON file. " 83 | ), 84 | ) 85 | @click.option( 86 | "--var-encoding-json", 87 | "-v", 88 | default=None, 89 | help=( 90 | "encoding options to apply to all data variables in the dataset. "
" 91 | "Can either be a JSON format string or the path to JSON file. " 92 | ), 93 | ) 94 | def to_netcdf( 95 | inpaths, outpath, cdm, engine, backend_kwargs_json, netcdf_kwargs_json, var_encoding_json 96 | ): # type: (T.List[str], str, str, str, str, str, str) -> None 97 | import xarray as xr 98 | 99 | import cf2cdm 100 | 101 | # NOTE: noop if no input argument 102 | if len(inpaths) == 0: 103 | return 104 | 105 | if not outpath: 106 | outpath = os.path.splitext(inpaths[0])[0] + ".nc" 107 | 108 | if backend_kwargs_json is not None: 109 | backend_kwargs = handle_json(backend_kwargs_json) 110 | else: 111 | backend_kwargs = {} 112 | 113 | if len(inpaths) == 1: 114 | # avoid to depend on dask when passing only one file 115 | ds = xr.open_dataset( 116 | inpaths[0], 117 | engine=engine, 118 | backend_kwargs=backend_kwargs, 119 | ) # type: ignore 120 | else: 121 | ds = xr.open_mfdataset( 122 | inpaths, 123 | engine=engine, 124 | combine="by_coords", 125 | backend_kwargs=backend_kwargs, 126 | ) # type: ignore 127 | 128 | if cdm: 129 | coord_model = getattr(cf2cdm, cdm) 130 | ds = cf2cdm.translate_coords(ds, coord_model=coord_model) 131 | 132 | if netcdf_kwargs_json is not None: 133 | netcdf_kwargs = handle_json(netcdf_kwargs_json) 134 | else: 135 | netcdf_kwargs = {} 136 | 137 | if var_encoding_json is not None: 138 | var_encoding = handle_json(var_encoding_json) 139 | netcdf_kwargs.setdefault("encoding", {}) 140 | for var in ds.data_vars: 141 | netcdf_kwargs["encoding"].setdefault(var, var_encoding) 142 | 143 | ds.to_netcdf(outpath, **netcdf_kwargs) 144 | 145 | 146 | @cfgrib_cli.command("dump") 147 | @click.argument("inpaths", nargs=-1) 148 | @click.option("--variable", "-v", default=None) 149 | @click.option("--cdm", "-c", default=None) 150 | @click.option("--engine", "-e", default="cfgrib") 151 | def dump(inpaths, variable, cdm, engine): 152 | # type: (T.List[str], str, str, str) -> None 153 | import xarray as xr 154 | 155 | import cf2cdm 156 | 157 | # NOTE: noop if no input argument 158 | if len(inpaths) == 0: 159 | return 160 | 161 | if len(inpaths) == 1: 162 | # avoid to depend on dask when passing only one file 163 | ds = xr.open_dataset(inpaths[0], engine=engine) # type: ignore 164 | else: 165 | ds = xr.open_mfdataset(inpaths, engine=engine, combine="by_coords") # type: ignore 166 | 167 | if cdm: 168 | coord_model = getattr(cf2cdm, cdm) 169 | ds = cf2cdm.translate_coords(ds, coord_model=coord_model) 170 | 171 | if variable: 172 | ds_or_da = ds[variable] # type: ignore 173 | else: 174 | ds_or_da = ds # type: ignore 175 | 176 | print(ds_or_da) 177 | 178 | 179 | if __name__ == "__main__": # pragma: no cover 180 | cfgrib_cli() 181 | -------------------------------------------------------------------------------- /cfgrib/abc.py: -------------------------------------------------------------------------------- 1 | """Abstract Base Classes for GRIB fields and fieldsets""" 2 | import abc 3 | import typing as T 4 | 5 | FieldIdTypeVar = T.TypeVar("FieldIdTypeVar") 6 | FieldTypeVar = T.TypeVar("FieldTypeVar", bound="Field") 7 | 8 | Field = T.Mapping[str, T.Any] 9 | MutableField = T.MutableMapping[str, T.Any] 10 | MappingFieldset = T.Mapping[FieldIdTypeVar, FieldTypeVar] 11 | Fieldset = T.Sequence[FieldTypeVar] 12 | 13 | 14 | class Index(T.Mapping[str, T.List[T.Any]], T.Generic[FieldIdTypeVar, FieldTypeVar]): 15 | fieldset: T.Union[Fieldset[FieldTypeVar], MappingFieldset[FieldIdTypeVar, FieldTypeVar]] 16 | index_keys: T.List[str] 17 | filter_by_keys: T.Dict[str, T.Any] = {} 18 | 19 | @abc.abstractmethod 
20 | def subindex( 21 | self, filter_by_keys: T.Mapping[str, T.Any] = {}, **query: T.Any 22 | ) -> "Index[FieldIdTypeVar, FieldTypeVar]": 23 | pass 24 | 25 | @abc.abstractmethod 26 | def getone(self, item: str) -> T.Any: 27 | pass 28 | 29 | @abc.abstractmethod 30 | def first(self) -> FieldTypeVar: 31 | pass 32 | 33 | @abc.abstractmethod 34 | def source(self) -> str: 35 | pass 36 | 37 | @abc.abstractmethod 38 | def iter_index(self) -> T.Iterator[T.Tuple[T.Tuple[T.Any, ...], T.List[FieldIdTypeVar]]]: 39 | pass 40 | -------------------------------------------------------------------------------- /cfgrib/cfmessage.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Authors: 17 | # Baudouin Raoult - ECMWF - https://ecmwf.int 18 | # Alessandro Amici - B-Open - https://bopen.eu 19 | # 20 | 21 | import datetime 22 | import functools 23 | import logging 24 | import typing as T 25 | 26 | import attr 27 | import numpy as np 28 | 29 | from . import abc, messages 30 | 31 | LOG = logging.getLogger(__name__) 32 | 33 | # taken from eccodes stepUnits.table 34 | GRIB_STEP_UNITS_TO_SECONDS = [ 35 | 60, 36 | 3600, 37 | 86400, 38 | None, 39 | None, 40 | None, 41 | None, 42 | None, 43 | None, 44 | None, 45 | 10800, 46 | 21600, 47 | 43200, 48 | 1, 49 | 900, 50 | 1800, 51 | ] # type: T.List[T.Union[int, None]] 52 | DEFAULT_EPOCH = datetime.datetime(1970, 1, 1) 53 | 54 | 55 | def from_grib_date_time(message, date_key="dataDate", time_key="dataTime", epoch=DEFAULT_EPOCH): 56 | # type: (abc.Field, str, str, datetime.datetime) -> int 57 | """ 58 | Return the number of seconds since the ``epoch`` from the values of the ``message`` keys, 59 | using datetime.total_seconds(). 
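For example, a field with ``{"dataDate": 20211216, "dataTime": 1200}`` yields ``1639656000``, i.e. 2021-12-16T12:00:00 relative to the default epoch.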
60 | 61 | :param message: the target GRIB message 62 | :param date_key: the date key, defaults to "dataDate" 63 | :param time_key: the time key, defaults to "dataTime" 64 | :param epoch: the reference datetime 65 | """ 66 | date = message[date_key] 67 | time = message[time_key] 68 | hour = time // 100 69 | minute = time % 100 70 | year = date // 10000 71 | month = date // 100 % 100 72 | day = date % 100 73 | data_datetime = datetime.datetime(year, month, day, hour, minute) 74 | # Python 2 compatible timestamp implementation without timezone hurdle 75 | # see: https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp 76 | return int((data_datetime - epoch).total_seconds()) 77 | 78 | 79 | def to_grib_date_time( 80 | message, time_ns, date_key="dataDate", time_key="dataTime", epoch=DEFAULT_EPOCH 81 | ): 82 | # type: (abc.MutableField, int, str, str, datetime.datetime) -> None 83 | time_s = int(time_ns) * 1e-9 84 | time = epoch + datetime.timedelta(seconds=time_s) 85 | datetime_iso = str(time) 86 | message[date_key] = int(datetime_iso[:10].replace("-", "")) 87 | message[time_key] = int(datetime_iso[11:16].replace(":", "")) 88 | 89 | 90 | def from_grib_step(message, step_key="endStep:int", step_unit_key="stepUnits:int"): 91 | # type: (abc.Field, str, str) -> float 92 | step_unit = message[step_unit_key] 93 | to_seconds = GRIB_STEP_UNITS_TO_SECONDS[step_unit] 94 | if to_seconds is None: 95 | raise ValueError("unsupported stepUnit %r" % step_unit) 96 | assert isinstance(to_seconds, int) # mypy misses this 97 | return int(message[step_key]) * to_seconds / 3600.0 98 | 99 | 100 | def to_grib_step(message, step_ns, step_unit=1, step_key="endStep:int", step_unit_key="stepUnits:int"): 101 | # type: (abc.MutableField, int, int, str, str) -> None 102 | step_s = step_ns * 1e-9 103 | to_seconds = GRIB_STEP_UNITS_TO_SECONDS[step_unit] 104 | if to_seconds is None: 105 | raise ValueError("unsupported stepUnit %r" % step_unit) 106 | message[step_key] = int(step_s / to_seconds) 107 | message[step_unit_key] = step_unit 108 | 109 | 110 | def from_grib_step_units(message): 111 | # type: (abc.Field) -> float 112 | # we always index steps in hours 113 | return 1 114 | 115 | 116 | def to_grib_step_units(message, step_unit=1, step_unit_key="stepUnits:int"): 117 | # type: (abc.MutableField, int, str) -> None 118 | message[step_unit_key] = step_unit 119 | 120 | 121 | def from_grib_month(message, verifying_month_key="verifyingMonth", epoch=DEFAULT_EPOCH): 122 | # type: (abc.Field, str, datetime.datetime) -> int 123 | date = message[verifying_month_key] 124 | year = date // 100 125 | month = date % 100 126 | data_datetime = datetime.datetime(year, month, 1, 0, 0) 127 | return int((data_datetime - epoch).total_seconds()) 128 | 129 | 130 | def to_grib_dummy(message, value): 131 | # type: (abc.MutableField, T.Any) -> None 132 | pass 133 | 134 | 135 | def build_valid_time(time, step): 136 | # type: (np.ndarray, np.ndarray) -> T.Tuple[T.Tuple[str, ...], np.ndarray] 137 | """ 138 | Return dimensions and data of the valid_time corresponding to the given ``time`` and ``step``. 139 | The data is seconds from the same epoch as ``time`` and may have one or two dimensions. 140 | 141 | :param time: given in seconds from an epoch, as returned by ``from_grib_date_time`` 142 | :param step: given in hours, as returned by ``from_grib_step`` 143 | """ 144 | step_s = step * 3600 145 | if len(time.shape) == 0 and len(step.shape) == 0: 146 | data = time + step_s 147 | dims = () # type: T.Tuple[str, ...] 
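# a scalar time and a scalar step give a scalar valid_time; the branches below broadcast 1-D time and/or step arrays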
148 | elif len(time.shape) > 0 and len(step.shape) == 0: 149 | data = time + step_s 150 | dims = ("time",) 151 | elif len(time.shape) == 0 and len(step.shape) > 0: 152 | data = time + step_s 153 | dims = ("step",) 154 | else: 155 | data = time[:, None] + step_s[None, :] 156 | dims = ("time", "step") 157 | return dims, data 158 | 159 | 160 | COMPUTED_KEYS = { 161 | "time": (from_grib_date_time, to_grib_date_time), 162 | "step": (from_grib_step, to_grib_step), 163 | "endStep": (from_grib_step, to_grib_step), 164 | "stepUnits": (from_grib_step_units, to_grib_step_units), 165 | "valid_time": ( 166 | functools.partial(from_grib_date_time, date_key="validityDate", time_key="validityTime"), 167 | functools.partial(to_grib_date_time, date_key="validityDate", time_key="validityTime"), 168 | ), 169 | "verifying_time": (from_grib_month, to_grib_dummy), 170 | "indexing_time": ( 171 | functools.partial(from_grib_date_time, date_key="indexingDate", time_key="indexingTime"), 172 | functools.partial(to_grib_date_time, date_key="indexingDate", time_key="indexingTime"), 173 | ), 174 | "valid_month": ( 175 | functools.partial(from_grib_date_time, date_key="monthlyVerificationDate", time_key="validityTime"), 176 | functools.partial(to_grib_date_time, date_key="monthlyVerificationDate", time_key="validityTime"), 177 | ), 178 | } # type: messages.ComputedKeysType 179 | 180 | 181 | @attr.attrs(auto_attribs=True) 182 | class CfMessage(messages.ComputedKeysMessage): 183 | computed_keys: messages.ComputedKeysType = COMPUTED_KEYS 184 | -------------------------------------------------------------------------------- /cfgrib/xarray_plugin.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | import typing as T 4 | 5 | import numpy as np 6 | import xarray as xr 7 | from packaging.version import Version 8 | 9 | if Version(xr.__version__) <= Version("0.17.0"): 10 | raise ImportError("xarray_plugin module needs xarray version >= 0.18+") 11 | 12 | from xarray.backends.common import AbstractDataStore, BackendArray, BackendEntrypoint 13 | from xarray.backends.locks import SerializableLock, ensure_lock 14 | 15 | from . import abc, dataset, messages 16 | 17 | # FIXME: Add a dedicated lock, even if ecCodes is supposed to be thread-safe 18 | # in most circumstances. See: 19 | # https://confluence.ecmwf.int/display/ECC/Frequently+Asked+Questions 20 | ECCODES_LOCK = SerializableLock() # type: ignore 21 | 22 | 23 | class CfGribDataStore(AbstractDataStore): 24 | """ 25 | Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file. 
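    Instances are normally created for you by ``xr.open_dataset(path, engine="cfgrib")``;
    a direct, illustrative use (sample file from this repository, with ``indexpath=""``
    assumed to skip the on-disk index)::

        store = CfGribDataStore("era5-levels-members.grib", indexpath="")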
26 | """ 27 | 28 | def __init__( 29 | self, 30 | filename: T.Union[str, abc.Fieldset[abc.Field], abc.MappingFieldset[T.Any, abc.Field]], 31 | lock: T.Union[T.ContextManager[T.Any], None] = None, 32 | **backend_kwargs: T.Any, 33 | ): 34 | if lock is None: 35 | lock = ECCODES_LOCK 36 | self.lock = ensure_lock(lock) # type: ignore 37 | if isinstance(filename, (str, pathlib.PurePath)): 38 | opener = dataset.open_file 39 | else: 40 | opener = dataset.open_fieldset 41 | self.ds = opener(filename, **backend_kwargs) 42 | 43 | def open_store_variable( 44 | self, 45 | var: dataset.Variable, 46 | ) -> xr.Variable: 47 | if isinstance(var.data, np.ndarray): 48 | data = var.data 49 | else: 50 | wrapped_array = CfGribArrayWrapper(self, var.data) 51 | data = xr.core.indexing.LazilyIndexedArray(wrapped_array) # type: ignore 52 | encoding = self.ds.encoding.copy() 53 | encoding["original_shape"] = var.data.shape 54 | 55 | return xr.Variable(var.dimensions, data, var.attributes, encoding) # type: ignore 56 | 57 | def get_variables(self) -> xr.core.utils.Frozen[T.Any, T.Any]: 58 | return xr.core.utils.FrozenDict( 59 | (k, self.open_store_variable(v)) for k, v in self.ds.variables.items() 60 | ) 61 | 62 | def get_attrs(self) -> xr.core.utils.Frozen[T.Any, T.Any]: 63 | return xr.core.utils.Frozen(self.ds.attributes) 64 | 65 | def get_dimensions(self) -> xr.core.utils.Frozen[T.Any, T.Any]: 66 | return xr.core.utils.Frozen(self.ds.dimensions) 67 | 68 | def get_encoding(self) -> T.Dict[str, T.Set[str]]: 69 | dims = self.get_dimensions() 70 | encoding = {"unlimited_dims": {k for k, v in dims.items() if v is None}} 71 | return encoding 72 | 73 | 74 | class CfGribBackend(BackendEntrypoint): 75 | description = "Open GRIB files (.grib, .grib2, .grb and .grb2) in Xarray" 76 | url = "https://github.com/ecmwf/cfgrib" 77 | 78 | def guess_can_open( 79 | self, 80 | store_spec: str, 81 | ) -> bool: 82 | try: 83 | _, ext = os.path.splitext(store_spec) 84 | except TypeError: 85 | return False 86 | return ext in {".grib", ".grib2", ".grb", ".grb2"} 87 | 88 | def open_dataset( 89 | self, 90 | filename_or_obj: T.Union[str, abc.MappingFieldset[T.Any, abc.Field]], 91 | *, 92 | mask_and_scale: bool = True, 93 | decode_times: bool = True, 94 | concat_characters: bool = True, 95 | decode_coords: bool = True, 96 | drop_variables: T.Union[T.Iterable[str], None] = None, 97 | use_cftime: T.Union[bool, None] = None, 98 | decode_timedelta: T.Union[bool, None] = None, 99 | lock: T.Union[T.ContextManager[T.Any], None] = None, 100 | indexpath: str = messages.DEFAULT_INDEXPATH, 101 | filter_by_keys: T.Dict[str, T.Any] = {}, 102 | read_keys: T.Iterable[str] = (), 103 | ignore_keys: T.Iterable[str] = (), 104 | encode_cf: T.Sequence[str] = ("parameter", "time", "geography", "vertical"), 105 | squeeze: bool = True, 106 | time_dims: T.Iterable[str] = ("time", "step"), 107 | errors: str = "warn", 108 | extra_coords: T.Dict[str, str] = {}, 109 | coords_as_attributes: T.Dict[str, str] = {}, 110 | cache_geo_coords: bool = True, 111 | values_dtype: np.dtype = messages.DEFAULT_VALUES_DTYPE, 112 | ) -> xr.Dataset: 113 | store = CfGribDataStore( 114 | filename_or_obj, 115 | indexpath=indexpath, 116 | filter_by_keys=filter_by_keys, 117 | read_keys=read_keys, 118 | ignore_keys=ignore_keys, 119 | encode_cf=encode_cf, 120 | squeeze=squeeze, 121 | time_dims=time_dims, 122 | lock=lock, 123 | errors=errors, 124 | extra_coords=extra_coords, 125 | coords_as_attributes=coords_as_attributes, 126 | cache_geo_coords=cache_geo_coords, 127 | values_dtype=values_dtype, 128 
| ) 129 | with xr.core.utils.close_on_error(store): 130 | vars, attrs = store.load() # type: ignore 131 | encoding = store.get_encoding() 132 | vars, attrs, coord_names = xr.conventions.decode_cf_variables( 133 | vars, 134 | attrs, 135 | mask_and_scale=mask_and_scale, 136 | decode_times=decode_times, 137 | concat_characters=concat_characters, 138 | decode_coords=decode_coords, 139 | drop_variables=drop_variables, 140 | use_cftime=use_cftime, 141 | decode_timedelta=decode_timedelta, 142 | ) # type: ignore 143 | 144 | ds = xr.Dataset(vars, attrs=attrs) 145 | ds = ds.set_coords(coord_names.intersection(vars)) 146 | ds.set_close(store.close) 147 | ds.encoding = encoding 148 | return ds 149 | 150 | 151 | class CfGribArrayWrapper(BackendArray): 152 | def __init__( 153 | self, datastore: CfGribDataStore, array: T.Union[dataset.OnDiskArray, np.ndarray] 154 | ): 155 | self.datastore = datastore 156 | self.shape = array.shape 157 | self.dtype = array.dtype 158 | self.array = array 159 | 160 | def __getitem__( 161 | self, 162 | key: xr.core.indexing.ExplicitIndexer, 163 | ) -> np.ndarray: 164 | return xr.core.indexing.explicit_indexing_adapter( 165 | key, self.shape, xr.core.indexing.IndexingSupport.BASIC, self._getitem 166 | ) 167 | 168 | def _getitem( 169 | self, 170 | key: T.Tuple[T.Any, ...], 171 | ) -> np.ndarray: 172 | with self.datastore.lock: 173 | return self.array[key] 174 | -------------------------------------------------------------------------------- /cfgrib/xarray_store.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Authors: 17 | # Alessandro Amici - B-Open - https://bopen.eu 18 | # 19 | 20 | import logging 21 | import typing as T 22 | 23 | import xarray as xr 24 | 25 | from . import cfmessage, messages 26 | from .dataset import DatasetBuildError, open_fileindex 27 | 28 | LOGGER = logging.getLogger(__name__) 29 | 30 | 31 | def open_dataset(path, **kwargs): 32 | # type: (str, T.Any) -> xr.Dataset 33 | """ 34 | Return a ``xr.Dataset`` with the requested ``backend_kwargs`` from a GRIB file. 
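    A minimal sketch (the sample file ships with this repository; the filter
    keys are illustrative)::

        from cfgrib import xarray_store

        ds = xarray_store.open_dataset(
            "era5-levels-members.grib",
            backend_kwargs={"filter_by_keys": {"typeOfLevel": "isobaricInhPa"}},
        )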
35 | """ 36 | if "engine" in kwargs and kwargs["engine"] != "cfgrib": 37 | raise ValueError("only engine=='cfgrib' is supported") 38 | kwargs["engine"] = "cfgrib" 39 | return xr.open_dataset(path, **kwargs) # type: ignore 40 | 41 | 42 | def merge_datasets(datasets, **kwargs): 43 | # type: (T.Sequence[xr.Dataset], T.Any) -> T.List[xr.Dataset] 44 | merged = [] # type: T.List[xr.Dataset] 45 | first = [] # type: T.List[xr.Dataset] 46 | for ds in datasets: 47 | ds.attrs.pop("history", None) 48 | for i, o in enumerate(merged): 49 | if all(o.attrs[k] == ds.attrs[k] for k in o.attrs): 50 | try: 51 | o = xr.merge([o, ds], **kwargs) 52 | o.attrs.update(ds.attrs) 53 | merged[i] = o 54 | break 55 | except Exception: 56 | pass 57 | else: 58 | merged.append(ds) 59 | first.append(ds) 60 | 61 | # Add the important coordinate encoding fields from the first found, to the merged: 62 | preserve_encoding_fields = ["source", "units", "calendar", "dtype"] 63 | for i, o in enumerate(first): 64 | for var in o.coords: 65 | out_encoding = { 66 | key: o[var].encoding[key] 67 | for key in preserve_encoding_fields 68 | if key in o[var].encoding 69 | } 70 | merged[i][var].encoding.update(out_encoding) 71 | 72 | return merged 73 | 74 | 75 | def raw_open_datasets(path, backend_kwargs={}, **kwargs): 76 | # type: (str, T.Dict[str, T.Any], T.Any) -> T.List[xr.Dataset] 77 | fbks = [] 78 | datasets = [] 79 | try: 80 | datasets.append(open_dataset(path, backend_kwargs=backend_kwargs, **kwargs)) 81 | except DatasetBuildError as ex: 82 | fbks.extend(ex.args[2]) 83 | # NOTE: the recursive call needs to stay out of the exception handler to avoid showing 84 | # to the user a confusing error message due to exception chaining 85 | for fbk in fbks: 86 | bks = backend_kwargs.copy() 87 | bks["filter_by_keys"] = fbk 88 | datasets.extend(raw_open_datasets(path, backend_kwargs=bks, **kwargs)) 89 | return datasets 90 | 91 | 92 | def open_variable_datasets(path, backend_kwargs={}, **kwargs): 93 | # type: (str, T.Dict[str, T.Any], T.Any) -> T.List[xr.Dataset] 94 | fileindex_kwargs = { 95 | key: backend_kwargs[key] 96 | for key in ["filter_by_keys", "indexpath"] 97 | if key in backend_kwargs 98 | } 99 | errors = backend_kwargs.get("errors", "warn") 100 | stream = messages.FileStream(path, errors=errors) 101 | index = open_fileindex(stream, computed_keys=cfmessage.COMPUTED_KEYS, **fileindex_kwargs) 102 | datasets = [] # type: T.List[xr.Dataset] 103 | for param_id in sorted(index["paramId"]): 104 | bk = backend_kwargs.copy() 105 | bk["filter_by_keys"] = backend_kwargs.get("filter_by_keys", {}).copy() 106 | bk["filter_by_keys"]["paramId"] = param_id 107 | datasets.extend(raw_open_datasets(path, bk, **kwargs)) 108 | return datasets 109 | 110 | 111 | def open_datasets(path, backend_kwargs={}, **kwargs): 112 | # type: (str, T.Dict[str, T.Any], T.Any) -> T.List[xr.Dataset] 113 | """ 114 | Open a GRIB file grouping incompatible hypercubes to different datasets via simple heuristics. 
115 | """ 116 | squeeze = backend_kwargs.get("squeeze", True) 117 | backend_kwargs = backend_kwargs.copy() 118 | backend_kwargs["squeeze"] = False 119 | datasets = open_variable_datasets(path, backend_kwargs=backend_kwargs, **kwargs) 120 | 121 | type_of_level_datasets = {} # type: T.Dict[str, T.List[xr.Dataset]] 122 | for ds in datasets: 123 | for _, da in ds.data_vars.items(): 124 | type_of_level = da.attrs.get("GRIB_typeOfLevel", "undef") 125 | type_of_level_datasets.setdefault(type_of_level, []).append(ds) 126 | 127 | merged = [] # type: T.List[xr.Dataset] 128 | for type_of_level in sorted(type_of_level_datasets): 129 | for ds in merge_datasets( 130 | type_of_level_datasets[type_of_level], join="exact", combine_attrs="identical" 131 | ): 132 | merged.append(ds.squeeze() if squeeze else ds) 133 | return merged 134 | -------------------------------------------------------------------------------- /cfgrib/xarray_to_grib.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Authors: 17 | # Alessandro Amici - B-Open - https://bopen.eu 18 | # Aureliana Barghini - B-Open - https://bopen.eu 19 | # Leonardo Barcaroli - B-Open - https://bopen.eu 20 | # 21 | 22 | import itertools 23 | import logging 24 | import typing as T 25 | import warnings 26 | 27 | import numpy as np 28 | import xarray as xr 29 | from packaging import version 30 | 31 | if version.parse(xr.__version__) < version.parse("2025.9.1"): 32 | from xarray.backends import api as backends_api 33 | else: 34 | # Refactor: https://github.com/pydata/xarray/pull/10771/files 35 | from xarray.backends import writers as backends_api 36 | 37 | from . 
import cfmessage, dataset, messages 38 | 39 | LOGGER = logging.getLogger(__name__) 40 | 41 | 42 | DEFAULT_GRIB_KEYS = { 43 | "centre": 255, # missing value, see: http://apps.ecmwf.int/codes/grib/format/grib1/centre/0/ 44 | "typeOfLevel": "surface", 45 | } 46 | TYPE_OF_LEVELS_SFC = ["surface", "meanSea", "cloudBase", "cloudTop"] 47 | TYPE_OF_LEVELS_PL = ["isobaricInhPa", "isobaricInPa"] 48 | TYPE_OF_LEVELS_ML = ["hybrid"] 49 | ALL_TYPE_OF_LEVELS = TYPE_OF_LEVELS_SFC + TYPE_OF_LEVELS_PL + TYPE_OF_LEVELS_ML 50 | GRID_TYPES = [ 51 | "polar_stereographic", 52 | "reduced_gg", 53 | "reduced_ll", 54 | "regular_gg", 55 | "regular_ll", 56 | "rotated_gg", 57 | "rotated_ll", 58 | "sh", 59 | ] 60 | 61 | 62 | def regular_ll_params(values, min_value=-180.0, max_value=360.0): 63 | # type: (np.ndarray, float, float) -> T.Tuple[float, float, int] 64 | start, stop, num = float(values[0]), float(values[-1]), len(values) 65 | if min(start, stop) < min_value or max(start, stop) > max_value: 66 | raise ValueError("Unsupported spatial grid: out of bounds (%r, %r)" % (start, stop)) 67 | check_values = np.linspace(start, stop, num) 68 | if not np.allclose(check_values, values): 69 | raise ValueError("Unsupported spatial grid: not regular %r" % (check_values,)) 70 | return (start, stop, num) 71 | 72 | 73 | def detect_regular_ll_grib_keys(lon, lat): 74 | # type: (np.ndarray, np.ndarray) -> T.Dict[str, T.Any] 75 | grib_keys = {} # type: T.Dict[str, T.Any] 76 | 77 | lon_start, lon_stop, lon_num = regular_ll_params(lon) 78 | lon_scan_negatively = lon_stop < lon_start 79 | lon_step = abs(lon_stop - lon_start) / (lon_num - 1.0) 80 | if lon_start < 0.0: 81 | lon_start += 360.0 82 | if lon_stop < 0.0: 83 | lon_stop += 360.0 84 | grib_keys["longitudeOfFirstGridPointInDegrees"] = lon_start 85 | grib_keys["longitudeOfLastGridPointInDegrees"] = lon_stop 86 | grib_keys["Ni"] = lon_num 87 | grib_keys["iDirectionIncrementInDegrees"] = lon_step 88 | grib_keys["iScansNegatively"] = lon_scan_negatively 89 | 90 | lat_start, lat_stop, lat_num = regular_ll_params(lat, min_value=-90.0, max_value=90.0) 91 | grib_keys["latitudeOfFirstGridPointInDegrees"] = lat_start 92 | grib_keys["latitudeOfLastGridPointInDegrees"] = lat_stop 93 | grib_keys["Nj"] = lat_num 94 | grib_keys["jDirectionIncrementInDegrees"] = abs(lat_stop - lat_start) / (lat_num - 1.0) 95 | grib_keys["jScansPositively"] = lat_stop > lat_start 96 | grib_keys["gridType"] = "regular_ll" 97 | 98 | return grib_keys 99 | 100 | 101 | def detect_grib_keys(data_var, default_grib_keys, grib_keys={}): 102 | # type: (xr.DataArray, T.Dict[str, T.Any], T.Dict[str, T.Any]) -> T.Tuple[T.Dict[str, T.Any], T.Dict[str, T.Any]] 103 | detected_grib_keys = {} 104 | suggested_grib_keys = default_grib_keys.copy() 105 | 106 | for key_raw, value in data_var.attrs.items(): 107 | key = str(key_raw) 108 | if key[:5] == "GRIB_": 109 | suggested_grib_keys[key[5:]] = value 110 | 111 | if "latitude" in data_var.dims and "longitude" in data_var.dims: 112 | try: 113 | regular_ll_keys = detect_regular_ll_grib_keys(data_var.longitude, data_var.latitude) 114 | detected_grib_keys.update(regular_ll_keys) 115 | except: 116 | pass 117 | 118 | for tol in ALL_TYPE_OF_LEVELS: 119 | if tol in data_var.dims or tol in data_var.coords: 120 | detected_grib_keys["typeOfLevel"] = tol 121 | 122 | if "number" in data_var.dims or "number" in data_var.coords and grib_keys.get("edition") != 1: 123 | # cannot set 'number' key without setting a productDefinitionTemplateNumber in GRIB2 124 | 
detected_grib_keys["productDefinitionTemplateNumber"] = 1 125 | 126 | if "values" in data_var.dims: 127 | detected_grib_keys["numberOfPoints"] = data_var.shape[data_var.dims.index("values")] 128 | 129 | return detected_grib_keys, suggested_grib_keys 130 | 131 | 132 | def detect_sample_name(grib_keys, sample_name_template="{geography}_{vertical}_grib{edition}"): 133 | # type: (T.Mapping[str, T.Any], str) -> str 134 | edition = grib_keys.get("edition", 2) 135 | 136 | if grib_keys["gridType"] in GRID_TYPES: 137 | geography = grib_keys["gridType"] 138 | else: 139 | LOGGER.warning("unknown 'gridType': %r. Using GRIB2 template", grib_keys["gridType"]) 140 | return "GRIB2" 141 | 142 | if grib_keys["typeOfLevel"] in TYPE_OF_LEVELS_PL: 143 | vertical = "pl" 144 | elif grib_keys["typeOfLevel"] in TYPE_OF_LEVELS_SFC: 145 | vertical = "sfc" 146 | elif grib_keys["typeOfLevel"] in TYPE_OF_LEVELS_ML: 147 | vertical = "ml" 148 | else: 149 | LOGGER.warning("unknown 'typeOfLevel': %r. Using GRIB2 template", grib_keys["typeOfLevel"]) 150 | return "GRIB2" 151 | 152 | sample_name = sample_name_template.format(**locals()) 153 | return sample_name 154 | 155 | 156 | def merge_grib_keys(grib_keys, detected_grib_keys, default_grib_keys): 157 | # type: (T.Dict[str, T.Any], T.Dict[str, T.Any], T.Dict[str, T.Any]) -> T.Dict[str, T.Any] 158 | merged_grib_keys = {k: v for k, v in grib_keys.items()} 159 | dataset.dict_merge(merged_grib_keys, detected_grib_keys) 160 | for key, value in default_grib_keys.items(): 161 | if key not in merged_grib_keys: 162 | merged_grib_keys[key] = value 163 | return merged_grib_keys 164 | 165 | 166 | def expand_dims(data_var: xr.DataArray) -> T.Tuple[T.List[str], xr.DataArray]: 167 | coords_names = [] # type: T.List[str] 168 | for coord_name in dataset.ALL_HEADER_DIMS + ALL_TYPE_OF_LEVELS + dataset.ALL_REF_TIME_KEYS: 169 | if ( 170 | coord_name in data_var.coords 171 | and data_var.coords[coord_name].size == 1 172 | and coord_name not in data_var.dims 173 | ): 174 | data_var = data_var.expand_dims(coord_name) 175 | if coord_name in data_var.dims: 176 | coords_names.append(coord_name) 177 | return coords_names, data_var 178 | 179 | 180 | def make_template_message(merged_grib_keys, template_path=None, sample_name=None): 181 | # type: (T.Dict[str, T.Any], T.Optional[str], T.Optional[str]) -> messages.Message 182 | if template_path and sample_name: 183 | raise ValueError("template_path and sample_name should not be both set") 184 | 185 | if template_path: 186 | with open(template_path, "rb") as file: 187 | template_message = cfmessage.CfMessage.from_file(file) 188 | else: 189 | if sample_name is None: 190 | sample_name = detect_sample_name(merged_grib_keys) 191 | template_message = cfmessage.CfMessage.from_sample_name(sample_name) 192 | 193 | for key, value in merged_grib_keys.items(): 194 | try: 195 | template_message[key] = value 196 | except KeyError: 197 | LOGGER.exception("skipping key due to errors: %r" % key) 198 | 199 | return template_message 200 | 201 | 202 | def canonical_dataarray_to_grib( 203 | data_var, file, grib_keys={}, default_grib_keys=DEFAULT_GRIB_KEYS, **kwargs 204 | ): 205 | # type: (xr.DataArray, T.IO[bytes], T.Dict[str, T.Any], T.Dict[str, T.Any], T.Any) -> None 206 | """ 207 | Write a ``xr.DataArray`` in *canonical* form to a GRIB file. 
208 | """ 209 | # validate Dataset keys, DataArray names, and attr keys/values 210 | detected_keys, suggested_keys = detect_grib_keys(data_var, default_grib_keys, grib_keys) 211 | merged_grib_keys = merge_grib_keys(grib_keys, detected_keys, suggested_keys) 212 | merged_grib_keys["missingValue"] = messages.MISSING_VAUE_INDICATOR 213 | 214 | if "gridType" not in merged_grib_keys: 215 | raise ValueError("required grib_key 'gridType' not passed nor auto-detected") 216 | 217 | template_message = make_template_message(merged_grib_keys, **kwargs) 218 | 219 | coords_names, data_var = expand_dims(data_var) 220 | 221 | header_coords_values = [list(data_var.coords[name].values) for name in coords_names] 222 | 223 | for items in itertools.product(*header_coords_values): 224 | select = {n: v for n, v in zip(coords_names, items)} 225 | field_values = data_var.sel(**select).values.flat[:] 226 | 227 | # Missing values handling 228 | invalid_field_values = np.logical_not(np.isfinite(field_values)) 229 | 230 | # There's no need to save a message full of missing values 231 | if invalid_field_values.all(): 232 | continue 233 | 234 | missing_value = merged_grib_keys.get("GRIB_missingValue", messages.MISSING_VAUE_INDICATOR) 235 | field_values[invalid_field_values] = missing_value 236 | 237 | message = cfmessage.CfMessage.from_message(template_message) 238 | for coord_name, coord_value in zip(coords_names, items): 239 | if coord_name in ALL_TYPE_OF_LEVELS: 240 | coord_name = "level" 241 | message[coord_name] = coord_value 242 | 243 | if invalid_field_values.any(): 244 | message["bitmapPresent"] = 1 245 | message["missingValue"] = missing_value 246 | 247 | # OPTIMIZE: convert to list because Message.message_set doesn't support np.ndarray 248 | message["values"] = field_values.tolist() 249 | 250 | message.write(file) 251 | 252 | 253 | def canonical_dataset_to_grib(dataset, path, mode="wb", no_warn=False, grib_keys={}, **kwargs): 254 | # type: (xr.Dataset, str, str, bool, T.Dict[str, T.Any], T.Any) -> None 255 | """ 256 | Write a ``xr.Dataset`` in *canonical* form to a GRIB file. 
257 | """ 258 | if not no_warn: 259 | warnings.warn("GRIB write support is experimental, DO NOT RELY ON IT!", FutureWarning) 260 | 261 | # validate Dataset keys, DataArray names, and attr keys/values 262 | backends_api._validate_dataset_names(dataset) # type: ignore 263 | # _validate_attrs takes the engine name as its 2nd arg from xarray 2024.09.0 264 | try: 265 | backends_api._validate_attrs(dataset) # type: ignore 266 | except TypeError: 267 | backends_api._validate_attrs(dataset, "cfgrib") # type: ignore 268 | 269 | real_grib_keys = {str(k)[5:]: v for k, v in dataset.attrs.items() if str(k)[:5] == "GRIB_"} 270 | real_grib_keys.update(grib_keys) 271 | 272 | with open(path, mode=mode) as file: 273 | for data_var in dataset.data_vars.values(): 274 | canonical_dataarray_to_grib(data_var, file, grib_keys=real_grib_keys, **kwargs) 275 | 276 | 277 | to_grib = canonical_dataset_to_grib 278 | -------------------------------------------------------------------------------- /ci/install_python.ps1: -------------------------------------------------------------------------------- 1 | # Sample script to install Python and pip under Windows 2 | # Authors: Olivier Grisel, Jonathan Helmus and Kyle Kastner 3 | # License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ 4 | 5 | $MINICONDA_URL = "https://repo.anaconda.com/miniconda/" 6 | $BASE_URL = "https://www.python.org/ftp/python/" 7 | 8 | 9 | function DownloadMiniconda ($python_version, $platform_suffix) { 10 | $webclient = New-Object System.Net.WebClient 11 | if ($python_version -match "2.7") { 12 | $filename = "Miniconda2-latest-Windows-" + $platform_suffix + ".exe" 13 | } else { 14 | $filename = "Miniconda3-latest-Windows-" + $platform_suffix + ".exe" 15 | } 16 | $url = $MINICONDA_URL + $filename 17 | 18 | $basedir = $pwd.Path + "\" 19 | $filepath = $basedir + $filename 20 | if (Test-Path $filename) { 21 | Write-Host "Reusing" $filepath 22 | return $filepath 23 | } 24 | 25 | # Download and retry up to 3 times in case of network transient errors. 26 | Write-Host "Downloading" $filename "from" $url 27 | $retry_attempts = 2 28 | for($i=0; $i -lt $retry_attempts; $i++){ 29 | try { 30 | $webclient.DownloadFile($url, $filepath) 31 | break 32 | } 33 | Catch [Exception]{ 34 | Start-Sleep 1 35 | } 36 | } 37 | if (Test-Path $filepath) { 38 | Write-Host "File saved at" $filepath 39 | } else { 40 | # Retry once to get the error message if any at the last try 41 | $webclient.DownloadFile($url, $filepath) 42 | } 43 | return $filepath 44 | } 45 | 46 | 47 | function InstallMiniconda ($python_version, $architecture, $python_home) { 48 | Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home 49 | if (Test-Path $python_home) { 50 | Write-Host $python_home "already exists, skipping." 
51 | return $false 52 | } 53 | if ($architecture -match "32") { 54 | $platform_suffix = "x86" 55 | } else { 56 | $platform_suffix = "x86_64" 57 | } 58 | 59 | $filepath = DownloadMiniconda $python_version $platform_suffix 60 | Write-Host "Installing" $filepath "to" $python_home 61 | $install_log = $python_home + ".log" 62 | $args = "/S /D=$python_home" 63 | Write-Host $filepath $args 64 | Start-Process -FilePath $filepath -ArgumentList $args -Wait -Passthru 65 | if (Test-Path $python_home) { 66 | Write-Host "Python $python_version ($architecture) installation complete" 67 | } else { 68 | Write-Host "Failed to install Python in $python_home" 69 | Get-Content -Path $install_log 70 | Exit 1 71 | } 72 | } 73 | 74 | 75 | function InstallCondaPackages ($python_home, $spec) { 76 | $conda_path = $python_home + "\Scripts\conda.exe" 77 | $args = "install --yes " + $spec 78 | Write-Host ("conda " + $args) 79 | Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru 80 | } 81 | 82 | function UpdateConda ($python_home) { 83 | $conda_path = $python_home + "\Scripts\conda.exe" 84 | Write-Host "Updating conda..." 85 | $args = "update --yes conda" 86 | Write-Host $conda_path $args 87 | Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru 88 | } 89 | 90 | 91 | function main () { 92 | InstallMiniconda $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON 93 | UpdateConda $env:PYTHON 94 | InstallCondaPackages $env:PYTHON "conda-build jinja2 anaconda-client" 95 | } 96 | 97 | main 98 | -------------------------------------------------------------------------------- /ci/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | check-manifest 2 | detox 3 | IPython 4 | matplotlib 5 | notebook 6 | pip-tools 7 | pyroma 8 | pytest-mypy 9 | setuptools 10 | tox 11 | tox-pyenv 12 | wheel 13 | zest.releaser 14 | -------------------------------------------------------------------------------- /ci/requirements-docs.in: -------------------------------------------------------------------------------- 1 | Sphinx 2 | pytest-runner 3 | xarray -------------------------------------------------------------------------------- /ci/requirements-docs.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile --output-file ci/requirements-docs.txt setup.py ci/requirements-docs.in 6 | # 7 | alabaster==0.7.12 # via sphinx 8 | attrs==19.3.0 9 | babel==2.9.1 # via sphinx 10 | certifi==2024.07.04 # via requests 11 | cffi==1.14.0 12 | chardet==3.0.4 # via requests 13 | click==7.1.2 14 | docutils==0.16 # via sphinx 15 | idna==3.7 # via requests 16 | imagesize==1.2.0 # via sphinx 17 | jinja2==3.1.6 # via sphinx 18 | markupsafe==1.1.1 # via jinja2 19 | numpy==1.22.0 20 | packaging==20.3 # via sphinx 21 | pandas==1.0.3 # via xarray 22 | pycparser==2.20 # via cffi 23 | pygments==2.15.0 # via sphinx 24 | pyparsing==2.4.7 # via packaging 25 | pytest-runner==5.2 26 | python-dateutil==2.8.1 # via pandas 27 | pytz==2020.1 # via babel, pandas 28 | requests==2.32.4 # via sphinx 29 | six==1.14.0 # via packaging, python-dateutil 30 | snowballstemmer==2.0.0 # via sphinx 31 | sphinx==3.0.3 32 | sphinxcontrib-applehelp==1.0.2 # via sphinx 33 | sphinxcontrib-devhelp==1.0.2 # via sphinx 34 | sphinxcontrib-htmlhelp==1.0.3 # via sphinx 35 | sphinxcontrib-jsmath==1.0.1 # via sphinx 36 | sphinxcontrib-qthelp==1.0.3 # via sphinx 37 | 
sphinxcontrib-serializinghtml==1.1.4 # via sphinx 38 | urllib3==1.26.19 # via requests 39 | xarray==0.15.1 40 | -------------------------------------------------------------------------------- /ci/requirements-docs.yml: -------------------------------------------------------------------------------- 1 | name: test_env 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - attrs 6 | - cffi 7 | - eccodes 8 | - future 9 | - nomkl 10 | - sphinx 11 | - xarray 12 | 13 | -------------------------------------------------------------------------------- /ci/requirements-py37.yml: -------------------------------------------------------------------------------- 1 | name: test_env 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | - attrs 6 | - cffi 7 | - click 8 | - coveralls 9 | - dask 10 | - eccodes 11 | - future 12 | - numpy 13 | - pytest 14 | - pytest-cov 15 | - pytest-flakes 16 | - python=3.7 17 | - scipy 18 | - typing 19 | - toolz 20 | - xarray 21 | 22 | -------------------------------------------------------------------------------- /ci/requirements-tests.in: -------------------------------------------------------------------------------- 1 | dask[array] 2 | pytest 3 | pytest-cov 4 | pytest-flakes 5 | pytest-mccabe 6 | pytest-pep8 7 | pytest-runner 8 | scipy 9 | xarray<2025.6.0 10 | -------------------------------------------------------------------------------- /ci/requirements-tests.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile --output-file ci/requirements-tests.txt setup.py ci/requirements-tests.in 6 | # 7 | apipkg==1.5 # via execnet 8 | attrs==19.3.0 9 | cffi==1.14.0 10 | click==7.1.2 11 | coverage==5.1 # via pytest-cov 12 | dask[array]==2024.8.2 13 | execnet==1.7.1 # via pytest-cache 14 | importlib-metadata==1.6.0 # via pluggy, pytest 15 | mccabe==0.6.1 # via pytest-mccabe 16 | more-itertools==8.2.0 # via pytest 17 | numpy==1.22.0 18 | packaging==20.3 # via pytest 19 | pandas==1.0.3 # via xarray 20 | pep8==1.7.1 # via pytest-pep8 21 | pluggy==0.13.1 # via pytest 22 | pycparser==2.20 # via cffi 23 | pyflakes==2.2.0 # via pytest-flakes 24 | pyparsing==2.4.7 # via packaging 25 | pytest-cache==1.0 # via pytest-pep8 26 | pytest-cov==2.8.1 27 | pytest-flakes==4.0.0 28 | pytest-mccabe==1.0 29 | pytest-pep8==1.0.6 30 | pytest-runner==5.2 31 | pytest==7.2.0 32 | python-dateutil==2.8.1 # via pandas 33 | pytz==2020.1 # via pandas 34 | scipy==1.8.0 35 | six==1.14.0 # via packaging, python-dateutil 36 | toolz==0.10.0 # via dask 37 | wcwidth==0.1.9 # via pytest 38 | xarray==0.15.1 39 | zipp==3.19.1 # via importlib-metadata 40 | -------------------------------------------------------------------------------- /docs/_static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/docs/_static/.gitkeep -------------------------------------------------------------------------------- /docs/cfmessage.rst: -------------------------------------------------------------------------------- 1 | 2 | GRIB to CF translation 3 | ---------------------- 4 | 5 | .. 
automodule:: cfgrib.cfmessage 6 | :members: 7 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import sys 6 | 7 | import pkg_resources 8 | 9 | # Get the project root dir, which is the parent dir of this 10 | cwd = os.getcwd() 11 | project_root = os.path.dirname(cwd) 12 | 13 | # Insert the project root dir as the first element in the PYTHONPATH. 14 | # This lets us ensure that the source package is imported, and that its 15 | # version is used. 16 | sys.path.insert(0, project_root) 17 | 18 | # Add any Sphinx extension module names here, as strings. They can be 19 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 20 | extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"] 21 | 22 | # Add any paths that contain templates here, relative to this directory. 23 | templates_path = ["_templates"] 24 | 25 | # The suffix of source filenames. 26 | source_suffix = ".rst" 27 | 28 | # The encoding of source files. 29 | # source_encoding = 'utf-8-sig' 30 | 31 | # The master toctree document. 32 | master_doc = "index" 33 | 34 | # General information about the project. 35 | project = "cfgrib" 36 | copyright = "2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF)." 37 | 38 | # The version info for the project you're documenting, acts as replacement 39 | # for |version| and |release|, also used in various other places throughout 40 | # the built documents. 41 | # 42 | # The full version, including alpha/beta/rc tags. 43 | release = pkg_resources.get_distribution("cfgrib").version 44 | # The short X.Y version. 45 | version = ".".join(release.split(".")[:2]) 46 | 47 | # The language for content autogenerated by Sphinx. Refer to documentation 48 | # for a list of supported languages. 49 | # language = None 50 | 51 | # There are two options for replacing |today|: either, you set today to 52 | # some non-false value, then it is used: 53 | # today = '' 54 | # Else, today_fmt is used as the format for a strftime call. 55 | # today_fmt = '%B %d, %Y' 56 | 57 | # List of patterns, relative to source directory, that match files and 58 | # directories to ignore when looking for source files. 59 | exclude_patterns = ["_build"] 60 | 61 | # The reST default role (used for this markup: `text`) to use for all 62 | # documents. 63 | # default_role = None 64 | 65 | # If true, '()' will be appended to :func: etc. cross-reference text. 66 | # add_function_parentheses = True 67 | 68 | # If true, the current module name will be prepended to all description 69 | # unit titles (such as .. function::). 70 | # add_module_names = True 71 | 72 | # If true, sectionauthor and moduleauthor directives will be shown in the 73 | # output. They are ignored by default. 74 | # show_authors = False 75 | 76 | # The name of the Pygments (syntax highlighting) style to use. 77 | pygments_style = "sphinx" 78 | 79 | # A list of ignored prefixes for module index sorting. 80 | # modindex_common_prefix = [] 81 | 82 | # If true, keep warnings as "system message" paragraphs in the built 83 | # documents. 84 | # keep_warnings = False 85 | 86 | 87 | # -- Options for HTML output ------------------------------------------- 88 | 89 | # The theme to use for HTML and HTML Help pages. See the documentation for 90 | # a list of builtin themes. 
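# An installed third-party theme could be swapped in here, e.g. the
# (hypothetical for this project) setting: html_theme = "sphinx_rtd_theme"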
91 | html_theme = "default" 92 | 93 | # Theme options are theme-specific and customize the look and feel of a 94 | # theme further. For a list of options available for each theme, see the 95 | # documentation. 96 | # html_theme_options = {} 97 | 98 | # Add any paths that contain custom themes here, relative to this directory. 99 | # html_theme_path = [] 100 | 101 | # The name for this set of Sphinx documents. If None, it defaults to 102 | # " v documentation". 103 | # html_title = None 104 | 105 | # A shorter title for the navigation bar. Default is the same as 106 | # html_title. 107 | # html_short_title = None 108 | 109 | # The name of an image file (relative to this directory) to place at the 110 | # top of the sidebar. 111 | # html_logo = None 112 | 113 | # The name of an image file (within the static path) to use as favicon 114 | # of the docs. This file should be a Windows icon file (.ico) being 115 | # 16x16 or 32x32 pixels large. 116 | # html_favicon = None 117 | 118 | # Add any paths that contain custom static files (such as style sheets) 119 | # here, relative to this directory. They are copied after the builtin 120 | # static files, so a file named "default.css" will overwrite the builtin 121 | # "default.css". 122 | html_static_path = ["_static"] 123 | 124 | # If not '', a 'Last updated on:' timestamp is inserted at every page 125 | # bottom, using the given strftime format. 126 | # html_last_updated_fmt = '%b %d, %Y' 127 | 128 | # If true, SmartyPants will be used to convert quotes and dashes to 129 | # typographically correct entities. 130 | # html_use_smartypants = True 131 | 132 | # Custom sidebar templates, maps document names to template names. 133 | # html_sidebars = {} 134 | 135 | # Additional templates that should be rendered to pages, maps page names 136 | # to template names. 137 | # html_additional_pages = {} 138 | 139 | # If false, no module index is generated. 140 | # html_domain_indices = True 141 | 142 | # If false, no index is generated. 143 | # html_use_index = True 144 | 145 | # If true, the index is split into individual pages for each letter. 146 | # html_split_index = False 147 | 148 | # If true, links to the reST sources are added to the pages. 149 | # html_show_sourcelink = True 150 | 151 | # If true, "Created using Sphinx" is shown in the HTML footer. 152 | # Default is True. 153 | # html_show_sphinx = True 154 | 155 | # If true, "(C) Copyright ..." is shown in the HTML footer. 156 | # Default is True. 157 | # html_show_copyright = True 158 | 159 | # If true, an OpenSearch description file will be output, and all pages 160 | # will contain a tag referring to it. The value of this option 161 | # must be the base URL from which the finished HTML is served. 162 | # html_use_opensearch = '' 163 | 164 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 165 | # html_file_suffix = None 166 | 167 | # Output file base name for HTML help builder. 168 | htmlhelp_basename = "cfgribdoc" 169 | -------------------------------------------------------------------------------- /docs/dataset.rst: -------------------------------------------------------------------------------- 1 | 2 | Dataset / Variable API 3 | ---------------------- 4 | 5 | .. 
automodule:: cfgrib.dataset 6 | :members: 7 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | 2 | 3 | ======= 4 | CF-GRIB 5 | ======= 6 | 7 | :Version: |release| 8 | :Date: |today| 9 | 10 | Python interface to map GRIB files to the 11 | `Unidata's Common Data Model v4 `_ 12 | following the `CF Conventions `_. 13 | The high level API is designed to support a GRIB engine for `xarray `_ 14 | and is inspired by `netCDF4-python `_ 15 | and `h5netcdf `_. 16 | Low level access and decoding is performed via the 17 | `ECMWF ecCodes library `_. 18 | 19 | Features with development status **Beta**: 20 | 21 | - enables the ``engine='cfgrib'`` option to read GRIB files with *xarray*, 22 | - reads most GRIB 1 and 2 files including heterogeneous ones with ``cfgrib.open_datasets``, 23 | - supports all modern versions of Python and PyPy3, 24 | - the 0.9.6.x series with support for Python 2 will stay active and receive critical bugfixes, 25 | - works on *Linux*, *MacOS* and *Windows*, the *ecCodes* C-library is the only binary dependency, 26 | - conda-forge package on all supported platforms, 27 | - PyPI package with no install-time build (binds via *CFFI* ABI mode), 28 | - reads the data lazily and efficiently in terms of both memory usage and disk access, 29 | - allows larger-than-memory and distributed processing via *dask*, 30 | - supports translating coordinates to different data models and naming conventions, 31 | - supports writing the index of a GRIB file to disk, to save a full-file scan on open. 32 | 33 | Work in progress: 34 | 35 | - **Alpha** limited support for MULTI-FIELD messages, e.g. u-v components, 36 | see `#76 `_. 37 | - **Alpha** install a ``cfgrib`` utility that can convert a GRIB file ``to_netcdf`` 38 | with an optional conversion to a specific coordinate data model, 39 | see `#40 `_. 40 | - **Alpha** support writing carefully crafted ``xarray.Dataset`` objects to a GRIB1 or GRIB2 file, 41 | see the *Advanced write usage* section below and 42 | `#18 `_. 43 | 44 | Limitations: 45 | 46 | - relies on *ecCodes* for the CF attributes of the data variables, 47 | - relies on *ecCodes* for anything related to coordinate systems / ``gridType``, 48 | see `#28 `_. 49 | 50 | 51 | .. toctree:: 52 | :maxdepth: 2 53 | :caption: Table of Contents 54 | 55 | messages 56 | cfmessage 57 | dataset 58 | xarray_store 59 | xarray_to_grib 60 | -------------------------------------------------------------------------------- /docs/messages.rst: -------------------------------------------------------------------------------- 1 | 2 | File / Message API 3 | ------------------ 4 | 5 | .. automodule:: cfgrib.messages 6 | :members: 7 | -------------------------------------------------------------------------------- /docs/xarray_store.rst: -------------------------------------------------------------------------------- 1 | 2 | xarray read-only GRIB driver 3 | ---------------------------- 4 | 5 | .. automodule:: cfgrib.xarray_store 6 | :members: 7 | -------------------------------------------------------------------------------- /docs/xarray_to_grib.rst: -------------------------------------------------------------------------------- 1 | 2 | xarray to GRIB 3 | -------------- 4 | 5 | .. 
automodule:: cfgrib.xarray_to_grib 6 | :members: 7 | -------------------------------------------------------------------------------- /environment-minimal.in.yml: -------------------------------------------------------------------------------- 1 | channels: 2 | - defaults 3 | - conda-forge 4 | dependencies: 5 | - attrs>=19.2 6 | - click 7 | - nomkl 8 | - numpy 9 | - pytest-cov 10 | - python-eccodes 11 | - tomli 12 | -------------------------------------------------------------------------------- /environment-minver.in.yml: -------------------------------------------------------------------------------- 1 | channels: 2 | - defaults 3 | - conda-forge 4 | dependencies: 5 | - attrs=19.2.0 6 | - click=7.0.0 7 | - eccodes=2.16.0 8 | - numpy=1.15.0 9 | - pandas=0.25.0 10 | - pytest-cov 11 | - python-eccodes=1.4.0 12 | - tomli 13 | - xarray=0.15.0 14 | -------------------------------------------------------------------------------- /environment.in.yml: -------------------------------------------------------------------------------- 1 | channels: 2 | - defaults 3 | - conda-forge 4 | dependencies: 5 | - attrs>=19.2 6 | - click 7 | - eccodes>=2.20.0 8 | - mypy=0.812 9 | - nomkl 10 | - numpy 11 | - pytest-cov 12 | - python-eccodes 13 | - scipy 14 | - tomli 15 | - xarray>=0.20.2 16 | -------------------------------------------------------------------------------- /era5-levels-members.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/era5-levels-members.grib -------------------------------------------------------------------------------- /nam.t00z.awp21100.tm00.grib2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/nam.t00z.awp21100.tm00.grib2 -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ "setuptools>=61", "setuptools-scm>=8" ] 3 | 4 | [tool.black] 5 | line-length = 99 6 | 7 | [tool.isort] 8 | profile = "black" 9 | line_length = 99 10 | 11 | [tool.pytest.ini_options] 12 | norecursedirs = [ 13 | "build", 14 | "docs", 15 | ".tox", 16 | ] 17 | 18 | [tool.coverage.run] 19 | branch = true 20 | omit = ["setup.py"] 21 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [zest.releaser] 2 | python-file-with-version = cfgrib/__init__.py 3 | create-wheel = yes 4 | 5 | # exclude xarray_plugin for now 6 | [mypy] 7 | 8 | [mypy-cfgrib.xarray_plugin] 9 | ignore_errors = True 10 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright 2017-2021 European Centre for Medium-Range Weather Forecasts (ECMWF). 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import os 18 | import re 19 | 20 | import setuptools # type: ignore 21 | 22 | 23 | def read(path: str) -> str: 24 | file_path = os.path.join(os.path.dirname(__file__), *path.split("/")) 25 | return open(file_path).read() 26 | 27 | 28 | # single-sourcing the package version using method 1 of: 29 | # https://packaging.python.org/guides/single-sourcing-package-version/ 30 | def parse_version_from(path: str) -> str: 31 | version_file = read(path) 32 | version_match = re.search(r'^__version__ = "(.*)"', version_file, re.M) 33 | if version_match is None or len(version_match.groups()) > 1: 34 | raise ValueError("couldn't parse version") 35 | return version_match.group(1) 36 | 37 | 38 | setuptools.setup( 39 | name="cfgrib", 40 | version=parse_version_from("cfgrib/__init__.py"), 41 | description="Python interface to map GRIB files to the NetCDF Common Data Model " 42 | "following the CF Convention using ecCodes.", 43 | long_description=read("README.rst") + read("CHANGELOG.rst"), 44 | author="European Centre for Medium-Range Weather Forecasts (ECMWF)", 45 | author_email="software.support@ecmwf.int", 46 | license="Apache License Version 2.0", 47 | url="https://github.com/ecmwf/cfgrib", 48 | packages=setuptools.find_packages(), 49 | include_package_data=True, 50 | install_requires=["attrs>=19.2", "click", "eccodes>=0.9.8", "numpy"], 51 | python_requires=">=3.7", 52 | extras_require={ 53 | "xarray": ["xarray>=0.15"], 54 | "tests": ["dask[array]", "flake8", "pytest", "pytest-cov", "scipy", "xarray>=0.15"], 55 | }, 56 | zip_safe=True, 57 | keywords="eccodes grib xarray", 58 | classifiers=[ 59 | "Development Status :: 4 - Beta", 60 | "Intended Audience :: Developers", 61 | "License :: OSI Approved :: Apache Software License", 62 | "Programming Language :: Python :: 3", 63 | "Programming Language :: Python :: 3.7", 64 | "Programming Language :: Python :: 3.8", 65 | "Programming Language :: Python :: 3.9", 66 | "Programming Language :: Python :: Implementation :: CPython", 67 | "Programming Language :: Python :: Implementation :: PyPy", 68 | "Operating System :: OS Independent", 69 | ], 70 | entry_points={ 71 | "console_scripts": ["cfgrib=cfgrib.__main__:cfgrib_cli"], 72 | "xarray.backends": ["cfgrib=cfgrib.xarray_plugin:CfGribBackend"], 73 | }, 74 | ) 75 | -------------------------------------------------------------------------------- /tests/cds_test_00_sync_sample_data.py: -------------------------------------------------------------------------------- 1 | import cdscommon 2 | import pytest 3 | 4 | import cfgrib 5 | 6 | TEST_FILES = { 7 | "era5-levels-members": [ 8 | "reanalysis-era5-pressure-levels", 9 | { 10 | "variable": ["geopotential", "temperature"], 11 | "pressure_level": ["500", "850"], 12 | "product_type": "ensemble_members", 13 | "year": "2017", 14 | "month": "01", 15 | "day": ["01", "02"], 16 | "time": ["00:00", "12:00"], 17 | "grid": ["3", "3"], 18 | "format": "grib", 19 | }, 20 | 193, 21 | ] 22 | } 23 | 24 | 25 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 26 | def test_reanalysis_Stream(test_file): 27 | dataset, request, 
key_count = TEST_FILES[test_file] 28 | path = cdscommon.ensure_data(dataset, request, name=test_file + "{ext}") 29 | 30 | stream = cfgrib.FileStream(path) 31 | leader = stream.first() 32 | assert len(leader) == key_count 33 | assert sum(1 for _ in stream) == leader["count"] 34 | -------------------------------------------------------------------------------- /tests/cds_test_10_era5.py: -------------------------------------------------------------------------------- 1 | import cdscommon 2 | import pytest 3 | 4 | import cfgrib 5 | import cfgrib.xarray_store 6 | 7 | TEST_FILES = { 8 | "era5-single-levels-reanalysis": [ 9 | "reanalysis-era5-single-levels", 10 | { 11 | "variable": "2m_temperature", 12 | "product_type": "reanalysis", 13 | "year": "2017", 14 | "month": "01", 15 | "day": ["01", "02"], 16 | "time": ["00:00", "12:00"], 17 | "grid": ["3", "3"], 18 | "format": "grib", 19 | }, 20 | 192, 21 | ], 22 | "era5-single-levels-ensemble_members": [ 23 | "reanalysis-era5-single-levels", 24 | { 25 | "variable": "2m_temperature", 26 | "product_type": "ensemble_members", 27 | "year": "2017", 28 | "month": "01", 29 | "day": ["01", "02"], 30 | "time": ["00:00", "12:00"], 31 | "grid": ["3", "3"], 32 | "format": "grib", 33 | }, 34 | 193, 35 | ], 36 | "era5-pressure-levels-reanalysis": [ 37 | "reanalysis-era5-pressure-levels", 38 | { 39 | "variable": "temperature", 40 | "pressure_level": ["500", "850"], 41 | "product_type": "reanalysis", 42 | "year": "2017", 43 | "month": "01", 44 | "day": ["01", "02"], 45 | "time": ["00:00", "12:00"], 46 | "grid": ["3", "3"], 47 | "format": "grib", 48 | }, 49 | 192, 50 | ], 51 | "era5-pressure-levels-ensemble_members": [ 52 | "reanalysis-era5-pressure-levels", 53 | { 54 | "variable": "temperature", 55 | "pressure_level": ["500", "850"], 56 | "product_type": "ensemble_members", 57 | "year": "2017", 58 | "month": "01", 59 | "day": ["01", "02"], 60 | "time": ["00:00", "12:00"], 61 | "grid": ["3", "3"], 62 | "format": "grib", 63 | }, 64 | 193, 65 | ], 66 | "era5-single-levels-reanalysis-area": [ 67 | "reanalysis-era5-single-levels", 68 | { 69 | "variable": "2m_temperature", 70 | "product_type": "reanalysis", 71 | "year": "2017", 72 | "month": "01", 73 | "day": ["01", "02"], 74 | "time": ["00:00", "12:00"], 75 | "area": ["35.5", "6.5", "47.", "19."], 76 | "format": "grib", 77 | }, 78 | 192, 79 | ], 80 | } 81 | 82 | 83 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 84 | def test_Stream(test_file): 85 | dataset, request, key_count = TEST_FILES[test_file] 86 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 87 | 88 | stream = cfgrib.FileStream(path) 89 | leader = stream.first() 90 | assert len(leader) == key_count 91 | assert sum(1 for _ in stream) == leader["count"] 92 | 93 | 94 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 95 | def test_Dataset(test_file): 96 | dataset, request, key_count = TEST_FILES[test_file] 97 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 98 | 99 | res = cfgrib.xarray_store.open_dataset(path) 100 | res.to_netcdf(path[:-5] + ".nc") 101 | 102 | 103 | @pytest.mark.skip() 104 | def test_large_Dataset(): 105 | dataset, request, key_count = TEST_FILES["era5-pressure-levels-ensemble_members"] 106 | # make the request large 107 | request["day"] = list(range(1, 32)) 108 | request["time"] = list(["%02d:00" % h for h in range(0, 24, 3)]) 109 | path = cdscommon.ensure_data(dataset, request, name="cds-" + dataset + "-LARGE-{uuid}.grib") 110 | 111 | res = 
cfgrib.xarray_store.open_dataset(path) 112 | res.to_netcdf(path[:-5] + ".nc") 113 | -------------------------------------------------------------------------------- /tests/cds_test_20_sf_ecmwf.py: -------------------------------------------------------------------------------- 1 | import cdscommon 2 | import pytest 3 | 4 | import cfgrib 5 | import cfgrib.xarray_store 6 | 7 | TEST_FILES = { 8 | "seasonal-original-single-levels-ecmwf": [ 9 | "seasonal-original-single-levels", 10 | { 11 | "originating_centre": "ecmwf", 12 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 13 | "year": "2018", 14 | "month": ["04", "05"], 15 | "day": [ 16 | "01", 17 | "02", 18 | "03", 19 | "04", 20 | "05", 21 | "06", 22 | "07", 23 | "08", 24 | "09", 25 | "10", 26 | "11", 27 | "12", 28 | "13", 29 | "14", 30 | "15", 31 | "16", 32 | "17", 33 | "18", 34 | "19", 35 | "20", 36 | "21", 37 | "22", 38 | "23", 39 | "24", 40 | "25", 41 | "26", 42 | "27", 43 | "28", 44 | "29", 45 | "30", 46 | "31", 47 | ], 48 | "leadtime_hour": ["24", "48"], 49 | "grid": ["3", "3"], 50 | "format": "grib", 51 | }, 52 | 193, 53 | ], 54 | "seasonal-original-pressure-levels-ecmwf": [ 55 | "seasonal-original-pressure-levels", 56 | { 57 | "originating_centre": "ecmwf", 58 | "variable": "temperature", 59 | "pressure_level": ["500", "850"], 60 | "year": "2018", 61 | "month": ["04", "05"], 62 | "day": [ 63 | "01", 64 | "02", 65 | "03", 66 | "04", 67 | "05", 68 | "06", 69 | "07", 70 | "08", 71 | "09", 72 | "10", 73 | "11", 74 | "12", 75 | "13", 76 | "14", 77 | "15", 78 | "16", 79 | "17", 80 | "18", 81 | "19", 82 | "20", 83 | "21", 84 | "22", 85 | "23", 86 | "24", 87 | "25", 88 | "26", 89 | "27", 90 | "28", 91 | "29", 92 | "30", 93 | "31", 94 | ], 95 | "leadtime_hour": ["24", "48"], 96 | "grid": ["3", "3"], 97 | "format": "grib", 98 | }, 99 | 193, 100 | ], 101 | "seasonal-postprocessed-single-levels-ecmwf": [ 102 | "seasonal-postprocessed-single-levels", 103 | { 104 | "originating_centre": "ecmwf", 105 | "variable": "maximum_2m_temperature_in_the_last_24_hours_anomaly", 106 | "product_type": "monthly_mean", 107 | "year": "2018", 108 | "month": ["04", "05"], 109 | "leadtime_month": ["1", "2"], 110 | "grid": ["3", "3"], 111 | "format": "grib", 112 | }, 113 | 212, 114 | ], 115 | "seasonal-monthly-single-levels-monthly_mean-ecmwf": [ 116 | "seasonal-monthly-single-levels", 117 | { 118 | "originating_centre": "ecmwf", 119 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 120 | "product_type": "monthly_mean", 121 | "year": "2018", 122 | "month": ["04", "05"], 123 | "leadtime_month": ["1", "2"], 124 | "grid": ["3", "3"], 125 | "format": "grib", 126 | }, 127 | 212, 128 | ], 129 | "seasonal-monthly-single-levels-ensemble_mean-ecmwf": [ 130 | "seasonal-monthly-single-levels", 131 | { 132 | "originating_centre": "ecmwf", 133 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 134 | "product_type": "ensemble_mean", 135 | "year": "2018", 136 | "month": ["04", "05"], 137 | "leadtime_month": ["1", "2"], 138 | "grid": ["3", "3"], 139 | "format": "grib", 140 | }, 141 | 212, 142 | ], 143 | "seasonal-monthly-single-levels-hindcast_climate_mean-ecmwf": [ 144 | "seasonal-monthly-single-levels", 145 | { 146 | "originating_centre": "ecmwf", 147 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 148 | "product_type": "hindcast_climate_mean", 149 | "year": "2018", 150 | "month": ["04", "05"], 151 | "leadtime_month": ["1", "2"], 152 | "grid": ["3", "3"], 153 | "format": "grib", 154 | }, 155 | 212, 156 | ], 157 | } 158 | 159 | 160 | 
@pytest.mark.parametrize("test_file", TEST_FILES.keys()) 161 | def test_Stream(test_file): 162 | dataset, request, key_count = TEST_FILES[test_file] 163 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 164 | 165 | stream = cfgrib.FileStream(path) 166 | leader = stream.first() 167 | assert len(leader) == key_count 168 | assert sum(1 for _ in stream) == leader["count"] 169 | 170 | 171 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 172 | def test_Dataset(test_file): 173 | dataset, request, key_count = TEST_FILES[test_file] 174 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 175 | 176 | res = cfgrib.xarray_store.open_dataset(path) 177 | res.to_netcdf(path[:-5] + ".nc") 178 | 179 | 180 | @pytest.mark.skip() 181 | def test_large_Dataset(): 182 | dataset, request, key_count = TEST_FILES["seasonal-original-pressure-levels-ecmwf"] 183 | # make the request large 184 | request["leadtime_hour"] = list(range(720, 1445, 24)) 185 | request["grid"] = ["1", "1"] 186 | path = cdscommon.ensure_data(dataset, request, name="cds-" + dataset + "-LARGE-{uuid}.grib") 187 | 188 | res = cfgrib.xarray_store.open_dataset(path) 189 | res.to_netcdf(path[:-5] + ".nc") 190 | -------------------------------------------------------------------------------- /tests/cds_test_20_sf_meteo_france.py: -------------------------------------------------------------------------------- 1 | import cdscommon 2 | import pytest 3 | 4 | import cfgrib 5 | 6 | TEST_FILES = { 7 | "seasonal-original-single-levels-meteo_france": [ 8 | "seasonal-original-single-levels", 9 | { 10 | "originating_centre": "meteo_france", 11 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 12 | "year": "2018", 13 | "month": ["04", "05"], 14 | "day": [ 15 | "01", 16 | "02", 17 | "03", 18 | "04", 19 | "05", 20 | "06", 21 | "07", 22 | "08", 23 | "09", 24 | "10", 25 | "11", 26 | "12", 27 | "13", 28 | "14", 29 | "15", 30 | "16", 31 | "17", 32 | "18", 33 | "19", 34 | "20", 35 | "21", 36 | "22", 37 | "23", 38 | "24", 39 | "25", 40 | "26", 41 | "27", 42 | "28", 43 | "29", 44 | "30", 45 | "31", 46 | ], 47 | "leadtime_hour": ["24", "48"], 48 | "grid": ["3", "3"], 49 | "format": "grib", 50 | }, 51 | 193, 52 | ], 53 | "seasonal-original-pressure-levels-meteo_france": [ 54 | "seasonal-original-pressure-levels", 55 | { 56 | "originating_centre": "meteo_france", 57 | "variable": "temperature", 58 | "pressure_level": ["500", "850"], 59 | "year": "2018", 60 | "month": ["04", "05"], 61 | "day": [ 62 | "01", 63 | "02", 64 | "03", 65 | "04", 66 | "05", 67 | "06", 68 | "07", 69 | "08", 70 | "09", 71 | "10", 72 | "11", 73 | "12", 74 | "13", 75 | "14", 76 | "15", 77 | "16", 78 | "17", 79 | "18", 80 | "19", 81 | "20", 82 | "21", 83 | "22", 84 | "23", 85 | "24", 86 | "25", 87 | "26", 88 | "27", 89 | "28", 90 | "29", 91 | "30", 92 | "31", 93 | ], 94 | "leadtime_hour": ["24", "48"], 95 | "grid": ["3", "3"], 96 | "format": "grib", 97 | }, 98 | 193, 99 | ], 100 | "seasonal-postprocessed-single-levels-meteo_france": [ 101 | "seasonal-postprocessed-single-levels", 102 | { 103 | "originating_centre": "meteo_france", 104 | "variable": "maximum_2m_temperature_in_the_last_24_hours_anomaly", 105 | "product_type": "monthly_mean", 106 | "year": "2018", 107 | "month": ["04", "05"], 108 | "leadtime_month": ["1", "2"], 109 | "grid": ["3", "3"], 110 | "format": "grib", 111 | }, 112 | 212, 113 | ], 114 | "seasonal-monthly-single-levels-monthly_mean-meteo_france": [ 115 | 
"seasonal-monthly-single-levels", 116 | { 117 | "originating_centre": "meteo_france", 118 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 119 | "product_type": "monthly_mean", 120 | "year": "2018", 121 | "month": ["04", "05"], 122 | "leadtime_month": ["1", "2"], 123 | "grid": ["3", "3"], 124 | "format": "grib", 125 | }, 126 | 212, 127 | ], 128 | "seasonal-monthly-single-levels-ensemble_mean-meteo_france": [ 129 | "seasonal-monthly-single-levels", 130 | { 131 | "originating_centre": "meteo_france", 132 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 133 | "product_type": "ensemble_mean", 134 | "year": "2018", 135 | "month": ["04", "05"], 136 | "leadtime_month": ["1", "2"], 137 | "grid": ["3", "3"], 138 | "format": "grib", 139 | }, 140 | 212, 141 | ], 142 | "seasonal-monthly-single-levels-hindcast_climate_mean-meteo_france": [ 143 | "seasonal-monthly-single-levels", 144 | { 145 | "originating_centre": "meteo_france", 146 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 147 | "product_type": "hindcast_climate_mean", 148 | "year": "2018", 149 | "month": ["04", "05"], 150 | "leadtime_month": ["1", "2"], 151 | "grid": ["3", "3"], 152 | "format": "grib", 153 | }, 154 | 212, 155 | ], 156 | } 157 | 158 | 159 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 160 | def test_reanalysis_Stream(test_file): 161 | dataset, request, key_count = TEST_FILES[test_file] 162 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 163 | 164 | stream = cfgrib.FileStream(path) 165 | leader = stream.first() 166 | assert len(leader) == key_count 167 | assert sum(1 for _ in stream) == leader["count"] 168 | 169 | 170 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 171 | def test_reanalysis_Dataset(test_file): 172 | dataset, request, key_count = TEST_FILES[test_file] 173 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 174 | 175 | res = cfgrib.xarray_store.open_dataset(path) 176 | res.to_netcdf(path[:-5] + ".nc") 177 | -------------------------------------------------------------------------------- /tests/cds_test_20_sf_ukmo.py: -------------------------------------------------------------------------------- 1 | import cdscommon 2 | import pytest 3 | 4 | import cfgrib 5 | 6 | TEST_FILES = { 7 | "seasonal-original-single-levels-ukmo": [ 8 | "seasonal-original-single-levels", 9 | { 10 | "originating_centre": "ukmo", 11 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 12 | "year": "2018", 13 | "month": ["04", "05"], 14 | "day": [ 15 | "01", 16 | "02", 17 | "03", 18 | "04", 19 | "05", 20 | "06", 21 | "07", 22 | "08", 23 | "09", 24 | "10", 25 | "11", 26 | "12", 27 | "13", 28 | "14", 29 | "15", 30 | "16", 31 | "17", 32 | "18", 33 | "19", 34 | "20", 35 | "21", 36 | "22", 37 | "23", 38 | "24", 39 | "25", 40 | "26", 41 | "27", 42 | "28", 43 | "29", 44 | "30", 45 | "31", 46 | ], 47 | "leadtime_hour": ["24", "48"], 48 | "grid": ["3", "3"], 49 | "format": "grib", 50 | }, 51 | 193, 52 | ], 53 | "seasonal-original-pressure-levels-ukmo": [ 54 | "seasonal-original-pressure-levels", 55 | { 56 | "originating_centre": "ukmo", 57 | "variable": "temperature", 58 | "pressure_level": ["500", "850"], 59 | "year": "2018", 60 | "month": ["04", "05"], 61 | "day": [ 62 | "01", 63 | "02", 64 | "03", 65 | "04", 66 | "05", 67 | "06", 68 | "07", 69 | "08", 70 | "09", 71 | "10", 72 | "11", 73 | "12", 74 | "13", 75 | "14", 76 | "15", 77 | "16", 78 | "17", 79 | "18", 80 | "19", 81 | "20", 82 | "21", 83 | 
"22", 84 | "23", 85 | "24", 86 | "25", 87 | "26", 88 | "27", 89 | "28", 90 | "29", 91 | "30", 92 | "31", 93 | ], 94 | "leadtime_hour": ["24", "48"], 95 | "grid": ["3", "3"], 96 | "format": "grib", 97 | }, 98 | 193, 99 | ], 100 | "seasonal-postprocessed-single-levels-ukmo": [ 101 | "seasonal-postprocessed-single-levels", 102 | { 103 | "originating_centre": "ukmo", 104 | "variable": "maximum_2m_temperature_in_the_last_24_hours_anomaly", 105 | "product_type": "monthly_mean", 106 | "year": "2018", 107 | "month": ["04", "05"], 108 | "leadtime_month": ["1", "2"], 109 | "grid": ["3", "3"], 110 | "format": "grib", 111 | }, 112 | 212, 113 | ], 114 | "seasonal-monthly-single-levels-monthly_mean-ukmo": [ 115 | "seasonal-monthly-single-levels", 116 | { 117 | "originating_centre": "ukmo", 118 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 119 | "product_type": "monthly_mean", 120 | "year": "2018", 121 | "month": ["04", "05"], 122 | "leadtime_month": ["1", "2"], 123 | "grid": ["3", "3"], 124 | "format": "grib", 125 | }, 126 | 212, 127 | ], 128 | "seasonal-monthly-single-levels-ensemble_mean-ukmo": [ 129 | "seasonal-monthly-single-levels", 130 | { 131 | "originating_centre": "ukmo", 132 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 133 | "product_type": "ensemble_mean", 134 | "year": "2018", 135 | "month": ["04", "05"], 136 | "leadtime_month": ["1", "2"], 137 | "grid": ["3", "3"], 138 | "format": "grib", 139 | }, 140 | 212, 141 | ], 142 | "seasonal-monthly-single-levels-hindcast_climate_mean-ukmo": [ 143 | "seasonal-monthly-single-levels", 144 | { 145 | "originating_centre": "ukmo", 146 | "variable": "maximum_2m_temperature_in_the_last_24_hours", 147 | "product_type": "hindcast_climate_mean", 148 | "year": "2018", 149 | "month": ["04", "05"], 150 | "leadtime_month": ["1", "2"], 151 | "grid": ["3", "3"], 152 | "format": "grib", 153 | }, 154 | 212, 155 | ], 156 | } 157 | 158 | 159 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 160 | def test_reanalysis_Stream(test_file): 161 | dataset, request, key_count = TEST_FILES[test_file] 162 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 163 | 164 | stream = cfgrib.FileStream(path) 165 | leader = stream.first() 166 | assert len(leader) == key_count 167 | assert sum(1 for _ in stream) == leader["count"] 168 | 169 | 170 | @pytest.mark.parametrize("test_file", TEST_FILES.keys()) 171 | def test_reanalysis_Dataset(test_file): 172 | dataset, request, key_count = TEST_FILES[test_file] 173 | path = cdscommon.ensure_data(dataset, request, name="cds-" + test_file + "-{uuid}.grib") 174 | 175 | res = cfgrib.xarray_store.open_dataset(path) 176 | res.to_netcdf(path[:-5] + ".nc") 177 | -------------------------------------------------------------------------------- /tests/cdscommon.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import os 3 | import shutil 4 | import typing as T 5 | 6 | import cdsapi # type: ignore 7 | 8 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 9 | EXTENSIONS = {"grib": ".grib", "netcdf": ".nc"} 10 | 11 | 12 | def ensure_data(dataset, request, folder=SAMPLE_DATA_FOLDER, name="{uuid}.grib"): 13 | # type: (str, T.Dict[str, T.Any], str, str) -> str 14 | request_text = str(sorted(request.items())).encode("utf-8") 15 | uuid = hashlib.sha3_224(request_text).hexdigest()[:10] 16 | format = request.get("format", "grib") 17 | ext = EXTENSIONS.get(format, ".bin") 18 | name = 
name.format(**locals()) 19 | path = os.path.join(SAMPLE_DATA_FOLDER, name) 20 | if not os.path.exists(path): 21 | c = cdsapi.Client() 22 | try: 23 | c.retrieve(dataset, request, target=path + ".tmp") 24 | shutil.move(path + ".tmp", path) 25 | except: 26 | os.unlink(path + ".tmp") 27 | raise 28 | return path 29 | -------------------------------------------------------------------------------- /tests/environment-macos-3.8.yml: -------------------------------------------------------------------------------- 1 | name: macos-3.8 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - attrs=23.1.0 7 | - aws-c-auth=0.7.3 8 | - aws-c-cal=0.6.1 9 | - aws-c-common=0.9.0 10 | - aws-c-compression=0.2.17 11 | - aws-c-event-stream=0.3.1 12 | - aws-c-http=0.7.11 13 | - aws-c-io=0.13.32 14 | - aws-c-mqtt=0.9.3 15 | - aws-c-s3=0.3.14 16 | - aws-c-sdkutils=0.1.12 17 | - aws-checksums=0.1.17 18 | - aws-crt-cpp=0.21.0 19 | - aws-sdk-cpp=1.10.57 20 | - blas=2.117 21 | - blas-devel=3.9.0 22 | - blosc=1.21.4 23 | - brotli-python=1.0.9 24 | - bzip2=1.0.8 25 | - c-ares=1.19.1 26 | - ca-certificates=2023.7.22 27 | - certifi=2023.7.22 28 | - cffi=1.15.1 29 | - charset-normalizer=3.2.0 30 | - click=8.1.6 31 | - colorama=0.4.6 32 | - coverage=7.3.0 33 | - eccodes=2.31.0 34 | - exceptiongroup=1.1.3 35 | - findlibs=0.0.5 36 | - hdf4=4.2.15 37 | - hdf5=1.14.1 38 | - icu=72.1 39 | - idna=3.4 40 | - iniconfig=2.0.0 41 | - jasper=4.0.0 42 | - krb5=1.21.2 43 | - libaec=1.0.6 44 | - libblas=3.9.0 45 | - libcblas=3.9.0 46 | - libcurl=8.2.1 47 | - libcxx=16.0.6 48 | - libedit=3.1.20191231 49 | - libev=4.33 50 | - libffi=3.4.4 51 | - libgfortran=5.0.0 52 | - libgfortran5=12.3.0 53 | - libiconv=1.17 54 | - libjpeg-turbo=2.1.5.1 55 | - liblapack=3.9.0 56 | - liblapacke=3.9.0 57 | - libnetcdf=4.9.2 58 | - libnghttp2=1.52.0 59 | - libopenblas=0.3.23 60 | - libpng=1.6.39 61 | - libsqlite=3.42.0 62 | - libssh2=1.11.0 63 | - libxml2=2.11.5 64 | - libzip=1.9.2 65 | - libzlib=1.2.13 66 | - llvm-openmp=16.0.6 67 | - lz4-c=1.9.4 68 | - mypy=0.812 69 | - mypy_extensions=0.4.3 70 | - ncurses=6.4 71 | - nomkl=1.0 72 | - numpy=1.24.3 73 | - numpy-base=1.24.3 74 | - openblas=0.3.23 75 | - openssl=3.1.2 76 | - packaging=23.1 77 | - pandas=1.5.3 78 | - pip=23.2.1 79 | - platformdirs=3.10.0 80 | - pluggy=1.2.0 81 | - pooch=1.7.0 82 | - psutil=5.9.0 83 | - pycparser=2.21 84 | - pysocks=1.7.1 85 | - pytest=7.4.0 86 | - pytest-cov=4.1.0 87 | - python=3.8.17 88 | - python-dateutil=2.8.2 89 | - python-eccodes=1.6.0 90 | - python_abi=3.8 91 | - pytz=2023.3 92 | - readline=8.2 93 | - requests=2.32.0 94 | - scipy=1.10.1 95 | - setuptools=68.0.0 96 | - six=1.16.0 97 | - snappy=1.1.10 98 | - sqlite=3.41.2 99 | - tk=8.6.12 100 | - toml=0.10.2 101 | - tomli=2.0.1 102 | - typed-ast=1.4.3 103 | - typing-extensions=4.7.1 104 | - typing_extensions=4.7.1 105 | - urllib3=2.0.4 106 | - wheel=0.38.4 107 | - xarray=2023.1.0 108 | - xz=5.4.2 109 | - zlib=1.2.13 110 | - zstd=1.5.2 111 | prefix: /usr/local/miniconda/envs/macos-3.8 112 | -------------------------------------------------------------------------------- /tests/environment-ubuntu-3.10.yml: -------------------------------------------------------------------------------- 1 | name: ubuntu-3.10 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - _libgcc_mutex=0.1 7 | - _openmp_mutex=4.5 8 | - attrs=23.1.0 9 | - bzip2=1.0.8 10 | - c-ares=1.17.2 11 | - ca-certificates=2023.5.7 12 | - cffi=1.15.1 13 | - click=8.1.3 14 | - colorama=0.4.6 15 | - curl=7.79.1 16 | - exceptiongroup=1.1.1 
17 | - findlibs=0.0.5 18 | - freeglut=3.2.1 19 | - hdf4=4.2.15 20 | - hdf5=1.12.1 21 | - importlib-metadata=6.6.0 22 | - iniconfig=2.0.0 23 | - jasper=2.0.14 24 | - jpeg=9d 25 | - keyutils=1.6.1 26 | - krb5=1.19.3 27 | - ld_impl_linux-64=2.40 28 | - libaec=1.0.6 29 | - libblas=3.9.0 30 | - libcblas=3.9.0 31 | - libcurl=7.79.1 32 | - libedit=3.1.20191231 33 | - libev=4.33 34 | - libffi=3.4.2 35 | - libgcc-ng=12.2.0 36 | - libgfortran-ng=11.2.0 37 | - libgfortran5=11.2.0 38 | - libglu=9.0.0 39 | - libgomp=12.2.0 40 | - liblapack=3.9.0 41 | - libnetcdf=4.8.1 42 | - libnghttp2=1.43.0 43 | - libnsl=2.0.0 44 | - libopenblas=0.3.21 45 | - libpng=1.6.37 46 | - libsqlite=3.42.0 47 | - libssh2=1.10.0 48 | - libstdcxx-ng=12.2.0 49 | - libuuid=2.38.1 50 | - libxcb=1.13 51 | - libzip=1.9.2 52 | - libzlib=1.2.13 53 | - ncurses=6.3 54 | - numpy=1.24.3 55 | - openssl=3.1.0 56 | - packaging=23.1 57 | - pandas=2.0.1 58 | - pip=23.1.2 59 | - pluggy=1.0.0 60 | - pthread-stubs=0.4 61 | - pycparser=2.21 62 | - pytest=7.3.1 63 | - python=3.10.11 64 | - python-dateutil=2.8.2 65 | - python-eccodes=1.6.0 66 | - python-tzdata=2023.3 67 | - python_abi=3.10 68 | - pytz=2023.3 69 | - readline=8.2 70 | - setuptools=67.7.2 71 | - six=1.16.0 72 | - tk=8.6.12 73 | - tomli=2.0.1 74 | - tzdata=2023c 75 | - wheel=0.40.0 76 | - xarray=2023.4.2 77 | - xorg-fixesproto=5.0 78 | - xorg-inputproto=2.3.2 79 | - xorg-kbproto=1.0.7 80 | - xorg-libx11=1.7.2 81 | - xorg-libxau=1.0.9 82 | - xorg-libxdmcp=1.1.3 83 | - xorg-libxext=1.3.4 84 | - xorg-libxfixes=5.0.3 85 | - xorg-libxi=1.7.10 86 | - xorg-xextproto=7.3.0 87 | - xorg-xproto=7.0.31 88 | - xz=5.2.6 89 | - zipp=3.15.0 90 | - zlib=1.2.13 91 | prefix: /usr/share/miniconda/envs/ubuntu-3.10 92 | -------------------------------------------------------------------------------- /tests/environment-ubuntu-3.7.yml: -------------------------------------------------------------------------------- 1 | name: ubuntu-3.7 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - _libgcc_mutex=0.1 7 | - _openmp_mutex=4.5 8 | - attrs=21.4.0 9 | - blas=2.106 10 | - bottleneck=1.3.2 11 | - bzip2=1.0.8 12 | - c-ares=1.18.1 13 | - ca-certificates=2021.10.26 14 | - certifi=2021.10.8 15 | - cffi=1.15.0 16 | - click=8.0.3 17 | - coverage=6.2 18 | - curl=7.80.0 19 | - eccodes=2.31.0 20 | - findlibs=0.0.2 21 | - freeglut=3.2.1 22 | - hdf4=4.2.15 23 | - hdf5=1.12.1 24 | - importlib-metadata=4.8.2 25 | - importlib_metadata=4.8.2 26 | - iniconfig=1.1.1 27 | - jasper=2.0.33 28 | - jpeg=9d 29 | - krb5=1.19.2 30 | - ld_impl_linux-64=2.35.1 31 | - libaec=1.0.6 32 | - libblas=3.9.0 33 | - libcblas=3.9.0 34 | - libcurl=7.80.0 35 | - libedit=3.1.20210910 36 | - libev=4.33 37 | - libffi=3.3 38 | - libgcc-ng=11.2.0 39 | - libgfortran-ng=11.2.0 40 | - libgfortran5=11.2.0 41 | - libglu=9.0.0 42 | - liblapack=3.9.0 43 | - liblapacke=3.9.0 44 | - libnetcdf=4.8.1 45 | - libnghttp2=1.46.0 46 | - libopenblas=0.3.12 47 | - libpng=1.6.37 48 | - libssh2=1.9.0 49 | - libstdcxx-ng=11.2.0 50 | - libxcb=1.14 51 | - libzip=1.8.0 52 | - llvm-openmp=12.0.1 53 | - mypy=0.812 54 | - mypy_extensions=0.4.3 55 | - ncurses=6.3 56 | - nomkl=3.0 57 | - numexpr=2.8.1 58 | - numpy=1.21.2 59 | - numpy-base=1.21.2 60 | - openssl=1.1.1m 61 | - packaging=21.3 62 | - pandas=1.3.5 63 | - pip=21.2.2 64 | - pluggy=1.0.0 65 | - psutil=5.8.0 66 | - py=1.11.0 67 | - pycparser=2.21 68 | - pyparsing=3.0.4 69 | - pytest=6.2.5 70 | - pytest-cov=3.0.0 71 | - python=3.7.11 72 | - python-dateutil=2.8.2 73 | - python-eccodes=1.6.0 74 | - python_abi=3.7 
75 | - pytz=2021.3 76 | - readline=8.1.2 77 | - scipy=1.7.3 78 | - setuptools=58.0.4 79 | - six=1.16.0 80 | - sqlite=3.37.0 81 | - tk=8.6.11 82 | - toml=0.10.2 83 | - tomli=1.2.2 84 | - typed-ast=1.4.3 85 | - typing_extensions=3.10.0.2 86 | - wheel=0.37.1 87 | - xarray=0.20.2 88 | - xorg-fixesproto=5.0 89 | - xorg-inputproto=2.3.2 90 | - xorg-kbproto=1.0.7 91 | - xorg-libx11=1.7.2 92 | - xorg-libxau=1.0.9 93 | - xorg-libxext=1.3.4 94 | - xorg-libxfixes=5.0.3 95 | - xorg-libxi=1.7.10 96 | - xorg-xextproto=7.3.0 97 | - xorg-xproto=7.0.31 98 | - xz=5.2.5 99 | - zipp=3.7.0 100 | - zlib=1.2.11 101 | prefix: /usr/share/miniconda/envs/ubuntu-3.7 102 | -------------------------------------------------------------------------------- /tests/environment-ubuntu-3.8.yml: -------------------------------------------------------------------------------- 1 | name: ubuntu-3.8 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - _libgcc_mutex=0.1 7 | - _openmp_mutex=4.5 8 | - attrs=23.1.0 9 | - aws-c-auth=0.7.3 10 | - aws-c-cal=0.6.1 11 | - aws-c-common=0.9.0 12 | - aws-c-compression=0.2.17 13 | - aws-c-event-stream=0.3.1 14 | - aws-c-http=0.7.11 15 | - aws-c-io=0.13.32 16 | - aws-c-mqtt=0.9.3 17 | - aws-c-s3=0.3.14 18 | - aws-c-sdkutils=0.1.12 19 | - aws-checksums=0.1.17 20 | - aws-crt-cpp=0.21.0 21 | - aws-sdk-cpp=1.10.57 22 | - blas=2.117 23 | - blas-devel=3.9.0 24 | - blosc=1.21.4 25 | - bottleneck=1.3.7 26 | - brotli-python=1.0.9 27 | - bzip2=1.0.8 28 | - c-ares=1.19.1 29 | - ca-certificates=2023.7.22 30 | - certifi=2023.7.22 31 | - cffi=1.15.1 32 | - charset-normalizer=3.2.0 33 | - click=8.1.6 34 | - colorama=0.4.6 35 | - coverage=7.3.0 36 | - curl=8.2.1 37 | - eccodes=2.31.0 38 | - exceptiongroup=1.1.3 39 | - findlibs=0.0.5 40 | - freeglut=3.2.2 41 | - hdf4=4.2.15 42 | - hdf5=1.14.1 43 | - icu=72.1 44 | - idna=3.4 45 | - importlib-metadata=6.8.0 46 | - importlib_metadata=6.8.0 47 | - iniconfig=2.0.0 48 | - jasper=4.0.0 49 | - jpeg=9e 50 | - keyutils=1.6.1 51 | - krb5=1.21.2 52 | - ld_impl_linux-64=2.40 53 | - libaec=1.0.6 54 | - libblas=3.9.0 55 | - libcblas=3.9.0 56 | - libcurl=8.2.1 57 | - libedit=3.1.20191231 58 | - libev=4.33 59 | - libffi=3.4.2 60 | - libgcc-ng=13.1.0 61 | - libgfortran-ng=13.1.0 62 | - libgfortran5=13.1.0 63 | - libglu=9.0.0 64 | - libiconv=1.17 65 | - liblapack=3.9.0 66 | - liblapacke=3.9.0 67 | - libnetcdf=4.9.2 68 | - libnghttp2=1.52.0 69 | - libnsl=2.0.0 70 | - libopenblas=0.3.23 71 | - libpng=1.6.39 72 | - libsqlite=3.42.0 73 | - libssh2=1.11.0 74 | - libstdcxx-ng=13.1.0 75 | - libxcb=1.15 76 | - libxml2=2.11.5 77 | - libzip=1.9.2 78 | - libzlib=1.2.13 79 | - llvm-openmp=16.0.6 80 | - lz4-c=1.9.4 81 | - mypy=1.5.1 82 | - mypy_extensions=1.0.0 83 | - ncurses=6.4 84 | - nomkl=1.0 85 | - numexpr=2.8.1 86 | - numpy=1.21.2 87 | - numpy-base=1.21.2 88 | - openblas=0.3.23 89 | - openssl=3.1.2 90 | - packaging=23.1 91 | - pandas=1.5.3 92 | - pip=23.2.1 93 | - platformdirs=3.10.0 94 | - pluggy=1.2.0 95 | - pooch=1.7.0 96 | - psutil=5.9.5 97 | - pthread-stubs=0.4 98 | - py=1.11.0 99 | - pycparser=2.21 100 | - pyparsing=3.1.1 101 | - pysocks=1.7.1 102 | - pytest=7.4.0 103 | - pytest-cov=4.1.0 104 | - python=3.8.12 105 | - python-dateutil=2.8.2 106 | - python-eccodes=1.5.1 107 | - python_abi=3.8 108 | - pytz=2023.3 109 | - readline=8.2 110 | - requests=2.32.0 111 | - s2n=1.3.48 112 | - scipy=1.10.1 113 | - setuptools=68.0.0 114 | - six=1.16.0 115 | - snappy=1.1.10 116 | - sqlite=3.42.0 117 | - tk=8.6.12 118 | - toml=0.10.2 119 | - tomli=2.0.1 120 | - typed-ast=1.5.5 
121 | - typing-extensions=4.7.1 122 | - typing_extensions=4.7.1 123 | - urllib3=2.0.4 124 | - wheel=0.41.1 125 | - xarray=2023.1.0 126 | - xorg-fixesproto=5.0 127 | - xorg-inputproto=2.3.2 128 | - xorg-kbproto=1.0.7 129 | - xorg-libx11=1.8.6 130 | - xorg-libxau=1.0.11 131 | - xorg-libxdmcp=1.1.3 132 | - xorg-libxext=1.3.4 133 | - xorg-libxfixes=5.0.3 134 | - xorg-libxi=1.7.10 135 | - xorg-xextproto=7.3.0 136 | - xorg-xproto=7.0.31 137 | - xz=5.2.6 138 | - zipp=3.16.2 139 | - zlib=1.2.13 140 | - zstd=1.5.2 141 | prefix: /usr/share/miniconda/envs/ubuntu-3.8 142 | -------------------------------------------------------------------------------- /tests/environment-ubuntu-3.9-minimal.yml: -------------------------------------------------------------------------------- 1 | name: ubuntu-3.9-minimal 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - _libgcc_mutex=0.1 7 | - _openmp_mutex=4.5 8 | - attrs=23.1.0 9 | - aws-c-auth=0.7.3 10 | - aws-c-cal=0.6.1 11 | - aws-c-common=0.9.0 12 | - aws-c-compression=0.2.17 13 | - aws-c-event-stream=0.3.1 14 | - aws-c-http=0.7.11 15 | - aws-c-io=0.13.32 16 | - aws-c-mqtt=0.9.3 17 | - aws-c-s3=0.3.14 18 | - aws-c-sdkutils=0.1.12 19 | - aws-checksums=0.1.17 20 | - aws-crt-cpp=0.21.0 21 | - aws-sdk-cpp=1.10.57 22 | - blas=2.117 23 | - blas-devel=3.9.0 24 | - blosc=1.21.4 25 | - bzip2=1.0.8 26 | - c-ares=1.19.1 27 | - ca-certificates=2023.7.22 28 | - certifi=2023.7.22 29 | - cffi=1.15.1 30 | - click=8.1.7 31 | - colorama=0.4.6 32 | - coverage=7.3.0 33 | - curl=8.2.1 34 | - eccodes=2.31.0 35 | - exceptiongroup=1.1.3 36 | - findlibs=0.0.5 37 | - freeglut=3.2.2 38 | - hdf4=4.2.15 39 | - hdf5=1.14.1 40 | - icu=72.1 41 | - iniconfig=2.0.0 42 | - jasper=4.0.0 43 | - jpeg=9e 44 | - keyutils=1.6.1 45 | - krb5=1.21.2 46 | - ld_impl_linux-64=2.40 47 | - libaec=1.0.6 48 | - libblas=3.9.0 49 | - libcblas=3.9.0 50 | - libcurl=8.2.1 51 | - libedit=3.1.20191231 52 | - libev=4.33 53 | - libffi=3.4.2 54 | - libgcc-ng=13.1.0 55 | - libgfortran-ng=13.1.0 56 | - libgfortran5=13.1.0 57 | - libglu=9.0.0 58 | - libiconv=1.17 59 | - liblapack=3.9.0 60 | - liblapacke=3.9.0 61 | - libnetcdf=4.9.2 62 | - libnghttp2=1.52.0 63 | - libopenblas=0.3.23 64 | - libpng=1.6.39 65 | - libsqlite=3.42.0 66 | - libssh2=1.11.0 67 | - libstdcxx-ng=13.1.0 68 | - libxcb=1.15 69 | - libxml2=2.11.5 70 | - libzip=1.9.2 71 | - libzlib=1.2.13 72 | - llvm-openmp=16.0.6 73 | - lz4-c=1.9.4 74 | - ncurses=6.4 75 | - nomkl=1.0 76 | - numpy=1.21.2 77 | - numpy-base=1.21.2 78 | - openblas=0.3.23 79 | - openssl=3.1.2 80 | - packaging=23.1 81 | - pip=23.2.1 82 | - pluggy=1.2.0 83 | - pthread-stubs=0.4 84 | - py=1.11.0 85 | - pycparser=2.21 86 | - pyparsing=3.1.1 87 | - pytest=7.4.0 88 | - pytest-cov=4.1.0 89 | - python=3.9.7 90 | - python-eccodes=1.5.1 91 | - python_abi=3.9 92 | - readline=8.2 93 | - s2n=1.3.48 94 | - setuptools=68.0.0 95 | - snappy=1.1.10 96 | - sqlite=3.42.0 97 | - tk=8.6.12 98 | - toml=0.10.2 99 | - tomli=2.0.1 100 | - tzdata=2023c 101 | - wheel=0.41.1 102 | - xorg-fixesproto=5.0 103 | - xorg-inputproto=2.3.2 104 | - xorg-kbproto=1.0.7 105 | - xorg-libx11=1.8.6 106 | - xorg-libxau=1.0.11 107 | - xorg-libxdmcp=1.1.3 108 | - xorg-libxext=1.3.4 109 | - xorg-libxfixes=5.0.3 110 | - xorg-libxi=1.7.10 111 | - xorg-xextproto=7.3.0 112 | - xorg-xproto=7.0.31 113 | - xz=5.2.6 114 | - zlib=1.2.13 115 | - zstd=1.5.2 116 | prefix: /usr/share/miniconda/envs/ubuntu-3.9-minimal 117 | -------------------------------------------------------------------------------- 
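The environment files above (and the Windows one below) pin exact package versions so that CI runs are reproducible per platform and Python version. A minimal sketch, assuming an environment created with "conda env create -f tests/environment-ubuntu-3.9-minimal.yml", that spot-checks two of the pins; codes_get_api_version is the eccodes binding call, and the expected values are copied from that file:

import sys

import eccodes  # type: ignore

# Compare the interpreter and the ecCodes library against the pinned versions.
print("python :", ".".join(map(str, sys.version_info[:3])))  # pinned: 3.9.7
print("eccodes:", eccodes.codes_get_api_version())  # pinned: 2.31.0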
/tests/environment-windows-3.8.yml: -------------------------------------------------------------------------------- 1 | name: windows-3.8 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - atomicwrites=1.4.0 7 | - attrs=21.4.0 8 | - bzip2=1.0.8 9 | - ca-certificates=2021.10.26 10 | - certifi=2021.10.8 11 | - cffi=1.15.0 12 | - click=8.0.3 13 | - colorama=0.4.4 14 | - coverage=5.5 15 | - curl=7.80.0 16 | - eccodes=2.24.2 17 | - findlibs=0.0.2 18 | - freeglut=3.2.1 19 | - hdf4=4.2.15 20 | - hdf5=1.12.1 21 | - importlib-metadata=4.8.2 22 | - importlib_metadata=4.8.2 23 | - iniconfig=1.1.1 24 | - jasper=2.0.33 25 | - jpeg=9d 26 | - libblas=3.9.0 27 | - libcblas=3.9.0 28 | - libcurl=7.80.0 29 | - liblapack=3.9.0 30 | - libnetcdf=4.8.1 31 | - libpng=1.6.37 32 | - libssh2=1.9.0 33 | - libzip=1.8.0 34 | - m2w64-gcc-libgfortran=5.3.0 35 | - m2w64-gcc-libs=5.3.0 36 | - m2w64-gcc-libs-core=5.3.0 37 | - m2w64-gmp=6.1.0 38 | - m2w64-libwinpthread-git=5.0.0.4634.697f757 39 | - more-itertools=8.12.0 40 | - msys2-conda-epoch=20160418 41 | - mypy=0.812 42 | - mypy_extensions=0.4.3 43 | - nomkl=1.0 44 | - numpy=1.22.1 45 | - openssl=1.1.1m 46 | - packaging=21.3 47 | - pandas=1.2.5 48 | - pip=21.2.2 49 | - pluggy=0.13.1 50 | - psutil=5.8.0 51 | - py=1.11.0 52 | - pycparser=2.21 53 | - pyparsing=3.0.4 54 | - pytest=6.2.4 55 | - pytest-cov=3.0.0 56 | - python=3.8.12 57 | - python-dateutil=2.8.2 58 | - python-eccodes=1.4.0 59 | - python_abi=3.8 60 | - pytz=2021.3 61 | - scipy=1.7.3 62 | - setuptools=58.0.4 63 | - six=1.16.0 64 | - sqlite=3.37.0 65 | - toml=0.10.2 66 | - tomli=1.2.2 67 | - typed-ast=1.4.3 68 | - typing_extensions=3.10.0.2 69 | - vc=14.2 70 | - vs2015_runtime=14.27.29016 71 | - wheel=0.37.1 72 | - wincertstore=0.2 73 | - xarray=0.20.2 74 | - zipp=3.7.0 75 | - zlib=1.2.11 76 | prefix: C:\Miniconda\envs\windows-3.8 77 | -------------------------------------------------------------------------------- /tests/sample-data/alternate-scanning.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/alternate-scanning.grib -------------------------------------------------------------------------------- /tests/sample-data/cams-egg4-monthly.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/cams-egg4-monthly.grib -------------------------------------------------------------------------------- /tests/sample-data/cfrzr_and_cprat.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/cfrzr_and_cprat.grib -------------------------------------------------------------------------------- /tests/sample-data/cfrzr_and_cprat_0s.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/cfrzr_and_cprat_0s.grib -------------------------------------------------------------------------------- /tests/sample-data/ds.waveh.5.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/ds.waveh.5.grib 
-------------------------------------------------------------------------------- /tests/sample-data/era5-levels-corrupted.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/era5-levels-corrupted.grib -------------------------------------------------------------------------------- /tests/sample-data/era5-levels-members.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/era5-levels-members.grib -------------------------------------------------------------------------------- /tests/sample-data/era5-levels-members.nc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/era5-levels-members.nc -------------------------------------------------------------------------------- /tests/sample-data/era5-single-level-scalar-time.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/era5-single-level-scalar-time.grib -------------------------------------------------------------------------------- /tests/sample-data/fields_with_missing_values.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/fields_with_missing_values.grib -------------------------------------------------------------------------------- /tests/sample-data/forecast_monthly_ukmo.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/forecast_monthly_ukmo.grib -------------------------------------------------------------------------------- /tests/sample-data/hpa_and_pa.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/hpa_and_pa.grib -------------------------------------------------------------------------------- /tests/sample-data/lambert_grid.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/lambert_grid.grib -------------------------------------------------------------------------------- /tests/sample-data/multi_param_on_multi_dims.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/multi_param_on_multi_dims.grib -------------------------------------------------------------------------------- /tests/sample-data/ncep-seasonal-monthly.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/ncep-seasonal-monthly.grib -------------------------------------------------------------------------------- /tests/sample-data/reduced_gg.grib: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/reduced_gg.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_gg_ml.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_gg_ml.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_gg_ml_g2.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_gg_ml_g2.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_gg_pl.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_gg_pl.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_gg_sfc.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_gg_sfc.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_gg_wrong_increment.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_gg_wrong_increment.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_ll_msl.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_ll_msl.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_ll_sfc.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_ll_sfc.grib -------------------------------------------------------------------------------- /tests/sample-data/regular_ll_wrong_increment.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/regular_ll_wrong_increment.grib -------------------------------------------------------------------------------- /tests/sample-data/scanning_mode_64.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/scanning_mode_64.grib -------------------------------------------------------------------------------- /tests/sample-data/single_gridpoint.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/single_gridpoint.grib 
-------------------------------------------------------------------------------- /tests/sample-data/soil-surface-level-mix.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/soil-surface-level-mix.grib -------------------------------------------------------------------------------- /tests/sample-data/spherical_harmonics.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/spherical_harmonics.grib -------------------------------------------------------------------------------- /tests/sample-data/step_60m.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/step_60m.grib -------------------------------------------------------------------------------- /tests/sample-data/t_analysis_and_fc_0.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/t_analysis_and_fc_0.grib -------------------------------------------------------------------------------- /tests/sample-data/t_on_different_level_types.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/t_on_different_level_types.grib -------------------------------------------------------------------------------- /tests/sample-data/tp_on_different_grid_resolutions.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/tp_on_different_grid_resolutions.grib -------------------------------------------------------------------------------- /tests/sample-data/uv_on_different_levels.grib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ecmwf/cfgrib/44ecc6228edd1d430712e61f5074fcaa25ae56c8/tests/sample-data/uv_on_different_levels.grib -------------------------------------------------------------------------------- /tests/test_10_cfunits.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | xr = pytest.importorskip("xarray") # noqa 4 | 5 | from cf2cdm import cfunits 6 | 7 | 8 | def test_are_convertible() -> None: 9 | assert cfunits.are_convertible("K", "K") 10 | assert cfunits.are_convertible("m", "meters") 11 | assert cfunits.are_convertible("hPa", "Pa") 12 | assert not cfunits.are_convertible("m", "Pa") 13 | -------------------------------------------------------------------------------- /tests/test_20_cfcoords.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import typing as T 3 | 4 | import numpy as np 5 | import pytest 6 | 7 | pytest.importorskip("xarray") # noqa 8 | 9 | import xarray as xr 10 | 11 | from cf2cdm import cfcoords 12 | 13 | 14 | @pytest.fixture 15 | def da1() -> xr.Dataset: 16 | latitude = [0.5, 0.0] 17 | longitude = [10.0, 10.5] 18 | time = ["2017-12-01T00:00:00", "2017-12-01T12:00:00", "2017-12-02T00:00:00"] 19 | level = [950, 500] 20 | data 
= xr.DataArray( 21 | np.zeros((2, 2, 3, 2), dtype="float32"), 22 | coords=[ 23 | ("lat", latitude, {"units": "degrees_north"}), 24 | ("lon", longitude, {"units": "degrees_east"}), 25 | ( 26 | "ref_time", 27 | np.array(time, dtype="datetime64[ns]"), 28 | {"standard_name": "forecast_reference_time"}, 29 | ), 30 | ("level", np.array(level), {"units": "hPa"}), 31 | ], 32 | ) 33 | return data.to_dataset(name="da1") 34 | 35 | 36 | @pytest.fixture 37 | def da2() -> xr.Dataset: 38 | latitude = [0.5, 0.0] 39 | longitude = [10.0, 10.5] 40 | time = ["2017-12-01T00:00:00", "2017-12-01T12:00:00", "2017-12-02T00:00:00"] 41 | level = [950, 500] 42 | data = xr.DataArray( 43 | np.zeros((2, 2, 3, 2), dtype="float32"), 44 | coords=[ 45 | ("lat", latitude, {"units": "degrees_north"}), 46 | ("lon", longitude, {"units": "degrees_east"}), 47 | ("time", np.array(time, dtype="datetime64[ns]")), 48 | ("level", np.array(level), {"units": "hPa"}), 49 | ], 50 | ) 51 | return data.to_dataset(name="da2") 52 | 53 | 54 | @pytest.fixture 55 | def da3() -> xr.Dataset: 56 | latitude = [0.5, 0.0] 57 | longitude = [10.0, 10.5] 58 | step = [0, 24, 48] 59 | time = ["2017-12-01T00:00:00", "2017-12-01T12:00:00"] 60 | level = [950, 500] 61 | data = xr.DataArray( 62 | np.zeros((2, 2, 3, 2, 2), dtype="float32"), 63 | coords=[ 64 | ("lat", latitude, {"units": "degrees_north"}), 65 | ("lon", longitude, {"units": "degrees_east"}), 66 | ("step", np.array(step, dtype="timedelta64[h]"), {"standard_name": "forecast_period"}), 67 | ( 68 | "ref_time", 69 | np.array(time, dtype="datetime64[ns]"), 70 | {"standard_name": "forecast_reference_time"}, 71 | ), 72 | ("time", np.array(level), {"units": "hPa"}), 73 | ], 74 | ) 75 | 76 | return data.to_dataset(name="da3") 77 | 78 | 79 | def test_match_values() -> None: 80 | mapping = {"callable": len, "int": 1} # type: T.Dict[T.Hashable, T.Any] 81 | res = cfcoords.match_values(callable, mapping) 82 | 83 | assert res == ["callable"] 84 | 85 | 86 | def test_translate_coord_direction(da1: xr.Dataset) -> None: 87 | res = cfcoords.translate_coord_direction(da1, "lat", "increasing") 88 | assert res.lat.values[-1] > res.lat.values[0] 89 | 90 | res = cfcoords.translate_coord_direction(da1, "lat", "decreasing") 91 | assert res.lat.values[-1] < res.lat.values[0] 92 | 93 | res = cfcoords.translate_coord_direction(da1, "lon", "decreasing") 94 | assert res.lon.values[-1] < res.lon.values[0] 95 | 96 | res = cfcoords.translate_coord_direction(da1, "lon", "increasing") 97 | assert res.lon.values[-1] > res.lon.values[0] 98 | 99 | res = cfcoords.translate_coord_direction(da1.isel(lon=0), "lon", "increasing") 100 | assert len(res.lon.shape) == 0 101 | 102 | with pytest.raises(ValueError): 103 | cfcoords.translate_coord_direction(da1, "lat", "wrong") 104 | 105 | 106 | def test_coord_translator(da1: xr.Dataset) -> None: 107 | res = cfcoords.coord_translator("level", "hPa", "decreasing", lambda x: False, "lvl", da1) 108 | assert da1.equals(res) 109 | 110 | with pytest.raises(ValueError): 111 | cfcoords.coord_translator("level", "hPa", "decreasing", lambda x: True, "lvl", da1) 112 | 113 | res = cfcoords.coord_translator("level", "hPa", "decreasing", cfcoords.is_isobaric, "lvl", da1) 114 | assert da1.equals(res) 115 | 116 | with pytest.raises(ValueError): 117 | cfcoords.coord_translator("level", "hPa", "decreasing", cfcoords.is_latitude, "lvl", da1) 118 | 119 | res = cfcoords.coord_translator("level", "Pa", "decreasing", cfcoords.is_isobaric, "lvl", da1) 120 | assert not da1.equals(res) 121 | 122 | res = 
cfcoords.coord_translator("step", "h", "increasing", cfcoords.is_step, "step", da1) 123 | assert da1.equals(res) 124 | 125 | 126 | def test_translate_coords(da1: xr.Dataset, da2: xr.Dataset, da3: xr.Dataset) -> None: 127 | res = cfcoords.translate_coords(da1) 128 | 129 | assert "latitude" in res.coords 130 | assert "longitude" in res.coords 131 | assert "time" in res.coords 132 | 133 | res = cfcoords.translate_coords(da2) 134 | 135 | assert "latitude" in res.coords 136 | assert "longitude" in res.coords 137 | assert "valid_time" in res.coords 138 | 139 | res = cfcoords.translate_coords(da3, errors="ignore") 140 | assert "latitude" in res.coords 141 | assert "longitude" in res.coords 142 | 143 | 144 | def test_translate_coords_errors(da3: xr.Dataset) -> None: 145 | cfcoords.translate_coords(da3) 146 | cfcoords.translate_coords(da3, errors="ignore") 147 | with pytest.raises(RuntimeError): 148 | cfcoords.translate_coords(da3, errors="raise") 149 | 150 | DATA_MODEL = {"config": {"preferred_time_dimension": "valid_time"}} 151 | cfcoords.translate_coords(da3, DATA_MODEL) 152 | 153 | da3_fail = da3.drop_vars("time") 154 | cfcoords.translate_coords(da3_fail, DATA_MODEL) 155 | cfcoords.translate_coords(da3_fail, DATA_MODEL, errors="ignore") 156 | -------------------------------------------------------------------------------- /tests/test_20_main.py: -------------------------------------------------------------------------------- 1 | import click.testing 2 | 3 | from cfgrib import __main__ 4 | 5 | 6 | def test_cfgrib_cli_selfcheck() -> None: 7 | runner = click.testing.CliRunner() 8 | 9 | res = runner.invoke(__main__.cfgrib_cli, ["selfcheck"]) 10 | 11 | assert res.exit_code == 0 12 | assert "Your system is ready." in res.output 13 | 14 | res = runner.invoke(__main__.cfgrib_cli, ["non-existent-command"]) 15 | assert res.exit_code == 2 16 | -------------------------------------------------------------------------------- /tests/test_20_messages.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import eccodes # type: ignore 4 | import numpy as np 5 | import py 6 | import pytest 7 | 8 | from cfgrib import messages 9 | 10 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 11 | TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib") 12 | 13 | 14 | def test_Message_read() -> None: 15 | with open(TEST_DATA, "rb") as file: 16 | res1 = messages.Message.from_file(file) 17 | 18 | assert res1.message_get("paramId") == 129 19 | assert res1["paramId"] == 129 20 | assert isinstance(res1["paramId:float"], float) 21 | assert res1["centre"] == "ecmf" 22 | assert res1["centre:int"] == 98 23 | assert list(res1)[0] == "globalDomain" 24 | assert list(res1.message_grib_keys("time"))[0] == "dataDate" 25 | assert "paramId" in res1 26 | assert len(res1) > 100 27 | 28 | with pytest.raises(KeyError): 29 | res1["non-existent-key"] 30 | 31 | assert res1.message_get("non-existent-key", default=1) == 1 32 | 33 | res2 = messages.Message.from_message(res1) 34 | for (k2, v2), (k1, v1) in zip(res2.items(), res1.items()): 35 | assert k2 == k1 36 | if isinstance(v2, np.ndarray) or isinstance(v1, np.ndarray): 37 | assert np.allclose(v2, v1) 38 | else: 39 | assert v2 == v1 40 | 41 | with open(TEST_DATA, "rb") as file: 42 | with pytest.raises(EOFError): 43 | while True: 44 | messages.Message.from_file(file) 45 | 46 | 47 | def test_Message_write(tmpdir: py.path.local) -> None: 48 | res = 
messages.Message.from_sample_name("regular_ll_pl_grib2") 49 | assert res["gridType"] == "regular_ll" 50 | 51 | res.message_set("Ni", 20) 52 | assert res["Ni"] == 20 53 | 54 | res["iDirectionIncrementInDegrees"] = 1.0 55 | assert res["iDirectionIncrementInDegrees"] == 1.0 56 | 57 | res.message_set("gridType", "reduced_gg") 58 | assert res["gridType"] == "reduced_gg" 59 | 60 | res["pl"] = [2.0, 3.0] 61 | assert np.allclose(res["pl"], [2.0, 3.0]) 62 | 63 | # warn on errors 64 | res["centreDescription"] = "DUMMY" 65 | assert res["centreDescription"] != "DUMMY" 66 | res["edition"] = -1 67 | assert res["edition"] != -1 68 | 69 | # ignore errors 70 | res.errors = "ignore" 71 | res["centreDescription"] = "DUMMY" 72 | assert res["centreDescription"] != "DUMMY" 73 | 74 | # raise errors 75 | res.errors = "raise" 76 | with pytest.raises(KeyError): 77 | res["centreDescription"] = "DUMMY" 78 | 79 | with pytest.raises(NotImplementedError): 80 | del res["gridType"] 81 | 82 | out = tmpdir.join("test.grib") 83 | with open(str(out), "wb") as file: 84 | res.write(file) 85 | 86 | 87 | def test_ComputedKeysMessage_read() -> None: 88 | computed_keys = { 89 | "ref_time": (lambda m: str(m["dataDate"]) + str(m["dataTime"]), None), 90 | "error_key": (lambda m: 1 / 0, None), 91 | "centre": (lambda m: -1, lambda m, v: None), 92 | } 93 | with open(TEST_DATA, "rb") as file: 94 | res = messages.ComputedKeysMessage.from_file(file, computed_keys=computed_keys) 95 | 96 | assert res["paramId"] == 129 97 | assert res["ref_time"] == "201701010" 98 | assert len(res) > 100 99 | assert res["centre"] == -1 100 | 101 | with pytest.raises(ZeroDivisionError): 102 | res["error_key"] 103 | 104 | 105 | def test_ComputedKeysMessage_write() -> None: 106 | computed_keys = { 107 | "ref_time": (lambda m: "%s%04d" % (m["dataDate"], m["dataTime"]), None), 108 | "error_key": (lambda m: 1 / 0, None), 109 | "centre": (lambda m: -1, lambda m, v: None), 110 | } 111 | res = messages.ComputedKeysMessage.from_sample_name( 112 | "regular_ll_pl_grib2", computed_keys=computed_keys 113 | ) 114 | res["dataDate"] = 20180101 115 | res["dataTime"] = 0 116 | assert res["ref_time"] == "201801010000" 117 | 118 | res["centre"] = 1 119 | 120 | 121 | def test_compat_create_exclusive(tmpdir: py.path.local) -> None: 122 | test_file = tmpdir.join("file.grib.idx") 123 | 124 | try: 125 | with messages.compat_create_exclusive(str(test_file)): 126 | raise RuntimeError("Test remove") 127 | except RuntimeError: 128 | pass 129 | 130 | with messages.compat_create_exclusive(str(test_file)) as file: 131 | file.write(b"Hi!") 132 | 133 | with pytest.raises(OSError): 134 | with messages.compat_create_exclusive(str(test_file)) as file: 135 | pass # pragma: no cover 136 | 137 | 138 | def test_FileIndex() -> None: 139 | res = messages.FileIndex.from_fieldset(messages.FileStream(TEST_DATA), ["paramId"]) 140 | assert res["paramId"] == [129, 130] 141 | assert len(res) == 1 142 | assert list(res) == ["paramId"] 143 | assert res.first() 144 | 145 | with pytest.raises(ValueError): 146 | res.getone("paramId") 147 | 148 | with pytest.raises(KeyError): 149 | res["non-existent-key"] 150 | 151 | subres = res.subindex(paramId=130) 152 | 153 | assert subres.get("paramId") == [130] 154 | assert subres.getone("paramId") == 130 155 | assert len(subres) == 1 156 | 157 | 158 | def test_FileIndex_from_indexpath_or_filestream(tmpdir: py.path.local) -> None: 159 | grib_file = tmpdir.join("file.grib") 160 | 161 | with open(TEST_DATA, "rb") as file: 162 | grib_file.write_binary(file.read()) 163 | 164 | # 
create index file 165 | res = messages.FileIndex.from_indexpath_or_filestream( 166 | messages.FileStream(str(grib_file)), ["paramId"] 167 | ) 168 | assert isinstance(res, messages.FileIndex) 169 | 170 | # read index file 171 | res = messages.FileIndex.from_indexpath_or_filestream( 172 | messages.FileStream(str(grib_file)), ["paramId"] 173 | ) 174 | assert isinstance(res, messages.FileIndex) 175 | 176 | # do not read nor create the index file 177 | res = messages.FileIndex.from_indexpath_or_filestream( 178 | messages.FileStream(str(grib_file)), ["paramId"], indexpath="" 179 | ) 180 | assert isinstance(res, messages.FileIndex) 181 | 182 | # can't create nor read index file 183 | res = messages.FileIndex.from_indexpath_or_filestream( 184 | messages.FileStream(str(grib_file)), 185 | ["paramId"], 186 | indexpath=str(tmpdir.join("non-existent-folder").join("non-existent-file")), 187 | ) 188 | assert isinstance(res, messages.FileIndex) 189 | 190 | # trigger mtime check 191 | grib_file.remove() 192 | with open(TEST_DATA, "rb") as file: 193 | grib_file.write_binary(file.read()) 194 | 195 | res = messages.FileIndex.from_indexpath_or_filestream( 196 | messages.FileStream(str(grib_file)), ["paramId"] 197 | ) 198 | assert isinstance(res, messages.FileIndex) 199 | 200 | 201 | def test_FileIndex_errors() -> None: 202 | computed_keys = {"error_key": (lambda m: bool(1 / 0), lambda m, v: None)} # pragma: no branch 203 | 204 | stream = messages.FileStream(TEST_DATA) 205 | res = messages.FileIndex.from_fieldset(stream, ["paramId", "error_key"], computed_keys) 206 | assert res["paramId"] == [129, 130] 207 | assert len(res) == 2 208 | assert list(res) == ["paramId", "error_key"] 209 | assert res["error_key"] == ["undef"] 210 | 211 | 212 | def test_FileStream() -> None: 213 | res = messages.FileStream(TEST_DATA) 214 | leader = res[0] 215 | assert len(leader) > 100 216 | assert sum(1 for _ in res.items()) == leader["count"] 217 | 218 | # __file__ is not a GRIB, but contains the "GRIB" string, so it is a very tricky corner case 219 | res = messages.FileStream(str(__file__)) 220 | with pytest.raises(eccodes.UnsupportedEditionError): 221 | res[0] 222 | -------------------------------------------------------------------------------- /tests/test_25_cfmessage.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import typing as T 3 | 4 | import numpy as np 5 | import pytest 6 | 7 | from cfgrib import cfmessage 8 | 9 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 10 | TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib") 11 | 12 | 13 | def test_from_grib_date_time() -> None: 14 | message = {"dataDate": 20160706, "dataTime": 1944} 15 | result = cfmessage.from_grib_date_time(message) 16 | 17 | assert result == 1467834240 18 | 19 | 20 | def test_to_grib_date_time() -> None: 21 | message = {} # type: T.Dict[str, T.Any] 22 | datetime_ns = np.datetime64("2001-10-11T01:01:00", "ns").item() 23 | 24 | cfmessage.to_grib_date_time(message, datetime_ns) 25 | 26 | assert message["dataDate"] == 20011011 27 | assert message["dataTime"] == 101 28 | 29 | 30 | def test_from_grib_step() -> None: 31 | message = {"endStep:int": 1, "stepUnits:int": 1} 32 | step_seconds = cfmessage.from_grib_step(message) 33 | 34 | assert step_seconds == 1 35 | 36 | 37 | def test_to_grib_step() -> None: 38 | message = {} # type: T.Dict[str, T.Any] 39 | step_ns = 3600 * 1_000_000_000 40 | 41 | cfmessage.to_grib_step(message, step_ns, step_unit=1) 42 | 43 | 
assert message["endStep:int"] == 1 44 | assert message["stepUnits:int"] == 1 45 | 46 | with pytest.raises(ValueError): 47 | cfmessage.to_grib_step(message, 0, step_unit=3) 48 | 49 | 50 | def test_build_valid_time() -> None: 51 | forecast_reference_time = np.array(0) 52 | forecast_period = np.array(0) 53 | 54 | dims, data = cfmessage.build_valid_time(forecast_reference_time, forecast_period) 55 | 56 | assert dims == () 57 | assert data.shape == () 58 | 59 | forecast_reference_time = np.array([0, 31536000]) 60 | forecast_period = np.array(0) 61 | 62 | dims, data = cfmessage.build_valid_time(forecast_reference_time, forecast_period) 63 | 64 | assert dims == ("time",) 65 | assert data.shape == forecast_reference_time.shape + forecast_period.shape 66 | 67 | forecast_reference_time = np.array(0) 68 | forecast_period = np.array([0, 12, 24, 36]) 69 | 70 | dims, data = cfmessage.build_valid_time(forecast_reference_time, forecast_period) 71 | 72 | assert dims == ("step",) 73 | assert data.shape == (4,) 74 | assert np.allclose((data - data[..., :1]) / 3600, forecast_period) 75 | 76 | forecast_reference_time = np.array([0, 31536000]) 77 | forecast_period = np.array([0, 12, 24, 36]) 78 | 79 | dims, data = cfmessage.build_valid_time(forecast_reference_time, forecast_period) 80 | 81 | assert dims == ("time", "step") 82 | assert data.shape == (2, 4) 83 | assert np.allclose((data - data[..., :1]) / 3600, forecast_period) 84 | -------------------------------------------------------------------------------- /tests/test_30_dataset.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import pathlib 3 | import typing as T 4 | 5 | import numpy as np 6 | import pytest 7 | 8 | from cfgrib import cfmessage, dataset, messages 9 | 10 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 11 | TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib") 12 | TEST_DATA_UKMO = os.path.join(SAMPLE_DATA_FOLDER, "forecast_monthly_ukmo.grib") 13 | TEST_DATA_SCALAR_TIME = os.path.join(SAMPLE_DATA_FOLDER, "era5-single-level-scalar-time.grib") 14 | TEST_DATA_ALTERNATE_ROWS = os.path.join(SAMPLE_DATA_FOLDER, "alternate-scanning.grib") 15 | TEST_DATA_MISSING_VALS = os.path.join(SAMPLE_DATA_FOLDER, "fields_with_missing_values.grib") 16 | TEST_DATA_MULTI_PARAMS = os.path.join(SAMPLE_DATA_FOLDER, "multi_param_on_multi_dims.grib") 17 | 18 | 19 | def test_enforce_unique_attributes() -> None: 20 | assert dataset.enforce_unique_attributes({"key": [1]}, ["key"]) 21 | assert not dataset.enforce_unique_attributes({"key": ["undef"]}, ["key"]) 22 | 23 | with pytest.raises(dataset.DatasetBuildError): 24 | assert dataset.enforce_unique_attributes({"key": [1, 2]}, ["key"]) 25 | 26 | 27 | def test_Variable() -> None: 28 | res = dataset.Variable(dimensions=("lat",), data=np.array([0.0]), attributes={}) 29 | 30 | assert res == res 31 | assert res != 1 32 | 33 | 34 | @pytest.mark.parametrize( 35 | "item,shape,expected", 36 | [ 37 | (([1, 5],), (10,), ([1, 5],)), 38 | ((np.array([1]),), (10,), ([1],)), 39 | ((slice(0, 3, 2),), (10,), ([0, 2],)), 40 | ((1,), (10,), ([1],)), 41 | ], 42 | ) 43 | def test_expand_item(item: T.Any, shape: T.Any, expected: T.Any) -> None: 44 | assert dataset.expand_item(item, shape) == expected 45 | 46 | 47 | def test_expand_item_error() -> None: 48 | with pytest.raises(TypeError): 49 | dataset.expand_item((None,), (1,)) 50 | 51 | 52 | def test_dict_merge() -> None: 53 | master = {"one": 1} 54 | dataset.dict_merge(master, {"two": 2}) 
55 | assert master == {"one": 1, "two": 2} 56 | dataset.dict_merge(master, {"two": 2}) 57 | assert master == {"one": 1, "two": 2} 58 | 59 | with pytest.raises(dataset.DatasetBuildError): 60 | dataset.dict_merge(master, {"two": 3}) 61 | 62 | 63 | def test_encode_cf_first() -> None: 64 | assert dataset.encode_cf_first({}) 65 | 66 | 67 | def test_build_data_var_components_no_encode() -> None: 68 | index_keys = sorted(dataset.INDEX_KEYS + ["time", "step"]) 69 | stream = messages.FileStream(path=TEST_DATA) 70 | index = messages.FileIndex.from_fieldset(stream, index_keys).subindex(paramId=130) 71 | dims, data_var, coord_vars = dataset.build_variable_components(index=index) 72 | assert dims == {"number": 10, "dataDate": 2, "dataTime": 2, "level": 2, "values": 7320} 73 | assert data_var.data.shape == (10, 2, 2, 2, 7320) 74 | 75 | # equivalent to not np.isnan without importing numpy 76 | assert data_var.data[:, :, :, :, :].mean() > 0.0 77 | 78 | 79 | def test_build_data_var_components_encode_cf_geography() -> None: 80 | stream = messages.FileStream(path=TEST_DATA) 81 | index_keys = sorted(dataset.INDEX_KEYS + ["time", "step"]) 82 | index = messages.FieldsetIndex.from_fieldset( 83 | stream, index_keys, cfmessage.COMPUTED_KEYS 84 | ).subindex(paramId=130) 85 | dims, data_var, coord_vars = dataset.build_variable_components( 86 | index=index, encode_cf="geography" 87 | ) 88 | assert dims == { 89 | "number": 10, 90 | "dataDate": 2, 91 | "dataTime": 2, 92 | "level": 2, 93 | "latitude": 61, 94 | "longitude": 120, 95 | } 96 | assert data_var.data.shape == (10, 2, 2, 2, 61, 120) 97 | 98 | # equivalent to not np.isnan without importing numpy 99 | assert data_var.data[:, :, :, :, :, :].mean() > 0.0 100 | 101 | 102 | def test_build_dataset_components_time_dims() -> None: 103 | index_keys = sorted(dataset.INDEX_KEYS + ["time", "step"]) 104 | stream = messages.FileStream(TEST_DATA_UKMO, "warn") 105 | index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, index_keys) 106 | dims = dataset.build_dataset_components(index, read_keys=[])[0] 107 | assert dims == { 108 | "latitude": 6, 109 | "longitude": 11, 110 | "number": 28, 111 | "step": 20, 112 | "time": 8, 113 | } 114 | time_dims = ["indexing_time", "verifying_time"] 115 | index_keys = sorted(dataset.INDEX_KEYS + time_dims) 116 | stream = messages.FileStream(TEST_DATA_UKMO, "warn") 117 | index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, index_keys) 118 | dims, *_ = dataset.build_dataset_components(index, read_keys=[], time_dims=time_dims) 119 | assert dims == { 120 | "number": 28, 121 | "indexing_time": 2, 122 | "verifying_time": 4, 123 | "latitude": 6, 124 | "longitude": 11, 125 | } 126 | 127 | time_dims = ["indexing_time", "step"] 128 | index_keys = sorted(dataset.INDEX_KEYS + time_dims) 129 | stream = messages.FileStream(TEST_DATA_UKMO, "warn") 130 | index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, index_keys) 131 | dims, *_ = dataset.build_dataset_components(index, read_keys=[], time_dims=time_dims) 132 | assert dims == {"number": 28, "indexing_time": 2, "step": 20, "latitude": 6, "longitude": 11} 133 | 134 | 135 | def test_build_dataset_components_ignore_keys() -> None: 136 | stream = messages.FileStream(TEST_DATA_UKMO, "warn") 137 | index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, dataset.INDEX_KEYS) 138 | assert "subCentre" in index.index_keys 139 | index = dataset.open_fileindex(stream, messages.DEFAULT_INDEXPATH, dataset.INDEX_KEYS, ignore_keys=["subCentre"]) 140 | assert "subCentre" not in 
index.index_keys 141 | 142 | def test_Dataset() -> None: 143 | res = dataset.open_file(TEST_DATA) 144 | assert "Conventions" in res.attributes 145 | assert "institution" in res.attributes 146 | assert "history" in res.attributes 147 | assert res.attributes["GRIB_edition"] == 1 148 | assert tuple(res.dimensions.keys()) == ( 149 | "number", 150 | "time", 151 | "isobaricInhPa", 152 | "latitude", 153 | "longitude", 154 | ) 155 | assert len(res.variables) == 9 156 | 157 | res1 = dataset.open_file(pathlib.Path(TEST_DATA)) 158 | 159 | assert res1 == res 160 | 161 | 162 | def test_Dataset_no_encode() -> None: 163 | res = dataset.open_file(TEST_DATA, encode_cf=()) 164 | assert "Conventions" in res.attributes 165 | assert "institution" in res.attributes 166 | assert "history" in res.attributes 167 | assert res.attributes["GRIB_edition"] == 1 168 | assert tuple(res.dimensions.keys()) == ("number", "dataDate", "dataTime", "level", "values") 169 | assert len(res.variables) == 9 170 | 171 | 172 | def test_Dataset_encode_cf_time() -> None: 173 | res = dataset.open_file(TEST_DATA, encode_cf=("time",)) 174 | assert "history" in res.attributes 175 | assert res.attributes["GRIB_edition"] == 1 176 | assert tuple(res.dimensions.keys()) == ("number", "time", "level", "values") 177 | assert len(res.variables) == 9 178 | 179 | # equivalent to not np.isnan without importing numpy 180 | assert res.variables["t"].data[:, :, :, :].mean() > 0.0 181 | 182 | 183 | def test_Dataset_encode_ignore_keys() -> None: 184 | res = dataset.open_file(TEST_DATA) 185 | assert res.attributes["GRIB_edition"] == 1 186 | 187 | res = dataset.open_file(TEST_DATA, ignore_keys=["edition"]) 188 | assert "GRIB_edition" not in res.attributes 189 | 190 | 191 | def test_Dataset_encode_cf_geography() -> None: 192 | res = dataset.open_file(TEST_DATA, encode_cf=("geography",)) 193 | assert "history" in res.attributes 194 | assert res.attributes["GRIB_edition"] == 1 195 | assert tuple(res.dimensions.keys()) == ( 196 | "number", 197 | "dataDate", 198 | "dataTime", 199 | "level", 200 | "latitude", 201 | "longitude", 202 | ) 203 | assert len(res.variables) == 9 204 | 205 | # equivalent to not np.isnan without importing numpy 206 | assert res.variables["t"].data[:, :, :, :, :, :].mean() > 0.0 207 | 208 | 209 | def test_Dataset_encode_cf_vertical() -> None: 210 | res = dataset.open_file(TEST_DATA, encode_cf=("vertical",)) 211 | assert "history" in res.attributes 212 | assert res.attributes["GRIB_edition"] == 1 213 | expected_dimensions = ("number", "dataDate", "dataTime", "isobaricInhPa", "values") 214 | assert tuple(res.dimensions.keys()) == expected_dimensions 215 | assert len(res.variables) == 9 216 | 217 | # equivalent to not np.isnan without importing numpy 218 | assert res.variables["t"].data[:, :, :, :, :].mean() > 0.0 219 | 220 | 221 | def test_Dataset_regular_gg_surface() -> None: 222 | path = os.path.join(SAMPLE_DATA_FOLDER, "regular_gg_sfc.grib") 223 | res = dataset.open_file(path) 224 | 225 | assert res.dimensions == {"latitude": 96, "longitude": 192} 226 | assert np.allclose(res.variables["latitude"].data[:2], [88.57216851, 86.72253095]) 227 | 228 | 229 | def test_Dataset_extra_coords() -> None: 230 | res = dataset.open_file(TEST_DATA, extra_coords={"experimentVersionNumber": "time"}) 231 | assert "experimentVersionNumber" in res.variables 232 | assert res.variables["experimentVersionNumber"].dimensions == ("time",) 233 | 234 | 235 | def test_Dataset_scalar_extra_coords() -> None: 236 | res = dataset.open_file( 237 | TEST_DATA_SCALAR_TIME, 
extra_coords={"experimentVersionNumber": "time"} 238 | ) 239 | assert "experimentVersionNumber" in res.variables 240 | assert res.variables["experimentVersionNumber"].dimensions == () 241 | 242 | 243 | def test_Dataset_extra_coords_error() -> None: 244 | with pytest.raises(ValueError): 245 | dataset.open_file(TEST_DATA, extra_coords={"validityDate": "number"}) 246 | 247 | 248 | def test_OnDiskArray() -> None: 249 | res = dataset.open_file(TEST_DATA).variables["t"] 250 | 251 | assert isinstance(res.data, dataset.OnDiskArray) 252 | assert np.allclose( 253 | res.data[2:4:2, [0, 3], 0, 0, 0], res.data.build_array()[2:4:2, [0, 3], 0, 0, 0] 254 | ) 255 | 256 | 257 | def test_open_fieldset_dict() -> None: 258 | fieldset = { 259 | -10: { 260 | "gridType": "regular_ll", 261 | "Nx": 2, 262 | "Ny": 3, 263 | "distinctLatitudes": [-10.0, 0.0, 10.0], 264 | "distinctLongitudes": [0.0, 10.0], 265 | "paramId": 167, 266 | "shortName": "2t", 267 | "values": [[1, 2], [3, 4], [5, 6]], 268 | } 269 | } 270 | 271 | res = dataset.open_fieldset(fieldset) 272 | 273 | assert res.dimensions == {"latitude": 3, "longitude": 2} 274 | assert set(res.variables) == {"latitude", "longitude", "2t"} 275 | assert np.array_equal(res.variables["2t"].data[()], np.array(fieldset[-10]["values"])) 276 | 277 | 278 | def test_open_fieldset_list() -> None: 279 | fieldset = [ 280 | { 281 | "gridType": "regular_ll", 282 | "Nx": 2, 283 | "Ny": 3, 284 | "distinctLatitudes": [-10.0, 0.0, 10.0], 285 | "distinctLongitudes": [0.0, 10.0], 286 | "paramId": 167, 287 | "shortName": "2t", 288 | "values": [[1, 2], [3, 4], [5, 6]], 289 | } 290 | ] 291 | 292 | res = dataset.open_fieldset(fieldset) 293 | 294 | assert res.dimensions == {"latitude": 3, "longitude": 2} 295 | assert set(res.variables) == {"latitude", "longitude", "2t"} 296 | assert np.array_equal(res.variables["2t"].data[()], np.array(fieldset[0]["values"])) 297 | 298 | 299 | def test_open_fieldset_computed_keys() -> None: 300 | fieldset = [ 301 | { 302 | "gridType": "regular_ll", 303 | "Nx": 2, 304 | "Ny": 3, 305 | "distinctLatitudes": [-10.0, 0.0, 10.0], 306 | "distinctLongitudes": [0.0, 10.0], 307 | "paramId": 167, 308 | "shortName": "2t", 309 | "values": [[1, 2], [3, 4], [5, 6]], 310 | "dataDate": 20200101, 311 | "dataTime": 1200, 312 | } 313 | ] 314 | 315 | res = dataset.open_fieldset(fieldset) 316 | 317 | assert res.dimensions == {"latitude": 3, "longitude": 2} 318 | assert set(res.variables) == {"latitude", "longitude", "time", "2t"} 319 | assert np.array_equal(res.variables["2t"].data[()], np.array(fieldset[0]["values"])) 320 | 321 | 322 | def test_open_fieldset_ignore_keys() -> None: 323 | fieldset = { 324 | -10: { 325 | "gridType": "regular_ll", 326 | "Nx": 2, 327 | "Ny": 3, 328 | "distinctLatitudes": [-10.0, 0.0, 10.0], 329 | "distinctLongitudes": [0.0, 10.0], 330 | "paramId": 167, 331 | "shortName": "2t", 332 | "subCentre": "test", 333 | "values": [[1, 2], [3, 4], [5, 6]], 334 | } 335 | } 336 | 337 | res = dataset.open_fieldset(fieldset) 338 | assert "GRIB_subCentre" in res.attributes 339 | 340 | res = dataset.open_fieldset(fieldset, ignore_keys="subCentre") 341 | assert "GRIB_subCentre" not in res.attributes 342 | 343 | def test_open_file() -> None: 344 | res = dataset.open_file(TEST_DATA) 345 | 346 | assert "t" in res.variables 347 | assert "z" in res.variables 348 | 349 | 350 | def test_open_file_filter_by_keys() -> None: 351 | res = dataset.open_file(TEST_DATA, filter_by_keys={"shortName": "t"}) 352 | 353 | assert "t" in res.variables 354 | assert "z" not in res.variables 
355 | 356 | res = dataset.open_file(TEST_DATA_MULTI_PARAMS) 357 | 358 | assert "t" in res.variables 359 | assert "z" in res.variables 360 | assert "u" in res.variables 361 | 362 | res = dataset.open_file(TEST_DATA_MULTI_PARAMS, filter_by_keys={"shortName": ["t", "z"]}) 363 | 364 | assert "t" in res.variables 365 | assert "z" in res.variables 366 | assert "u" not in res.variables 367 | 368 | 369 | def test_alternating_rows() -> None: 370 | res = dataset.open_file(TEST_DATA_ALTERNATE_ROWS) 371 | # the vals at the east end should be larger than those at the west 372 | east_ref = [301.78, 303.78, 305.03] 373 | west_ref = [292.03, 291.78, 291.78] 374 | assert np.all(np.isclose(res.variables["t2m"].data[84, 288:291], east_ref, 0.0001)) 375 | assert np.all(np.isclose(res.variables["t2m"].data[85, 0:3], west_ref, 0.0001)) 376 | 377 | 378 | def test_missing_field_values() -> None: 379 | res = dataset.open_file(TEST_DATA_MISSING_VALS) 380 | t2 = res.variables["t2m"] 381 | assert np.isclose(np.nanmean(t2.data[0, :, :]), 268.375) 382 | assert np.isclose(np.nanmean(t2.data[1, :, :]), 270.716) 383 | 384 | 385 | def test_default_values_dtype() -> None: 386 | res = dataset.open_file(TEST_DATA_MISSING_VALS) 387 | assert res.variables["t2m"].data.dtype == np.dtype("float32") 388 | assert res.variables["latitude"].data.dtype == np.dtype("float64") 389 | assert res.variables["longitude"].data.dtype == np.dtype("float64") 390 | 391 | 392 | def test_float64_values_dtype() -> None: 393 | res = dataset.open_file(TEST_DATA_MISSING_VALS, values_dtype=np.dtype("float64")) 394 | assert res.variables["t2m"].data.dtype == np.dtype("float64") 395 | assert res.variables["latitude"].data.dtype == np.dtype("float64") 396 | assert res.variables["longitude"].data.dtype == np.dtype("float64") 397 | -------------------------------------------------------------------------------- /tests/test_40_xarray_store.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%matplotlib inline\n", 10 | "\n", 11 | "import xarray as xr\n", 12 | "\n", 13 | "TEST_GRIB = \"sample-data/era5-levels-members.grib\"" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "ds = xr.open_dataset(TEST_GRIB, engine=\"cfgrib\", cache=False)\n", 23 | "ds" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "da = ds.data_vars[\"t\"]\n", 33 | "da" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "da.dims" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "da.attrs" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "da.time" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "da.longitude" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [ 78 | "da.latitude" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "metadata": {}, 85 | "outputs": [], 86 
| "source": [ 87 | "da.mean()" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": null, 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "dasel = da.sel(isobaricInhPa=500, time=\"2017-01-02T12:00:00\")\n", 97 | "dasel" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "dasel.plot(col=\"number\", col_wrap=3, figsize=(15, 12))" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [] 115 | } 116 | ], 117 | "metadata": { 118 | "kernelspec": { 119 | "display_name": "Python 3", 120 | "language": "python", 121 | "name": "python3" 122 | }, 123 | "language_info": { 124 | "codemirror_mode": { 125 | "name": "ipython", 126 | "version": 3 127 | }, 128 | "file_extension": ".py", 129 | "mimetype": "text/x-python", 130 | "name": "python", 131 | "nbconvert_exporter": "python", 132 | "pygments_lexer": "ipython3", 133 | "version": "3.7.0" 134 | } 135 | }, 136 | "nbformat": 4, 137 | "nbformat_minor": 2 138 | } 139 | -------------------------------------------------------------------------------- /tests/test_40_xarray_store.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import gribapi # type: ignore 4 | import numpy as np 5 | import pandas as pd 6 | import pytest 7 | 8 | xr = pytest.importorskip("xarray") # noqa 9 | 10 | from cfgrib import dataset, xarray_store 11 | 12 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 13 | TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib") 14 | TEST_CORRUPTED = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-corrupted.grib") 15 | TEST_DATASETS = os.path.join(SAMPLE_DATA_FOLDER, "t_on_different_level_types.grib") 16 | TEST_IGNORE = os.path.join(SAMPLE_DATA_FOLDER, "uv_on_different_levels.grib") 17 | TEST_DATA_NCEP_MONTHLY = os.path.join(SAMPLE_DATA_FOLDER, "ncep-seasonal-monthly.grib") 18 | TEST_DATA_MULTIPLE_FIELDS = os.path.join(SAMPLE_DATA_FOLDER, "regular_gg_ml_g2.grib") 19 | TEST_DATA_DIFFERENT_STEP_TYPES = os.path.join(SAMPLE_DATA_FOLDER, "cfrzr_and_cprat.grib") 20 | TEST_DATA_DIFFERENT_STEP_TYPES_ZEROS = os.path.join(SAMPLE_DATA_FOLDER, "cfrzr_and_cprat_0s.grib") 21 | TEST_DATA_STEPS_IN_MINUTES = os.path.join(SAMPLE_DATA_FOLDER, "step_60m.grib") 22 | TEST_DATA_ALTERNATE_ROWS_MERCATOR = os.path.join(SAMPLE_DATA_FOLDER, "ds.waveh.5.grib") 23 | 24 | 25 | def test_open_dataset() -> None: 26 | res = xarray_store.open_dataset(TEST_DATA) 27 | 28 | assert res.attrs["GRIB_edition"] == 1 29 | 30 | var = res["t"] 31 | assert var.attrs["GRIB_gridType"] == "regular_ll" 32 | assert var.attrs["units"] == "K" 33 | assert var.dims == ("number", "time", "isobaricInhPa", "latitude", "longitude") 34 | 35 | assert var.mean() > 0.0 36 | 37 | with pytest.raises(ValueError): 38 | xarray_store.open_dataset(TEST_DATA, engine="any-other-engine") 39 | 40 | res = xarray_store.open_dataset(TEST_IGNORE, backend_kwargs={"errors": "warn"}) 41 | assert "isobaricInhPa" in res.dims 42 | 43 | res = xarray_store.open_dataset(TEST_IGNORE, backend_kwargs={"errors": "ignore"}) 44 | assert "isobaricInhPa" in res.dims 45 | 46 | with pytest.raises(dataset.DatasetBuildError): 47 | xarray_store.open_dataset(TEST_IGNORE, backend_kwargs={"errors": "raise"}) 48 | 49 | xarray_store.open_dataset(TEST_DATA, backend_kwargs={"errors": "raise"}) 50 | 51 | 52 | def test_open_dataset_corrupted() -> 
None: 53 | res = xarray_store.open_dataset(TEST_CORRUPTED) 54 | 55 | assert res.attrs["GRIB_edition"] == 1 56 | assert len(res.data_vars) == 1 57 | 58 | with pytest.raises(gribapi.GribInternalError): 59 | xarray_store.open_dataset(TEST_CORRUPTED, backend_kwargs={"errors": "raise"}) 60 | 61 | 62 | def test_open_dataset_encode_cf_time() -> None: 63 | backend_kwargs = {"encode_cf": ("time",)} 64 | res = xarray_store.open_dataset(TEST_DATA, backend_kwargs=backend_kwargs) 65 | 66 | assert res.attrs["GRIB_edition"] == 1 67 | assert res["t"].attrs["GRIB_gridType"] == "regular_ll" 68 | assert res["t"].attrs["GRIB_units"] == "K" 69 | assert res["t"].dims == ("number", "time", "level", "values") 70 | 71 | assert res["t"].mean() > 0.0 72 | 73 | 74 | def test_open_dataset_encode_cf_vertical() -> None: 75 | backend_kwargs = {"encode_cf": ("vertical",)} 76 | res = xarray_store.open_dataset(TEST_DATA, backend_kwargs=backend_kwargs) 77 | 78 | var = res["t"] 79 | assert var.dims == ("number", "dataDate", "dataTime", "isobaricInhPa", "values") 80 | 81 | assert var.mean() > 0.0 82 | 83 | 84 | def test_open_dataset_encode_cf_geography() -> None: 85 | backend_kwargs = {"encode_cf": ("geography",)} 86 | res = xarray_store.open_dataset(TEST_DATA, backend_kwargs=backend_kwargs) 87 | 88 | assert res.attrs["GRIB_edition"] == 1 89 | 90 | var = res["t"] 91 | assert var.attrs["GRIB_gridType"] == "regular_ll" 92 | assert var.attrs["GRIB_units"] == "K" 93 | assert var.dims == ("number", "dataDate", "dataTime", "level", "latitude", "longitude") 94 | 95 | assert var.mean() > 0.0 96 | 97 | 98 | def test_open_dataset_extra_coords_attrs() -> None: 99 | backend_kwargs = { 100 | "time_dims": ("forecastMonth", "indexing_time"), 101 | "extra_coords": {"time": "number"}, 102 | } 103 | 104 | res = xarray_store.open_dataset(TEST_DATA_NCEP_MONTHLY, backend_kwargs=backend_kwargs) 105 | assert "time" in res.variables 106 | assert res.variables["time"].dims == ("number",) 107 | assert res.variables["time"].data[0] == np.datetime64("2021-09-01T00:00:00") 108 | assert res.variables["time"].data[123] == np.datetime64("2021-08-02T00:18:00") 109 | assert res.variables["time"].attrs["standard_name"] == "forecast_reference_time" 110 | 111 | 112 | def test_open_dataset_eccodes() -> None: 113 | res = xarray_store.open_dataset(TEST_DATA) 114 | 115 | assert res.attrs["GRIB_edition"] == 1 116 | 117 | var = res["t"] 118 | assert var.attrs["GRIB_gridType"] == "regular_ll" 119 | assert var.attrs["units"] == "K" 120 | assert var.dims == ("number", "time", "isobaricInhPa", "latitude", "longitude") 121 | 122 | assert var.mean() > 0.0 123 | 124 | 125 | def test_open_datasets() -> None: 126 | res = xarray_store.open_datasets(TEST_DATASETS) 127 | 128 | assert len(res) > 1 129 | assert res[0].attrs["GRIB_centre"] == "ecmf" 130 | 131 | 132 | def test_cached_geo_coords() -> None: 133 | ds1 = xarray_store.open_dataset(TEST_DATA_MULTIPLE_FIELDS) 134 | ds2 = xarray_store.open_dataset( 135 | TEST_DATA_MULTIPLE_FIELDS, backend_kwargs=dict(cache_geo_coords=False) 136 | ) 137 | assert ds2.identical(ds1) 138 | 139 | 140 | def test_open_datasets_different_step_types() -> None: 141 | res = xarray_store.open_datasets(TEST_DATA_DIFFERENT_STEP_TYPES) 142 | 143 | assert len(res) == 2 144 | try: 145 | cpr0 = res[0]["cpr"] 146 | cpr1 = res[1]["cpr"] 147 | except KeyError: 148 | print("Using the old shortName for Convective precipitation rate") 149 | cpr0 = res[0]["cprat"] 150 | cpr1 = res[1]["cprat"] 151 | assert cpr0.attrs["GRIB_stepType"] == "instant" 152 | assert 
res[0].cfrzr.attrs["GRIB_stepType"] == "instant" 153 | assert cpr1.attrs["GRIB_stepType"] == "avg" 154 | assert res[1].cfrzr.attrs["GRIB_stepType"] == "avg" 155 | 156 | 157 | # test the case where we have two different step types, but the data values 158 | # are all zero - we should still separate into different datasets 159 | def test_open_datasets_different_step_types_zeros() -> None: 160 | res = xarray_store.open_datasets(TEST_DATA_DIFFERENT_STEP_TYPES_ZEROS) 161 | 162 | assert len(res) == 2 163 | try: 164 | cpr0 = res[0]["cpr"] 165 | cpr1 = res[1]["cpr"] 166 | except KeyError: 167 | print("Using the old shortName for Convective precipitation rate") 168 | cpr0 = res[0]["cprat"] 169 | cpr1 = res[1]["cprat"] 170 | assert cpr0.attrs["GRIB_stepType"] == "instant" 171 | assert res[0].cfrzr.attrs["GRIB_stepType"] == "instant" 172 | assert cpr1.attrs["GRIB_stepType"] == "avg" 173 | assert res[1].cfrzr.attrs["GRIB_stepType"] == "avg" 174 | 175 | 176 | # ensure that the encoding of the coordinates is preserved 177 | def test_open_datasets_different_preserve_coordinate_encoding() -> None: 178 | res = xarray_store.open_datasets(TEST_DATA_DIFFERENT_STEP_TYPES) 179 | assert len(res) == 2 180 | assert "units" in res[0].valid_time.encoding 181 | assert "units" in res[1].valid_time.encoding 182 | 183 | res = xarray_store.open_datasets(TEST_DATA_DIFFERENT_STEP_TYPES_ZEROS) 184 | assert len(res) == 2 185 | assert "units" in res[0].valid_time.encoding 186 | assert "units" in res[1].valid_time.encoding 187 | 188 | 189 | def test_open_dataset_steps_in_minutes() -> None: 190 | res = xarray_store.open_dataset(TEST_DATA_STEPS_IN_MINUTES) 191 | 192 | var = res["t2m"] 193 | steps = var.step 194 | assert steps[0] == pd.Timedelta("0 hours") 195 | assert steps[1] == pd.Timedelta("1 hours") 196 | assert steps[5] == pd.Timedelta("5 hours") 197 | 198 | 199 | def test_alternating_scanning_mercator() -> None: 200 | ds = xarray_store.open_dataset(TEST_DATA_ALTERNATE_ROWS_MERCATOR) 201 | values = ds.variables["shww"].data 202 | assert np.isnan(values[5]) 203 | assert values[760500] == 1.5 204 | values_all = ds.variables["shww"].data[:] 205 | -------------------------------------------------------------------------------- /tests/test_40_xarray_to_grib_regular_ll.py: -------------------------------------------------------------------------------- 1 | import typing as T 2 | 3 | import numpy as np 4 | import py 5 | import pytest 6 | 7 | pd = pytest.importorskip("pandas") # noqa 8 | pytest.importorskip("xarray") # noqa 9 | 10 | import xarray as xr 11 | 12 | from cfgrib import xarray_to_grib 13 | 14 | 15 | # we make sure to test the cases where we have a) multiple dates, and b) a single date 16 | @pytest.fixture(params=[4, 1]) 17 | def canonic_da(request) -> xr.DataArray: 18 | coords: T.List[T.Any] = [ 19 | pd.date_range("2018-01-01T00:00", "2018-01-02T12:00", periods=request.param), 20 | pd.timedelta_range(0, "12h", periods=2), 21 | [1000.0, 850.0, 500.0], 22 | np.linspace(90.0, -90.0, 5), 23 | np.linspace(0.0, 360.0, 6, endpoint=False), 24 | ] 25 | da = xr.DataArray( 26 | np.zeros((request.param, 2, 3, 5, 6)), 27 | coords=coords, 28 | dims=["time", "step", "isobaricInhPa", "latitude", "longitude"], 29 | ) 30 | return da 31 | 32 | 33 | def test_canonical_dataarray_to_grib_with_grib_keys( 34 | canonic_da: xr.DataArray, tmpdir: py.path.local 35 | ) -> None: 36 | out_path = tmpdir.join("res.grib") 37 | grib_keys = {"gridType": "regular_ll"} 38 | with open(str(out_path), "wb") as file: 39 |
xarray_to_grib.canonical_dataarray_to_grib(canonic_da, file, grib_keys=grib_keys) 40 | 41 | 42 | def test_canonical_dataarray_to_grib_detect_grib_keys( 43 | canonic_da: xr.DataArray, tmpdir: py.path.local 44 | ) -> None: 45 | out_path = tmpdir.join("res.grib") 46 | with open(str(out_path), "wb") as file: 47 | xarray_to_grib.canonical_dataarray_to_grib(canonic_da, file) 48 | 49 | 50 | def test_canonical_dataarray_to_grib_conflicting_detect_grib_keys( 51 | canonic_da: xr.DataArray, tmpdir: py.path.local 52 | ) -> None: 53 | out_path = tmpdir.join("res.grib") 54 | grib_keys = {"gridType": "reduced_ll"} 55 | with open(str(out_path), "wb") as file: 56 | with pytest.raises(ValueError): 57 | xarray_to_grib.canonical_dataarray_to_grib(canonic_da, file, grib_keys=grib_keys) 58 | 59 | 60 | def test_canonical_dataset_to_grib(canonic_da: xr.DataArray, tmpdir: py.path.local) -> None: 61 | out_path = tmpdir.join("res.grib") 62 | canonic_ds = canonic_da.to_dataset(name="t") 63 | with pytest.warns(FutureWarning): 64 | xarray_to_grib.canonical_dataset_to_grib(canonic_ds, str(out_path)) 65 | 66 | xarray_to_grib.canonical_dataset_to_grib(canonic_ds, str(out_path), no_warn=True) 67 | 68 | 69 | def test_to_grib(canonic_da: xr.DataArray, tmpdir: py.path.local) -> None: 70 | out_path = tmpdir.join("res.grib") 71 | canonic_ds = canonic_da.to_dataset(name="t") 72 | with pytest.warns(FutureWarning): 73 | xarray_to_grib.to_grib(canonic_ds, str(out_path)) 74 | -------------------------------------------------------------------------------- /tests/test_50_datamodels.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import pytest 4 | 5 | xr = pytest.importorskip("xarray") # noqa 6 | 7 | from cf2cdm import cfcoords, datamodels 8 | from cfgrib import xarray_store 9 | 10 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 11 | TEST_DATA1 = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib") 12 | TEST_DATA2 = os.path.join(SAMPLE_DATA_FOLDER, "lambert_grid.grib") 13 | 14 | 15 | def test_cds() -> None: 16 | ds = xarray_store.open_dataset(TEST_DATA1) 17 | 18 | res = cfcoords.translate_coords(ds, coord_model=datamodels.CDS) 19 | 20 | assert set(res.dims) == {"forecast_reference_time", "lat", "lon", "plev", "realization"} 21 | assert set(res.coords) == { 22 | "forecast_reference_time", 23 | "lat", 24 | "leadtime", 25 | "lon", 26 | "plev", 27 | "realization", 28 | "time", 29 | } 30 | 31 | ds = xarray_store.open_dataset(TEST_DATA2) 32 | 33 | res = cfcoords.translate_coords(ds, coord_model=datamodels.CDS) 34 | 35 | assert set(res.dims) == {"x", "y"} 36 | assert set(res.coords) == { 37 | "forecast_reference_time", 38 | "heightAboveGround", 39 | "lat", 40 | "leadtime", 41 | "lon", 42 | "time", 43 | } 44 | 45 | 46 | def test_ecmwf() -> None: 47 | ds = xarray_store.open_dataset(TEST_DATA1) 48 | 49 | res = cfcoords.translate_coords(ds, coord_model=datamodels.ECMWF) 50 | 51 | assert set(res.dims) == {"latitude", "level", "longitude", "number", "time"} 52 | assert set(res.coords) == { 53 | "latitude", 54 | "level", 55 | "longitude", 56 | "number", 57 | "step", 58 | "time", 59 | "valid_time", 60 | } 61 | 62 | ds = xarray_store.open_dataset(TEST_DATA2) 63 | 64 | res = cfcoords.translate_coords(ds, coord_model=datamodels.ECMWF) 65 | 66 | assert set(res.dims) == {"x", "y"} 67 | assert set(res.coords) == { 68 | "heightAboveGround", 69 | "latitude", 70 | "longitude", 71 | "step", 72 | "time", 73 | "valid_time", 74 | } 75 | 
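The datamodels tests above go through cf2cdm's public entry point. A minimal usage sketch of the same call (assuming the era5-levels-members.grib sample is available locally; translate_coords with the CDS/ECMWF models is the documented cf2cdm API):

import xarray as xr

import cf2cdm

ds = xr.open_dataset("era5-levels-members.grib", engine="cfgrib")
# Rename cfgrib's CF coordinates to the CDS data model, e.g. latitude -> lat,
# longitude -> lon, isobaricInhPa -> plev, number -> realization,
# time -> forecast_reference_time.
ds_cds = cf2cdm.translate_coords(ds, cf2cdm.CDS)
print(ds_cds.coords)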
-------------------------------------------------------------------------------- /tests/test_50_sample_data.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import typing as T 3 | 4 | import numpy as np 5 | import py 6 | import pytest 7 | 8 | xr = pytest.importorskip("xarray") # noqa 9 | 10 | from cfgrib import dataset, xarray_store, xarray_to_grib 11 | 12 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 13 | 14 | 15 | @pytest.mark.parametrize( 16 | "grib_name", 17 | [ 18 | "era5-levels-members", 19 | "fields_with_missing_values", 20 | "lambert_grid", 21 | "reduced_gg", 22 | "regular_gg_sfc", 23 | "regular_gg_pl", 24 | "regular_gg_ml", 25 | "regular_gg_ml_g2", 26 | "regular_ll_sfc", 27 | "regular_ll_msl", 28 | "scanning_mode_64", 29 | "single_gridpoint", 30 | "spherical_harmonics", 31 | "t_analysis_and_fc_0", 32 | "step_60m", 33 | ], 34 | ) 35 | def test_open_dataset(grib_name: str) -> None: 36 | grib_path = os.path.join(SAMPLE_DATA_FOLDER, grib_name + ".grib") 37 | res = xarray_store.open_dataset(grib_path, cache=False) 38 | print(res) 39 | 40 | 41 | @pytest.mark.parametrize( 42 | "grib_name", 43 | [ 44 | "hpa_and_pa", 45 | "t_on_different_level_types", 46 | "tp_on_different_grid_resolutions", 47 | "uv_on_different_levels", 48 | ], 49 | ) 50 | def test_open_dataset_fail(grib_name: str) -> None: 51 | grib_path = os.path.join(SAMPLE_DATA_FOLDER, grib_name + ".grib") 52 | 53 | with pytest.raises(dataset.DatasetBuildError): 54 | xarray_store.open_dataset(grib_path, cache=False, backend_kwargs={"errors": "raise"}) 55 | 56 | 57 | @pytest.mark.parametrize( 58 | "grib_name", ["hpa_and_pa", "t_on_different_level_types", "tp_on_different_grid_resolutions"] 59 | ) 60 | def test_open_datasets(grib_name: str) -> None: 61 | grib_path = os.path.join(SAMPLE_DATA_FOLDER, grib_name + ".grib") 62 | 63 | res = xarray_store.open_datasets(grib_path) 64 | 65 | assert len(res) > 1 66 | 67 | 68 | @pytest.mark.parametrize( 69 | "grib_name", 70 | [ 71 | pytest.param("era5-levels-members", marks=pytest.mark.xfail), 72 | "fields_with_missing_values", 73 | pytest.param("lambert_grid", marks=pytest.mark.xfail), 74 | "reduced_gg", 75 | "regular_gg_sfc", 76 | "regular_gg_pl", 77 | "regular_gg_ml", 78 | pytest.param("regular_gg_ml_g2", marks=pytest.mark.xfail), 79 | "regular_ll_sfc", 80 | pytest.param("regular_ll_msl", marks=pytest.mark.xfail), 81 | "scanning_mode_64", 82 | pytest.param("spherical_harmonics", marks=pytest.mark.xfail), 83 | "t_analysis_and_fc_0", 84 | ], 85 | ) 86 | def test_canonical_dataset_to_grib(grib_name: str, tmpdir: py.path.local) -> None: 87 | grib_path = os.path.join(SAMPLE_DATA_FOLDER, grib_name + ".grib") 88 | out_path = str(tmpdir.join(grib_name + ".grib")) 89 | 90 | res = xarray_store.open_dataset(grib_path) 91 | 92 | with pytest.warns(FutureWarning): 93 | xarray_to_grib.canonical_dataset_to_grib(res, out_path) 94 | reread = xarray_store.open_dataset(out_path) 95 | assert res.equals(reread) 96 | 97 | 98 | @pytest.mark.parametrize( 99 | "grib_name,ndims", 100 | [ 101 | ("era5-levels-members", 1), 102 | ("era5-single-level-scalar-time", 0), 103 | ], 104 | ) 105 | def test_open_dataset_extra_coords(grib_name: str, ndims: T.Any) -> None: 106 | grib_path = os.path.join(SAMPLE_DATA_FOLDER, grib_name + ".grib") 107 | res = xarray_store.open_dataset( 108 | grib_path, 109 | backend_kwargs={"extra_coords": {"experimentVersionNumber": "time"}}, 110 | cache=False, 111 | ) 112 | assert "experimentVersionNumber" in 
res.coords 113 | assert len(res["experimentVersionNumber"].dims) == ndims 114 | 115 | 116 | def test_dataset_missing_field_values() -> None: 117 | res = xarray_store.open_dataset( 118 | os.path.join(SAMPLE_DATA_FOLDER, "fields_with_missing_values.grib") 119 | ) 120 | t2 = res.variables["t2m"] 121 | assert np.isclose(np.nanmean(t2[0, :, :]), 268.375) 122 | assert np.isclose(np.nanmean(t2[1, :, :]), 270.716) 123 | 124 | 125 | def test_valid_month_time_dim() -> None: 126 | 127 | test_file = os.path.join(SAMPLE_DATA_FOLDER, "cams-egg4-monthly.grib") 128 | ds = xr.open_dataset(test_file, time_dims=["valid_month"]) 129 | 130 | assert "valid_month" in ds.dims 131 | 132 | -------------------------------------------------------------------------------- /tests/test_50_xarray_getitem.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import pytest 4 | 5 | xr = pytest.importorskip("xarray") # noqa 6 | 7 | from cfgrib import xarray_store 8 | 9 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 10 | TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib") 11 | 12 | 13 | @pytest.mark.parametrize("cache", [True, False]) 14 | def test_all(cache: bool) -> None: 15 | da = xarray_store.open_dataset(TEST_DATA, cache=cache).data_vars["t"] 16 | va = da.values[:] 17 | 18 | assert va.shape == (10, 4, 2, 61, 120) 19 | 20 | assert da.mean() == va.mean() 21 | 22 | 23 | @pytest.mark.parametrize("cache", [True, False]) 24 | def test_getitem_int(cache: bool) -> None: 25 | da = xarray_store.open_dataset(TEST_DATA, cache=cache).data_vars["t"] 26 | va = da.values[:] 27 | 28 | assert da.isel(isobaricInhPa=1).values.shape == va[:, :, 1].shape 29 | assert da.isel(isobaricInhPa=1).mean() == va[:, :, 1].mean() 30 | assert da.sel(isobaricInhPa=500).mean() == va[:, :, 1].mean() 31 | 32 | 33 | @pytest.mark.parametrize("cache", [True, False]) 34 | def test_getitem_slice(cache: bool) -> None: 35 | da = xarray_store.open_dataset(TEST_DATA, cache=cache).data_vars["t"] 36 | va = da.values[:] 37 | 38 | assert da.isel(number=slice(2, 6)).mean() == va[2:6].mean() 39 | assert da.isel(number=slice(2, 6, 2)).mean() == va[2:6:2].mean() 40 | # NOTE: label based indexing in xarray is inclusive of both the start and stop bounds. 
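# For example, da.sel(number=slice(2, 6)) selects members 2 through 6 inclusive
# (five values), which is why the asserts below compare against va[2:7] rather than va[2:6].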
41 | assert da.sel(number=slice(2, 6)).mean() == va[2:7].mean() 42 | assert da.sel(number=slice(2, 6, 2)).mean() == va[2:7:2].mean() 43 | 44 | 45 | @pytest.mark.parametrize("cache", [True, False]) 46 | def test_getitem_list(cache: bool) -> None: 47 | da = xarray_store.open_dataset(TEST_DATA, cache=cache).data_vars["t"] 48 | va = da.values[:] 49 | 50 | assert da.isel(number=[2, 3, 4, 5]).mean() == va[[2, 3, 4, 5]].mean() 51 | assert da.isel(number=[4, 3, 2, 5]).mean() == va[[4, 3, 2, 5]].mean() 52 | assert da.sel(number=[2, 3, 4, 5]).mean() == va[[2, 3, 4, 5]].mean() 53 | assert da.sel(number=[4, 3, 2, 5]).mean() == va[[4, 3, 2, 5]].mean() 54 | 55 | 56 | @pytest.mark.parametrize("cache", [True, False]) 57 | def test_getitem_latlon(cache: bool) -> None: 58 | da = xarray_store.open_dataset(TEST_DATA, cache=cache).data_vars["t"] 59 | va = da.values[:] 60 | 61 | assert da.isel(latitude=slice(0, 3), longitude=slice(0, 33)).mean() == va[..., :3, :33].mean() 62 | assert da.sel(latitude=slice(90, 0), longitude=slice(0, 90)).mean() == va[..., :31, :31].mean() 63 | -------------------------------------------------------------------------------- /tests/test_50_xarray_plugin.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | xr = pytest.importorskip( 7 | "xarray", minversion="0.17.1.dev0", reason="required xarray>=0.18" 8 | ) # noqa 9 | 10 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 11 | TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "regular_ll_sfc.grib") 12 | TEST_DATA_MISSING_VALS = os.path.join(SAMPLE_DATA_FOLDER, "fields_with_missing_values.grib") 13 | TEST_DATA_MULTI_PARAMS = os.path.join(SAMPLE_DATA_FOLDER, "multi_param_on_multi_dims.grib") 14 | TEST_DATA_MULTI_LEVTYPES = os.path.join(SAMPLE_DATA_FOLDER, "soil-surface-level-mix.grib") 15 | 16 | 17 | def test_plugin() -> None: 18 | engines = xr.backends.list_engines() 19 | cfgrib_entrypoint = engines["cfgrib"] 20 | assert cfgrib_entrypoint.__module__ == "cfgrib.xarray_plugin" 21 | 22 | 23 | def test_xr_open_dataset_file() -> None: 24 | expected = { 25 | "latitude": 37, 26 | "longitude": 72, 27 | } 28 | 29 | ds = xr.open_dataset(TEST_DATA, engine="cfgrib") 30 | assert ds.dims == expected 31 | assert list(ds.data_vars) == ["skt"] 32 | 33 | 34 | def test_xr_open_dataset_file_filter_by_keys() -> None: 35 | ds = xr.open_dataset(TEST_DATA_MULTI_PARAMS, engine="cfgrib") 36 | 37 | assert "t" in ds.data_vars 38 | assert "z" in ds.data_vars 39 | assert "u" in ds.data_vars 40 | 41 | ds = xr.open_dataset( 42 | TEST_DATA_MULTI_PARAMS, engine="cfgrib", filter_by_keys={"shortName": "t"} 43 | ) 44 | 45 | assert "t" in ds.data_vars 46 | assert "z" not in ds.data_vars 47 | assert "u" not in ds.data_vars 48 | 49 | ds = xr.open_dataset( 50 | TEST_DATA_MULTI_PARAMS, engine="cfgrib", filter_by_keys={"shortName": ["t", "z"]} 51 | ) 52 | 53 | assert "t" in ds.data_vars 54 | assert "z" in ds.data_vars 55 | assert "u" not in ds.data_vars 56 | 57 | 58 | def test_xr_open_dataset_file_ignore_keys() -> None: 59 | ds = xr.open_dataset(TEST_DATA, engine="cfgrib") 60 | assert "GRIB_typeOfLevel" in ds["skt"].attrs 61 | ds = xr.open_dataset(TEST_DATA, engine="cfgrib", ignore_keys=["typeOfLevel"]) 62 | assert "GRIB_typeOfLevel" not in ds["skt"].attrs 63 | 64 | 65 | def test_xr_open_dataset_dict() -> None: 66 | fieldset = { 67 | -10: { 68 | "gridType": "regular_ll", 69 | "Nx": 2, 70 | "Ny": 3, 71 | "distinctLatitudes": [-10.0, 0.0, 10.0], 72 | 
"distinctLongitudes": [0.0, 10.0], 73 | "paramId": 167, 74 | "shortName": "2t", 75 | "values": [[1, 2], [3, 4], [5, 6]], 76 | } 77 | } 78 | 79 | ds = xr.open_dataset(fieldset, engine="cfgrib") 80 | 81 | assert ds.dims == {"latitude": 3, "longitude": 2} 82 | assert list(ds.data_vars) == ["2t"] 83 | 84 | 85 | def test_xr_open_dataset_dict_ignore_keys() -> None: 86 | fieldset = { 87 | -10: { 88 | "gridType": "regular_ll", 89 | "Nx": 2, 90 | "Ny": 3, 91 | "distinctLatitudes": [-10.0, 0.0, 10.0], 92 | "distinctLongitudes": [0.0, 10.0], 93 | "paramId": 167, 94 | "shortName": "2t", 95 | "typeOfLevel": "surface", 96 | "values": [[1, 2], [3, 4], [5, 6]], 97 | } 98 | } 99 | ds = xr.open_dataset(fieldset, engine="cfgrib") 100 | assert "GRIB_typeOfLevel" in ds["2t"].attrs 101 | ds = xr.open_dataset(fieldset, engine="cfgrib", ignore_keys=["typeOfLevel"]) 102 | assert "GRIB_typeOfLevel" not in ds["2t"].attrs 103 | 104 | 105 | def test_xr_open_dataset_list() -> None: 106 | fieldset = [ 107 | { 108 | "gridType": "regular_ll", 109 | "Nx": 2, 110 | "Ny": 3, 111 | "distinctLatitudes": [-10.0, 0.0, 10.0], 112 | "distinctLongitudes": [0.0, 10.0], 113 | "paramId": 167, 114 | "shortName": "2t", 115 | "values": [[1, 2], [3, 4], [5, 6]], 116 | } 117 | ] 118 | 119 | ds = xr.open_dataset(fieldset, engine="cfgrib") 120 | 121 | assert ds.dims == {"latitude": 3, "longitude": 2} 122 | assert list(ds.data_vars) == ["2t"] 123 | 124 | ds_empty = xr.open_dataset([], engine="cfgrib") 125 | 126 | assert ds_empty.equals(xr.Dataset()) 127 | 128 | 129 | def test_xr_open_dataset_list_ignore_keys() -> None: 130 | fieldset = [ 131 | { 132 | "gridType": "regular_ll", 133 | "Nx": 2, 134 | "Ny": 3, 135 | "distinctLatitudes": [-10.0, 0.0, 10.0], 136 | "distinctLongitudes": [0.0, 10.0], 137 | "paramId": 167, 138 | "shortName": "2t", 139 | "typeOfLevel": "surface", 140 | "values": [[1, 2], [3, 4], [5, 6]], 141 | } 142 | ] 143 | 144 | ds = xr.open_dataset(fieldset, engine="cfgrib") 145 | assert "GRIB_typeOfLevel" in ds["2t"].attrs 146 | ds = xr.open_dataset(fieldset, engine="cfgrib", ignore_keys=["typeOfLevel"]) 147 | assert "GRIB_typeOfLevel" not in ds["2t"].attrs 148 | 149 | 150 | def test_read() -> None: 151 | expected = { 152 | "latitude": 37, 153 | "longitude": 72, 154 | } 155 | import cfgrib.xarray_plugin 156 | 157 | opener = cfgrib.xarray_plugin.CfGribBackend() 158 | ds = opener.open_dataset(TEST_DATA) 159 | assert ds.dims == expected 160 | assert list(ds.data_vars) == ["skt"] 161 | 162 | 163 | def test_xr_open_dataset_file_missing_vals() -> None: 164 | ds = xr.open_dataset(TEST_DATA_MISSING_VALS, engine="cfgrib") 165 | t2 = ds["t2m"] 166 | assert np.isclose(np.nanmean(t2.values[0, :, :]), 268.375) 167 | assert np.isclose(np.nanmean(t2.values[1, :, :]), 270.716) 168 | 169 | 170 | def test_xr_open_dataset_coords_to_attributes() -> None: 171 | ds = xr.open_dataset( 172 | TEST_DATA_MULTI_LEVTYPES, engine="cfgrib", coords_as_attributes=["surface", "depthBelowLandLayer"] 173 | ) 174 | assert "surface" not in ds.coords 175 | assert "depthBelowLandLayer" not in ds.coords 176 | 177 | assert "GRIB_surface" in ds["t2m"].attrs 178 | assert "GRIB_depthBelowLandLayer" in ds["stl1"].attrs 179 | 180 | 181 | def test_xr_open_dataset_default_values_dtype() -> None: 182 | ds = xr.open_dataset(TEST_DATA_MISSING_VALS, engine="cfgrib") 183 | assert ds["t2m"].data.dtype == np.dtype("float32") 184 | assert ds["latitude"].data.dtype == np.dtype("float64") 185 | assert ds["longitude"].data.dtype == np.dtype("float64") 186 | 187 | 188 | def 
test_xr_open_dataset_float64_values_dtype() -> None: 189 | ds = xr.open_dataset(TEST_DATA_MISSING_VALS, engine="cfgrib", values_dtype=np.dtype("float64")) 190 | assert ds["t2m"].data.dtype == np.dtype("float64") 191 | assert ds["latitude"].data.dtype == np.dtype("float64") 192 | assert ds["longitude"].data.dtype == np.dtype("float64") 193 | -------------------------------------------------------------------------------- /tests/test_60_main_commands.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import click.testing 4 | import py 5 | import pytest 6 | 7 | pytest.importorskip("scipy", reason="scipy not found") 8 | xr = pytest.importorskip("xarray") # noqa 9 | 10 | from cfgrib import __main__ 11 | 12 | SAMPLE_DATA_FOLDER = os.path.join(os.path.dirname(__file__), "sample-data") 13 | TEST_DATA = os.path.join(SAMPLE_DATA_FOLDER, "era5-levels-members.grib") 14 | 15 | 16 | def test_cfgrib_cli_to_netcdf(tmpdir: py.path.local) -> None: 17 | runner = click.testing.CliRunner() 18 | 19 | res = runner.invoke(__main__.cfgrib_cli, ["to_netcdf"]) 20 | 21 | assert res.exit_code == 0 22 | assert res.output == "" 23 | 24 | res = runner.invoke(__main__.cfgrib_cli, ["to_netcdf", TEST_DATA]) 25 | 26 | assert res.exit_code == 0 27 | assert res.output == "" 28 | 29 | out = tmpdir.join("tmp.nc") 30 | res = runner.invoke(__main__.cfgrib_cli, ["to_netcdf", TEST_DATA, "-o" + str(out), "-cCDS"]) 31 | 32 | assert res.exit_code == 0 33 | assert res.output == "" 34 | 35 | 36 | def test_cfgrib_cli_to_netcdf_backend_kwargs(tmpdir: py.path.local) -> None: 37 | runner = click.testing.CliRunner() 38 | 39 | backend_kwargs = '{"time_dims": ["time"]}' 40 | res = runner.invoke(__main__.cfgrib_cli, ["to_netcdf", TEST_DATA, "-b", backend_kwargs]) 41 | 42 | assert res.exit_code == 0 43 | assert res.output == "" 44 | 45 | backend_kwargs_json = tmpdir.join("temp.json") 46 | with open(backend_kwargs_json, "w") as f: 47 | f.write(backend_kwargs) 48 | res = runner.invoke( 49 | __main__.cfgrib_cli, ["to_netcdf", TEST_DATA, "-b", str(backend_kwargs_json)] 50 | ) 51 | 52 | assert res.exit_code == 0 53 | assert res.output == "" 54 | 55 | 56 | def test_cfgrib_cli_to_netcdf_netcdf_kwargs(tmpdir: py.path.local) -> None: 57 | runner = click.testing.CliRunner() 58 | 59 | netcdf_kwargs = '{"engine": "scipy"}' 60 | res = runner.invoke(__main__.cfgrib_cli, ["to_netcdf", TEST_DATA, "-n", netcdf_kwargs]) 61 | 62 | assert res.exit_code == 0 63 | assert res.output == "" 64 | 65 | netcdf_kwargs_json = tmpdir.join("temp.json") 66 | with open(netcdf_kwargs_json, "w") as f: 67 | f.write(netcdf_kwargs) 68 | res = runner.invoke( 69 | __main__.cfgrib_cli, ["to_netcdf", TEST_DATA, "-n", str(netcdf_kwargs_json)] 70 | ) 71 | 72 | assert res.exit_code == 0 73 | assert res.output == "" 74 | 75 | 76 | def test_cfgrib_cli_to_netcdf_var_encoding(tmpdir: py.path.local) -> None: 77 | runner = click.testing.CliRunner() 78 | 79 | var_encoding = '{"dtype": "float", "scale_factor": 0.1}' 80 | res = runner.invoke(__main__.cfgrib_cli, ["to_netcdf", TEST_DATA, "-v", var_encoding]) 81 | 82 | assert res.exit_code == 0 83 | assert res.output == "" 84 | 85 | var_encoding_json = tmpdir.join("temp.json") 86 | with open(var_encoding_json, "w") as f: 87 | f.write(var_encoding) 88 | res = runner.invoke( 89 | __main__.cfgrib_cli, ["to_netcdf", TEST_DATA, "-v", str(var_encoding_json)] 90 | ) 91 | 92 | assert res.exit_code == 0 93 | assert res.output == "" 94 | 95 | 96 | def test_cfgrib_cli_dump() -> None: 97 | runner =
click.testing.CliRunner() 98 | 99 | res = runner.invoke(__main__.cfgrib_cli, ["dump"]) 100 | 101 | assert res.exit_code == 0 102 | assert res.output == "" 103 | 104 | res = runner.invoke(__main__.cfgrib_cli, ["dump", TEST_DATA]) 105 | 106 | assert res.exit_code == 0 107 | assert "