├── .github
└── workflows
│ ├── check_black.yml
│ ├── python-publish.yml
│ └── test_pysteps.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── CITATION.bib
├── CONTRIBUTING.rst
├── LICENSE
├── MANIFEST.in
├── PKG-INFO
├── README.rst
├── ci
├── ci_test_env.yml
├── fetch_pysteps_data.py
└── test_plugin_support.py
├── doc
├── .gitignore
├── Makefile
├── _static
│ ├── pysteps.css
│ └── pysteps_logo.png
├── _templates
│ └── layout.html
├── make.bat
├── rebuild_docs.sh
├── requirements.txt
└── source
│ ├── conf.py
│ ├── developer_guide
│ ├── build_the_docs.rst
│ ├── contributors_guidelines.rst
│ ├── importer_plugins.rst
│ ├── pypi.rst
│ ├── test_pysteps.rst
│ └── update_conda_forge.rst
│ ├── index.rst
│ ├── pysteps_reference
│ ├── blending.rst
│ ├── cascade.rst
│ ├── datasets.rst
│ ├── decorators.rst
│ ├── downscaling.rst
│ ├── extrapolation.rst
│ ├── feature.rst
│ ├── index.rst
│ ├── io.rst
│ ├── motion.rst
│ ├── noise.rst
│ ├── nowcasts.rst
│ ├── postprocessing.rst
│ ├── pysteps.rst
│ ├── timeseries.rst
│ ├── tracking.rst
│ ├── utils.rst
│ ├── verification.rst
│ └── visualization.rst
│ ├── references.bib
│ ├── user_guide
│ ├── example_data.rst
│ ├── install_pysteps.rst
│ ├── machine_learning_pysteps.rst
│ ├── pystepsrc_example.rst
│ └── set_pystepsrc.rst
│ └── zz_bibliography.rst
├── environment.yml
├── environment_dev.yml
├── examples
├── LK_buffer_mask.py
├── README.txt
├── advection_correction.py
├── anvil_nowcast.py
├── blended_forecast.py
├── data_transformations.py
├── linda_nowcasts.py
├── my_first_nowcast.ipynb
├── optical_flow_methods_convergence.py
├── plot_cascade_decomposition.py
├── plot_custom_precipitation_range.py
├── plot_ensemble_verification.py
├── plot_extrapolation_nowcast.py
├── plot_linear_blending.py
├── plot_noise_generators.py
├── plot_optical_flow.py
├── plot_steps_nowcast.py
├── probability_forecast.py
├── rainfarm_downscale.py
└── thunderstorm_detection_and_tracking.py
├── pyproject.toml
├── pysteps
├── __init__.py
├── blending
│ ├── __init__.py
│ ├── clim.py
│ ├── interface.py
│ ├── linear_blending.py
│ ├── skill_scores.py
│ ├── steps.py
│ └── utils.py
├── cascade
│ ├── __init__.py
│ ├── bandpass_filters.py
│ ├── decomposition.py
│ └── interface.py
├── datasets.py
├── decorators.py
├── downscaling
│ ├── __init__.py
│ ├── interface.py
│ └── rainfarm.py
├── exceptions.py
├── extrapolation
│ ├── __init__.py
│ ├── interface.py
│ └── semilagrangian.py
├── feature
│ ├── __init__.py
│ ├── blob.py
│ ├── interface.py
│ ├── shitomasi.py
│ └── tstorm.py
├── io
│ ├── __init__.py
│ ├── archive.py
│ ├── exporters.py
│ ├── importers.py
│ ├── interface.py
│ ├── mch_lut_8bit_Metranet_AZC_V104.txt
│ ├── mch_lut_8bit_Metranet_v103.txt
│ ├── nowcast_importers.py
│ └── readers.py
├── motion
│ ├── __init__.py
│ ├── _proesmans.pyx
│ ├── _vet.pyx
│ ├── constant.py
│ ├── darts.py
│ ├── interface.py
│ ├── lucaskanade.py
│ ├── proesmans.py
│ └── vet.py
├── noise
│ ├── __init__.py
│ ├── fftgenerators.py
│ ├── interface.py
│ ├── motion.py
│ └── utils.py
├── nowcasts
│ ├── __init__.py
│ ├── anvil.py
│ ├── extrapolation.py
│ ├── interface.py
│ ├── lagrangian_probability.py
│ ├── linda.py
│ ├── sprog.py
│ ├── sseps.py
│ ├── steps.py
│ └── utils.py
├── postprocessing
│ ├── __init__.py
│ ├── diagnostics.py
│ ├── ensemblestats.py
│ ├── interface.py
│ └── probmatching.py
├── pystepsrc
├── pystepsrc_schema.json
├── scripts
│ ├── __init__.py
│ ├── fit_vel_pert_params.py
│ └── run_vel_pert_analysis.py
├── tests
│ ├── __init__.py
│ ├── helpers.py
│ ├── test_archive.py
│ ├── test_blending_clim.py
│ ├── test_blending_linear_blending.py
│ ├── test_blending_skill_scores.py
│ ├── test_blending_steps.py
│ ├── test_blending_utils.py
│ ├── test_cascade.py
│ ├── test_datasets.py
│ ├── test_decorators.py
│ ├── test_downscaling_rainfarm.py
│ ├── test_ensscores.py
│ ├── test_exporters.py
│ ├── test_extrapolation_semilagrangian.py
│ ├── test_feature.py
│ ├── test_feature_tstorm.py
│ ├── test_importer_decorator.py
│ ├── test_interfaces.py
│ ├── test_io_archive.py
│ ├── test_io_bom_rf3.py
│ ├── test_io_fmi_geotiff.py
│ ├── test_io_fmi_pgm.py
│ ├── test_io_knmi_hdf5.py
│ ├── test_io_mch_gif.py
│ ├── test_io_mrms_grib.py
│ ├── test_io_nowcast_importers.py
│ ├── test_io_opera_hdf5.py
│ ├── test_io_readers.py
│ ├── test_io_saf_crri.py
│ ├── test_motion.py
│ ├── test_motion_lk.py
│ ├── test_noise_fftgenerators.py
│ ├── test_noise_motion.py
│ ├── test_nowcasts_anvil.py
│ ├── test_nowcasts_lagrangian_probability.py
│ ├── test_nowcasts_linda.py
│ ├── test_nowcasts_sprog.py
│ ├── test_nowcasts_sseps.py
│ ├── test_nowcasts_steps.py
│ ├── test_nowcasts_utils.py
│ ├── test_paramsrc.py
│ ├── test_plt_animate.py
│ ├── test_plt_cartopy.py
│ ├── test_plt_motionfields.py
│ ├── test_plt_precipfields.py
│ ├── test_plugins_support.py
│ ├── test_postprocessing_ensemblestats.py
│ ├── test_postprocessing_probmatching.py
│ ├── test_timeseries_autoregression.py
│ ├── test_tracking_tdating.py
│ ├── test_utils_arrays.py
│ ├── test_utils_cleansing.py
│ ├── test_utils_conversion.py
│ ├── test_utils_dimension.py
│ ├── test_utils_interpolate.py
│ ├── test_utils_reprojection.py
│ ├── test_utils_spectral.py
│ ├── test_utils_transformation.py
│ ├── test_verification_detcatscores.py
│ ├── test_verification_detcontscores.py
│ ├── test_verification_probscores.py
│ ├── test_verification_salscores.py
│ └── test_verification_spatialscores.py
├── timeseries
│ ├── __init__.py
│ ├── autoregression.py
│ └── correlation.py
├── tracking
│ ├── __init__.py
│ ├── interface.py
│ ├── lucaskanade.py
│ └── tdating.py
├── utils
│ ├── __init__.py
│ ├── arrays.py
│ ├── check_norain.py
│ ├── cleansing.py
│ ├── conversion.py
│ ├── dimension.py
│ ├── fft.py
│ ├── images.py
│ ├── interface.py
│ ├── interpolate.py
│ ├── reprojection.py
│ ├── spectral.py
│ ├── tapering.py
│ └── transformation.py
├── verification
│ ├── __init__.py
│ ├── detcatscores.py
│ ├── detcontscores.py
│ ├── ensscores.py
│ ├── interface.py
│ ├── lifetime.py
│ ├── plots.py
│ ├── probscores.py
│ ├── salscores.py
│ └── spatialscores.py
└── visualization
│ ├── __init__.py
│ ├── animations.py
│ ├── basemaps.py
│ ├── motionfields.py
│ ├── precipfields.py
│ ├── spectral.py
│ ├── thunderstorms.py
│ └── utils.py
├── requirements.txt
├── requirements_dev.txt
├── setup.py
└── tox.ini
/.github/workflows/check_black.yml:
--------------------------------------------------------------------------------
1 | # This workflow will test the code base using the LATEST version of black
2 |
3 | # IMPORTANT: Black is under development. Hence, minor formatting changes between
4 | # different versions are expected.
5 | # If this test fails, install the latest version of black and then run black.
6 | # Preferably, run black only on the files that you have modified.
7 | # This will facilitate the revision of the proposed changes.
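# For example (the file path below is only a placeholder):
#   pip install --upgrade black
#   black path/to/the/files_you_modified.py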
8 |
9 | name: Check Black
10 |
11 | on:
12 | # Triggers the workflow on push or pull request events but only for the master branch
13 | push:
14 | branches: [ master ]
15 | pull_request:
16 | branches: [ master ]
17 |
18 | jobs:
19 | build:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v4
25 | - name: Set up Python 3.10
26 | uses: actions/setup-python@v5
27 | with:
28 | python-version: "3.10"
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install black
33 |
34 | - name: Black version
35 | run: black --version
36 |
37 | - name: Black check
38 | working-directory: ${{github.workspace}}
39 | run: black --check .
40 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: Upload Python Package
5 |
6 | on:
7 | release:
8 | types: [published]
9 |
10 | jobs:
11 | deploy:
12 |
13 | runs-on: ubuntu-latest
14 | permissions:
15 | id-token: write
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 | - name: Set up Python
20 | uses: actions/setup-python@v5
21 | with:
22 | python-version: '3.x'
23 | - name: Install dependencies
24 | run: |
25 | python -m pip install --upgrade pip
26 | pip install setuptools wheel numpy cython
27 | - name: Build
28 | run: |
29 | python setup.py sdist
30 | - name: Publish
31 | uses: pypa/gh-action-pypi-publish@release/v1
32 |
--------------------------------------------------------------------------------
/.github/workflows/test_pysteps.yml:
--------------------------------------------------------------------------------
1 | name: Test pysteps
2 |
3 | on:
4 | # Triggers the workflow on push or pull request events to the master branch
5 | push:
6 | branches:
7 | - master
8 | - pysteps-v2
9 | pull_request:
10 | branches:
11 | - master
12 | - pysteps-v2
13 |
14 | jobs:
15 | unit_tests:
16 | name: Unit Tests (${{ matrix.python-version }}, ${{ matrix.os }})
17 | runs-on: ${{ matrix.os }}
18 |
19 | strategy:
20 | fail-fast: false
21 | matrix:
22 | os: [ "ubuntu-latest", "macos-latest", "windows-latest" ]
23 | python-version: ["3.10", "3.12"]
24 | max-parallel: 6
25 |
26 | defaults:
27 | run:
28 | shell: bash -l {0}
29 |
30 | steps:
31 | - uses: actions/checkout@v4
32 | - uses: actions/setup-python@v5
33 | with:
34 | python-version: ${{ matrix.python-version }}
35 |
36 | # need headless opencv on Linux, see https://github.com/conda-forge/opencv-feedstock/issues/401
37 | - name: Install mamba and create environment for Linux
38 | if: matrix.os == 'ubuntu-latest'
39 | uses: mamba-org/setup-micromamba@v1
40 | with:
41 | # https://github.com/mamba-org/setup-micromamba/issues/225
42 | micromamba-version: 1.5.10-0
43 | environment-file: ci/ci_test_env.yml
44 | environment-name: test_environment
45 | generate-run-shell: false
46 | create-args: >-
47 | python=${{ matrix.python-version }}
48 | libopencv=*=headless*
49 |
50 | - name: Install mamba and create environment (not Linux)
51 | if: matrix.os != 'ubuntu-latest'
52 | uses: mamba-org/setup-micromamba@v1
53 | with:
54 | # https://github.com/mamba-org/setup-micromamba/issues/225
55 | micromamba-version: 1.5.10-0
56 | environment-file: ci/ci_test_env.yml
57 | environment-name: test_environment
58 | generate-run-shell: false
59 | create-args: python=${{ matrix.python-version }}
60 |
61 | - name: Install pygrib (not win)
62 | if: matrix.os != 'windows-latest'
63 | run: mamba install --quiet pygrib
64 |
65 | - name: Install pysteps for MacOS
66 | if: matrix.os == 'macos-latest'
67 | working-directory: ${{github.workspace}}
68 | env:
69 | CC: gcc-13
70 | CXX: g++-13
71 | CXX1X: g++-13
72 | HOMEBREW_NO_INSTALL_CLEANUP: 1
73 | run: |
74 | brew update-reset
75 | brew update
76 | gcc-13 --version || brew install gcc@13
77 | pip install .
78 |
79 | - name: Install pysteps
80 | if: matrix.os != 'macos-latest'
81 | working-directory: ${{github.workspace}}
82 | run: pip install .
83 |
84 | - name: Download pysteps data
85 | env:
86 | PYSTEPS_DATA_PATH: ${{github.workspace}}/pysteps_data
87 | working-directory: ${{github.workspace}}/ci
88 | run: python fetch_pysteps_data.py
89 |
90 | - name: Check imports
91 | working-directory: ${{github.workspace}}/pysteps_data
92 | run: |
93 | python --version
94 | python -c "import pysteps; print(pysteps.__file__)"
95 | python -c "from pysteps import motion"
96 | python -c "from pysteps.motion import vet"
97 | python -c "from pysteps.motion import proesmans"
98 |
99 | - name: Run tests and coverage report
100 | working-directory: ${{github.workspace}}/pysteps_data
101 | env:
102 | PYSTEPSRC: ${{github.workspace}}/pysteps_data/pystepsrc
103 | run: pytest --pyargs pysteps --cov=pysteps --cov-report=xml --cov-report=term -ra
104 |
105 | - name: Upload coverage to Codecov (Linux only)
106 | if: matrix.os == 'ubuntu-latest'
107 | uses: codecov/codecov-action@v4
108 | env:
109 | OS: ${{ matrix.os }}
110 | PYTHON: ${{ matrix.python-version }}
111 | with:
112 | token: ${{ secrets.CODECOV_TOKEN }}
113 | files: ${{github.workspace}}/pysteps_data/coverage.xml
114 | flags: unit_tests
115 | env_vars: OS,PYTHON
116 | fail_ci_if_error: true
117 | verbose: true
118 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 | *.c
9 |
10 | # Distribution / packaging
11 | .Python
12 | .tox
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | .hypothesis/
50 | .pytest_cache/
51 |
52 | # Translations
53 | *.mo
54 | *.pot
55 |
56 | # Sphinx documentation
57 | docs/_build/
58 | doc/_build/
59 |
60 | # Jupyter Notebook
61 | .ipynb_checkpoints
62 |
63 | # pyenv
64 | .python-version
65 |
66 |
67 | # Environments
68 | .env
69 | .venv
70 | env/
71 | venv/
72 | ENV/
73 | env.bak/
74 | venv.bak/
75 |
76 | # Pycharm
77 | .idea
78 |
79 | # Spyder project settings
80 | .spyderproject
81 | .spyproject
82 |
83 | # VSCode
84 | .vscode
85 |
86 | # Rope project settings
87 | .ropeproject
88 |
89 | # mypy
90 | .mypy_cache/
91 |
92 | # Mac OS Stuff
93 | .DS_Store
94 |
95 | # Running local tests
96 | /tmp
97 | /pysteps/tests/tmp/
98 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/psf/black
3 | rev: 25.1.0
4 | hooks:
5 | - id: black
6 | language_version: python3
7 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | version: 2
5 |
6 | # the build.os and build.tools section is mandatory
7 | build:
8 | os: "ubuntu-22.04"
9 | tools:
10 | python: "3.10"
11 |
12 | sphinx:
13 | configuration: doc/source/conf.py
14 |
15 | formats:
16 | - htmlzip
17 |
18 | python:
19 | install:
20 | - requirements: requirements.txt
21 | - requirements: doc/requirements.txt
22 | - method: pip
23 | path: .
24 |
--------------------------------------------------------------------------------
/CITATION.bib:
--------------------------------------------------------------------------------
1 | @Article{gmd-12-4185-2019,
2 | AUTHOR = {Pulkkinen, S. and Nerini, D. and P\'erez Hortal, A. A. and Velasco-Forero, C. and Seed, A. and Germann, U. and Foresti, L.},
3 | TITLE = {Pysteps: an open-source Python library for probabilistic precipitation nowcasting (v1.0)},
4 | JOURNAL = {Geoscientific Model Development},
5 | VOLUME = {12},
6 | YEAR = {2019},
7 | NUMBER = {10},
8 | PAGES = {4185--4219},
9 | URL = {https://gmd.copernicus.org/articles/12/4185/2019/},
10 | DOI = {10.5194/gmd-12-4185-2019}
11 | }
12 | @article{qj.4461,
13 | AUTHOR = {Imhoff, Ruben O. and De Cruz, Lesley and Dewettinck, Wout and Brauer, Claudia C. and Uijlenhoet, Remko and van Heeringen, Klaas-Jan and Velasco-Forero, Carlos and Nerini, Daniele and Van Ginderachter, Michiel and Weerts, Albrecht H.},
14 | TITLE = {Scale-dependent blending of ensemble rainfall nowcasts and NWP in the open-source pysteps library},
15 | JOURNAL = {Quarterly Journal of the Royal Meteorological Society},
16 | VOLUME = {n/a},
17 | NUMBER = {n/a},
18 | YEAR = {2023},
19 | PAGES ={1--30},
20 | DOI = {https://doi.org/10.1002/qj.4461},
21 | URL = {https://rmets.onlinelibrary.wiley.com/doi/abs/10.1002/qj.4461},
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2019, PySteps developers
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE *.txt *.rst
2 | include pysteps/pystepsrc
3 | include pysteps/pystepsrc_schema.json
4 | include pysteps/io/mch_lut_8bit_Metranet_AZC_V104.txt
5 | include pysteps/io/mch_lut_8bit_Metranet_v103.txt
6 | recursive-include pysteps *.pyx
7 | include pyproject.toml
8 |
9 |
--------------------------------------------------------------------------------
/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 1.2
2 | Name: pysteps
3 | Version: 1.17.0
4 | Summary: Python framework for short-term ensemble prediction systems
5 | Home-page: http://pypi.python.org/pypi/pysteps/
6 | License: LICENSE
7 | Description: =======
8 | pySteps
9 | =======
10 | The pysteps initiative is a community that develops and maintains an easy to
11 | use, modular, free and open-source python framework for short-term ensemble
12 | prediction systems.
13 |
14 | The focus is on probabilistic nowcasting of radar precipitation fields,
15 | but pysteps is designed to allow a wider range of uses.
16 |
17 | Platform: UNKNOWN
18 |
--------------------------------------------------------------------------------
/ci/ci_test_env.yml:
--------------------------------------------------------------------------------
1 | # pysteps development environment
2 | name: test_environment
3 | channels:
4 | - conda-forge
5 | - defaults
6 | dependencies:
7 | - python>=3.10
8 | - pip
9 | - mamba
10 | # Minimal dependencies
11 | - numpy
12 | - cython
13 | - jsmin
14 | - jsonschema
15 | - matplotlib
16 | - netCDF4
17 | - opencv
18 | - pillow
19 | - pyproj
20 | - scipy
21 | # Optional dependencies
22 | - dask
23 | - pyfftw
24 | - cartopy
25 | - h5py
26 | - PyWavelets
27 | - pandas
28 | - scikit-image
29 | - rasterio
30 | - gdal
31 | # Test dependencies
32 | - pytest
33 | - pytest-cov
34 | - pip:
35 | - cookiecutter
36 |
--------------------------------------------------------------------------------
/ci/fetch_pysteps_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Script used to install the pysteps data in a test environment and set a pystepsrc
4 | configuration file that points to that data.
5 |
6 | The test data is downloaded in the `PYSTEPS_DATA_PATH` environmental variable.
7 |
8 | After this script is run, the `PYSTEPSRC` environmental variable should be set to
9 | PYSTEPSRC=$PYSTEPS_DATA_PATH/pystepsrc for pysteps to use that configuration file.
10 | """
11 |
12 | import os
13 |
14 | from pysteps.datasets import create_default_pystepsrc, download_pysteps_data
15 |
16 | tox_test_data_dir = os.environ["PYSTEPS_DATA_PATH"]
17 |
18 | download_pysteps_data(tox_test_data_dir, force=True)
19 |
20 | create_default_pystepsrc(
21 | tox_test_data_dir, config_dir=tox_test_data_dir, file_name="pystepsrc"
22 | )
23 |
--------------------------------------------------------------------------------
/ci/test_plugin_support.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | Script to test the plugin support.
5 |
6 | This script assumes that a package created with the default pysteps plugin template
7 | (and using the default values) is installed.
8 |
9 | https://github.com/pySTEPS/cookiecutter-pysteps-plugin
10 |
11 | """
12 |
13 | from pysteps import io
14 |
15 | print("Testing plugin support: ", end="")
16 | assert hasattr(io.importers, "import_institution_name")
17 |
18 | assert "institution_name" in io.interface._importer_methods
19 |
20 | from pysteps.io.importers import import_institution_name
21 |
22 | import_institution_name("filename")
23 | print("PASSED")
24 |
--------------------------------------------------------------------------------
/doc/.gitignore:
--------------------------------------------------------------------------------
1 | _build/
2 | generated
3 | auto_examples
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = pysteps
8 | SOURCEDIR = source
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/doc/_static/pysteps.css:
--------------------------------------------------------------------------------
1 |
2 | .section h1 {
3 | border-bottom: 2px solid #0099ff;
4 | display: inline-block;
5 | }
6 |
7 | .section h2 {
8 | border-bottom: 2px solid #ccebff;
9 | display: inline-block;
10 | }
11 |
12 | /* override table width restrictions */
13 | @media screen and (min-width: 767px) {
14 |
15 | .wy-table-responsive table td {
16 | /* !important prevents the common CSS stylesheets from overriding
17 | this as on RTD they are loaded after this stylesheet */
18 | white-space: normal !important;
19 | }
20 |
21 | .wy-table-responsive {
22 | overflow: visible !important;
23 | }
24 | }
--------------------------------------------------------------------------------
/doc/_static/pysteps_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pySTEPS/pysteps/abd1f52ce9f042a3e508fbf9c74eb5b172f79552/doc/_static/pysteps_logo.png
--------------------------------------------------------------------------------
/doc/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 | {% set css_files = css_files + ["_static/pysteps.css"] %}
--------------------------------------------------------------------------------
/doc/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=pysteps
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/doc/rebuild_docs.sh:
--------------------------------------------------------------------------------
1 | # Build documentation from scratch.
2 |
3 | rm -r source/generated &> /dev/null
4 | rm -r source/auto_examples &> /dev/null
5 |
6 | make clean
7 |
8 | make html
9 |
--------------------------------------------------------------------------------
/doc/requirements.txt:
--------------------------------------------------------------------------------
1 | # Additional requirements related to the documentation build only
2 | sphinx
3 | sphinxcontrib.bibtex
4 | sphinx-book-theme
5 | sphinx_gallery
6 | scikit-image
7 | pandas
8 | git+https://github.com/pySTEPS/pysteps-nwp-importers.git@main#egg=pysteps_nwp_importers
9 | pygrib
--------------------------------------------------------------------------------
/doc/source/developer_guide/build_the_docs.rst:
--------------------------------------------------------------------------------
1 | .. _build_the_docs:
2 |
3 | =================
4 | Building the docs
5 | =================
6 |
7 | The pysteps documentation is built using
8 | `Sphinx <http://sphinx-doc.org/>`_,
9 | a tool that makes it easy to create intelligent and beautiful documentation.
10 |
11 | The documentation is located in the **doc** folder in the pysteps repo.
12 |
13 | Automatic build
14 | ---------------
15 |
16 | The simplest way to build the documentation is using tox and the tox-conda
17 | plugin (conda needed).
18 | To install these packages activate your conda development environment and run::
19 |
20 | conda install -c conda-forge tox tox-conda
21 |
22 | Then, to build the documentation, from the repo's root run::
23 |
24 | tox -e docs
25 |
26 | This will create a conda environment with all the necessary dependencies and the
27 | data needed to create the examples.
28 |
29 |
30 | Manual build
31 | ------------
32 | To build the docs, you need to satisfy a few more dependencies
33 | related to Sphinx that are specified in the doc/requirements.txt file:
34 |
35 | - sphinx
36 | - numpydoc
37 | - sphinxcontrib.bibtex
38 | - sphinx-book-theme
39 | - sphinx_gallery
40 |
41 | You can install these packages running `pip install -r doc/requirements.txt`.
42 |
43 | In addition to these requirements, to build the example gallery in the
44 | documentation the example pysteps-data is needed. To download and install this
45 | data see the installation instructions in the :ref:`example_data` section.
46 |
47 | Once these requirements are met, to build the documentation, in the **doc**
48 | folder run::
49 |
50 | make html
51 |
52 | This will build the documentation along with the example gallery.
53 |
54 | The built documentation (HTML pages) will be available in
55 | **doc/_build/html/**.
56 | To correctly visualize the documentation, you need to set up and run a local
57 | HTTP server. To do that, in the **doc/_build/html/** directory run::
58 |
59 | python -m http.server
60 |
61 | This will set up a local HTTP server on 0.0.0.0 port 8000.
62 | To see the built documentation, open the following URL in the browser:
63 | http://0.0.0.0:8000/
64 |
--------------------------------------------------------------------------------
/doc/source/developer_guide/contributors_guidelines.rst:
--------------------------------------------------------------------------------
1 | .. _contributor_guidelines:
2 |
3 | .. include:: ../../../CONTRIBUTING.rst
--------------------------------------------------------------------------------
/doc/source/developer_guide/importer_plugins.rst:
--------------------------------------------------------------------------------
1 | .. _importer-plugins:
2 |
3 | ===========================
4 | Create your importer plugin
5 | ===========================
6 |
7 | Since version 1.4, pysteps allows users to add new importers by installing external
8 | packages, called plugins, without modifying the pysteps installation. These plugins need
9 | to follow a particular structure to allow pysteps to discover and integrate the new
10 | importers to the pysteps interface without any user intervention.
11 |
12 | .. contents:: Table of Contents
13 | :local:
14 | :depth: 3
15 |
16 | How do the plugins work?
17 | ========================
18 |
19 | When the plugin is installed, it advertises the new importers to other packages (in our
20 | case, pysteps) using the Python `entry points specification`_.
21 | These new importers are automatically discovered every time the pysteps library is
22 | imported. The discovered importers are added as attributes to the io.importers module
23 | and also registered to the io.get_method interface without any user intervention.
24 | In addition, since the installation of the plugins does not modify the actual pysteps
25 | installation (i.e., the pysteps sources), the pysteps library can be updated without
26 | reinstalling the plugin.
27 |
28 | .. _`entry points specification`: https://packaging.python.org/specifications/entry-points/
29 |
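As a rough sketch of the mechanism, a plugin package declares its importers in its
packaging metadata so that pysteps can discover them at import time. The entry-point
group, package and function names below are only illustrative placeholders; the exact
convention is defined by the plugin template introduced in the next section.

.. code-block:: python

    # Hypothetical setup.py of an importer plugin (all names are placeholders).
    from setuptools import find_packages, setup

    setup(
        name="pysteps-importer-institution-name",
        packages=find_packages(),
        entry_points={
            # Entry-point group scanned by pysteps to discover new importers
            # (illustrative; see the plugin template for the actual convention).
            "pysteps.plugins.importers": [
                "import_institution_name = "
                "pysteps_importer_institution_name.importers:import_institution_name",
            ],
        },
    )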
30 |
31 | Create your plugin
32 | ==================
33 |
34 | There are two ways of creating a plugin. The first one is building the importers plugin
35 | from scratch. However, this can be a daunting task if you are creating your first plugin.
36 | To facilitate the creation of new plugins, we provide a `Cookiecutter`_ template, in a
37 | separate project, that creates a template project to be used as a starting point to build
38 | the plugin.
39 |
40 | The template for the pysteps plugins is maintained as a separate project at
41 | `cookiecutter-pysteps-plugin `_.
42 | For detailed instructions on how to create a plugin, `check the template's documentation`_.
43 |
44 | .. _`check the template's documentation`: https://cookiecutter-pysteps-plugin.readthedocs.io/en/latest
45 |
46 | .. _Cookiecutter: https://cookiecutter.readthedocs.io
47 |
--------------------------------------------------------------------------------
/doc/source/developer_guide/test_pysteps.rst:
--------------------------------------------------------------------------------
1 | .. _testing_pysteps:
2 |
3 | ===============
4 | Testing pysteps
5 | ===============
6 |
7 | The pysteps distribution includes a small test suite for some of the
8 | modules. To run the tests, the `pytest <https://docs.pytest.org/>`__
9 | package is needed. To install it, in a terminal run::
10 |
11 | pip install pytest
12 |
13 |
14 | Automatic testing
15 | =================
16 |
17 | The simplest way to run the pysteps test suite is using tox and the tox-conda
18 | plugin (conda needed).
19 | To install these packages, activate your conda development environment and run::
20 |
21 | conda install -c conda-forge tox tox-conda
22 |
23 | Then, to run the tests, from the repo's root run::
24 |
25 | tox # Run pytests
26 | tox -e install # Test package installation
27 | tox -e black # Test for black formatting warnings
28 |
29 |
30 | Manual testing
31 | ==============
32 |
33 |
34 | Example data
35 | ------------
36 |
37 | The built-in tests require the pysteps example data to be installed.
38 | See the installation instructions in the :ref:`example_data` section.
39 |
40 | Test an installed package
41 | -------------------------
42 |
43 | After the package is installed, you can launch the test suite from any
44 | directory by running::
45 |
46 | pytest --pyargs pysteps
47 |
48 | Test from sources
49 | -----------------
50 |
51 | Before testing the package directly from the sources, we need to build
52 | the extensions in-place. To do that, from the root pysteps folder run::
53 |
54 | python setup.py build_ext -i
55 |
56 | Now, the package sources can be tested in-place using the **pytest**
57 | command from the root of the pysteps source directory. E.g.::
58 |
59 | pytest -v --tb=line
60 |
61 |
--------------------------------------------------------------------------------
/doc/source/index.rst:
--------------------------------------------------------------------------------
1 | pysteps -- The nowcasting initiative
2 | ====================================
3 |
4 | Pysteps is a community-driven initiative for developing and maintaining an easy
5 | to use, modular, free and open source Python framework for short-term ensemble
6 | prediction systems.
7 |
8 | The focus is on probabilistic nowcasting of radar precipitation fields,
9 | but pysteps is designed to allow a wider range of uses.
10 |
11 | Pysteps is actively developed on GitHub__, while a more thorough description
12 | of pysteps is available in the pysteps reference publications:
13 |
14 | .. note::
15 | Pulkkinen, S., D. Nerini, A. Perez Hortal, C. Velasco-Forero, U. Germann,
16 | A. Seed, and L. Foresti, 2019: Pysteps: an open-source Python library for
17 | probabilistic precipitation nowcasting (v1.0). *Geosci. Model Dev.*, **12 (10)**,
18 | 4185–4219, doi: `10.5194/gmd-12-4185-2019 <https://doi.org/10.5194/gmd-12-4185-2019>`_.
19 |
20 | Imhoff, R.O., L. De Cruz, W. Dewettinck, C.C. Brauer, R. Uijlenhoet, K-J. van Heeringen,
21 | C. Velasco-Forero, D. Nerini, M. Van Ginderachter, and A.H. Weerts, 2023:
22 | Scale-dependent blending of ensemble rainfall nowcasts and NWP in the open-source
23 | pysteps library. *Q J R Meteorol Soc.*, 1-30,
24 | doi: `10.1002/qj.4461 <https://doi.org/10.1002/qj.4461>`_.
25 |
26 | __ https://github.com/pySTEPS/pysteps
27 |
28 | .. toctree::
29 | :maxdepth: 1
30 | :hidden:
31 | :caption: For users
32 |
33 | Installation
34 | Gallery <../auto_examples/index>
35 | My first nowcast (Colab Notebook)
36 | API Reference
37 | Example data
38 | Configuration file (pystepsrc)
39 | Machine learning applications
40 | Bibliography
41 |
42 | .. toctree::
43 | :maxdepth: 1
44 | :hidden:
45 | :caption: For developers
46 |
47 | Contributing Guide
48 | Importer plugins
49 | Testing
50 | Building the docs
51 | Packaging
52 | Publishing to conda-forge
53 | GitHub repository
54 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/blending.rst:
--------------------------------------------------------------------------------
1 | ================
2 | pysteps.blending
3 | ================
4 |
5 | Implementation of methods for blending (ensemble) nowcasts with Numerical Weather Prediction (NWP) models.
6 |
7 | .. automodule:: pysteps.blending.interface
8 | .. automodule:: pysteps.blending.clim
9 | .. automodule:: pysteps.blending.linear_blending
10 | .. automodule:: pysteps.blending.skill_scores
11 | .. automodule:: pysteps.blending.steps
12 | .. automodule:: pysteps.blending.utils
13 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/cascade.rst:
--------------------------------------------------------------------------------
1 | ===============
2 | pysteps.cascade
3 | ===============
4 |
5 | Methods for constructing bandpass filters and decomposing 2d precipitation
6 | fields into different spatial scales.
7 |
8 | .. automodule:: pysteps.cascade.interface
9 | .. automodule:: pysteps.cascade.bandpass_filters
10 | .. automodule:: pysteps.cascade.decomposition
11 |
12 |
13 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/datasets.rst:
--------------------------------------------------------------------------------
1 | .. automodule:: pysteps.datasets
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/decorators.rst:
--------------------------------------------------------------------------------
1 | .. automodule:: pysteps.decorators
2 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/downscaling.rst:
--------------------------------------------------------------------------------
1 | ===================
2 | pysteps.downscaling
3 | ===================
4 |
5 | Implementation of deterministic and ensemble downscaling methods.
6 |
7 |
8 | .. automodule:: pysteps.downscaling.interface
9 | .. automodule:: pysteps.downscaling.rainfarm
10 |
11 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/extrapolation.rst:
--------------------------------------------------------------------------------
1 | =====================
2 | pysteps.extrapolation
3 | =====================
4 |
5 | Extrapolation module functions and interfaces.
6 |
7 | .. automodule:: pysteps.extrapolation.interface
8 | .. automodule:: pysteps.extrapolation.semilagrangian
9 |
10 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/feature.rst:
--------------------------------------------------------------------------------
1 | ===============
2 | pysteps.feature
3 | ===============
4 |
5 | Implementations of feature detection methods.
6 |
7 |
8 | .. automodule:: pysteps.feature.interface
9 | .. automodule:: pysteps.feature.blob
10 | .. automodule:: pysteps.feature.tstorm
11 | .. automodule:: pysteps.feature.shitomasi
12 |
13 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/index.rst:
--------------------------------------------------------------------------------
1 | .. _pysteps-reference:
2 |
3 | API Reference
4 | =============
5 |
6 | :Release: |version|
7 | :Date: |today|
8 |
9 | This page gives a comprehensive description of all the modules and functions
10 | available in pysteps.
11 |
12 | .. toctree::
13 | :maxdepth: 2
14 | :caption: API Reference
15 |
16 | pysteps
17 | blending
18 | cascade
19 | decorators
20 | extrapolation
21 | datasets
22 | downscaling
23 | feature
24 | io
25 | motion
26 | noise
27 | nowcasts
28 | postprocessing
29 | timeseries
30 | tracking
31 | utils
32 | verification
33 | visualization
34 |
35 | .. only:: html
36 |
37 | Indices and tables
38 | ==================
39 |
40 | * :ref:`genindex`
41 | * :ref:`modindex`
42 | * :ref:`search`
43 |
44 | .. only:: html
45 |
46 | Bibliography
47 | ------------
48 |
49 | * :ref:`bibliography`
50 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/io.rst:
--------------------------------------------------------------------------------
1 | ==========
2 | pysteps.io
3 | ==========
4 |
5 | Methods for browsing data archives, reading 2d precipitation fields and writing
6 | forecasts into files.
7 |
8 | .. automodule:: pysteps.io.interface
9 | .. automodule:: pysteps.io.archive
10 | .. automodule:: pysteps.io.importers
11 | .. automodule:: pysteps.io.nowcast_importers
12 | .. automodule:: pysteps.io.exporters
13 | .. automodule:: pysteps.io.readers
14 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/motion.rst:
--------------------------------------------------------------------------------
1 | ==============
2 | pysteps.motion
3 | ==============
4 |
5 | Implementations of optical flow methods.
6 |
7 |
8 | .. automodule:: pysteps.motion.interface
9 | .. automodule:: pysteps.motion.constant
10 | .. automodule:: pysteps.motion.darts
11 | .. automodule:: pysteps.motion.lucaskanade
12 | .. automodule:: pysteps.motion.proesmans
13 | .. automodule:: pysteps.motion.vet
14 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/noise.rst:
--------------------------------------------------------------------------------
1 | =============
2 | pysteps.noise
3 | =============
4 |
5 | Implementation of stochastic noise generators and perturbation methods for precipitation and motion fields.
6 |
7 |
8 | .. automodule:: pysteps.noise.interface
9 | .. automodule:: pysteps.noise.fftgenerators
10 | .. automodule:: pysteps.noise.motion
11 | .. automodule:: pysteps.noise.utils
12 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/nowcasts.rst:
--------------------------------------------------------------------------------
1 | ================
2 | pysteps.nowcasts
3 | ================
4 |
5 | Implementation of deterministic and ensemble nowcasting methods.
6 |
7 |
8 | .. automodule:: pysteps.nowcasts.interface
9 | .. automodule:: pysteps.nowcasts.anvil
10 | .. automodule:: pysteps.nowcasts.extrapolation
11 | .. automodule:: pysteps.nowcasts.linda
12 | .. automodule:: pysteps.nowcasts.lagrangian_probability
13 | .. automodule:: pysteps.nowcasts.sprog
14 | .. automodule:: pysteps.nowcasts.sseps
15 | .. automodule:: pysteps.nowcasts.steps
16 | .. automodule:: pysteps.nowcasts.utils
17 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/postprocessing.rst:
--------------------------------------------------------------------------------
1 | ======================
2 | pysteps.postprocessing
3 | ======================
4 |
5 | Methods for post-processing of forecasts.
6 |
7 |
8 | .. automodule:: pysteps.postprocessing.ensemblestats
9 | .. automodule:: pysteps.postprocessing.probmatching
10 |
11 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/pysteps.rst:
--------------------------------------------------------------------------------
1 | =======
2 | pysteps
3 | =======
4 |
5 | Top-level utilities of the pysteps package.
6 |
7 | .. autosummary::
8 | :toctree: ../generated/
9 |
10 | pysteps.load_config_file
11 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/timeseries.rst:
--------------------------------------------------------------------------------
1 | ==================
2 | pysteps.timeseries
3 | ==================
4 |
5 | Methods and models for time series analysis.
6 |
7 |
8 | .. automodule:: pysteps.timeseries.autoregression
9 | .. automodule:: pysteps.timeseries.correlation
10 |
11 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/tracking.rst:
--------------------------------------------------------------------------------
1 | ================
2 | pysteps.tracking
3 | ================
4 |
5 | Implementations of feature tracking methods.
6 |
7 |
8 | .. automodule:: pysteps.tracking.interface
9 | .. automodule:: pysteps.tracking.lucaskanade
10 | .. automodule:: pysteps.tracking.tdating
11 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/utils.rst:
--------------------------------------------------------------------------------
1 | =============
2 | pysteps.utils
3 | =============
4 |
5 | Implementation of miscellaneous utility functions.
6 |
7 |
8 | .. automodule:: pysteps.utils.interface
9 | .. automodule:: pysteps.utils.arrays
10 | .. automodule:: pysteps.utils.cleansing
11 | .. automodule:: pysteps.utils.conversion
12 | .. automodule:: pysteps.utils.dimension
13 | .. automodule:: pysteps.utils.fft
14 | .. automodule:: pysteps.utils.images
15 | .. automodule:: pysteps.utils.interpolate
16 | .. automodule:: pysteps.utils.spectral
17 | .. automodule:: pysteps.utils.tapering
18 | .. automodule:: pysteps.utils.transformation
19 | .. automodule:: pysteps.utils.reprojection
20 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/verification.rst:
--------------------------------------------------------------------------------
1 | ====================
2 | pysteps.verification
3 | ====================
4 |
5 | Methods for verification of deterministic, probabilistic and ensemble forecasts.
6 |
7 | .. automodule:: pysteps.verification.interface
8 | .. automodule:: pysteps.verification.detcatscores
9 | .. automodule:: pysteps.verification.detcontscores
10 | .. automodule:: pysteps.verification.ensscores
11 | .. automodule:: pysteps.verification.lifetime
12 | .. automodule:: pysteps.verification.plots
13 | .. automodule:: pysteps.verification.probscores
14 | .. automodule:: pysteps.verification.salscores
15 | .. automodule:: pysteps.verification.spatialscores
16 |
--------------------------------------------------------------------------------
/doc/source/pysteps_reference/visualization.rst:
--------------------------------------------------------------------------------
1 | =====================
2 | pysteps.visualization
3 | =====================
4 |
5 | Methods for plotting precipitation and motion fields.
6 |
7 | .. automodule:: pysteps.visualization.animations
8 | .. automodule:: pysteps.visualization.basemaps
9 | .. automodule:: pysteps.visualization.motionfields
10 | .. automodule:: pysteps.visualization.precipfields
11 | .. automodule:: pysteps.visualization.spectral
12 | .. automodule:: pysteps.visualization.thunderstorms
13 | .. automodule:: pysteps.visualization.utils
14 |
--------------------------------------------------------------------------------
/doc/source/user_guide/example_data.rst:
--------------------------------------------------------------------------------
1 | .. _example_data:
2 |
3 | Installing the example data
4 | ===========================
5 |
6 | The example scripts in the user guide, as well as the built-in tests,
7 | use the example radar data available in a separate repository:
8 | `pysteps-data `_.
9 |
10 | The easiest way to install the example data is by using the
11 | :func:`~pysteps.datasets.download_pysteps_data` and
12 | :func:`~pysteps.datasets.create_default_pystepsrc` functions from
13 | the :mod:`pysteps.datasets` module.
14 |
15 | Installation using the datasets module
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
17 |
18 | Below is a code snippet that can be used to install the example data and configure the
19 | `pystepsrc` file to point to it.
20 |
21 |
22 | In the example below, the example data is placed in the **pysteps_data** directory under
23 | the user's home folder. The snippet also creates a default configuration file that points to
24 | the downloaded data and places it in $HOME/.pysteps (Unix and Mac OS X) or
25 | $USERPROFILE/pysteps (Windows). This is one of the default locations where pysteps
26 | looks for the configuration file (see :ref:`pysteps_lookup` for
27 | more information).
28 |
29 | .. code-block:: python
30 |
31 | import os
32 |
33 | # Import the helper functions
34 | from pysteps.datasets import download_pysteps_data, create_default_pystepsrc
35 |
36 | # In this example we will place it in the user's home folder on the
37 | # `pysteps_data` folder.
38 | home_dir = os.path.expanduser("~")
39 | pysteps_data_dir_path = os.path.join(home_dir, "pysteps_data")
40 |
41 | # Download the pysteps data.
42 | download_pysteps_data(pysteps_data_dir_path, force=True)
43 |
44 | # Create a default configuration file that points to the downloaded data.
45 | # By default it will place the configuration file in the
46 | # $HOME/.pysteps (unix and Mac OS X) or $USERPROFILE/pysteps (windows).
47 | config_file_path = create_default_pystepsrc(pysteps_data_dir_path)
48 |
49 | Note that for these changes to take effect, you need to restart the Python interpreter or
50 | use the :func:`pysteps.load_config_file` function as follows::
51 |
52 | # Load the new configuration file and replace the default configuration
53 | import pysteps
54 | pysteps.load_config_file(config_file_path, verbose=True)
55 |
56 |
57 | To customize the default configuration file see the :ref:`pystepsrc` section.
58 |
59 |
60 | Manual installation
61 | ~~~~~~~~~~~~~~~~~~~
62 |
63 | An alternative is to download the data manually to your computer and configure the
64 | :ref:`pystepsrc ` file to point to that example data.
65 |
66 | First, download the data from the repository by
67 | `clicking here `_.
68 |
69 | Unzip the data into a folder of your preference. Once the data is unzipped, the
70 | directory structure looks like this::
71 |
72 |
73 | pysteps-data
74 | |
75 | ├── radar
76 | ├── KNMI
77 | ├── OPERA
78 | ├── bom
79 | ├── fmi
80 | ├── mch
81 |
82 | The next step is updating the *pystepsrc* file to point to these directories,
83 | as described in the :ref:`pystepsrc` section.
84 |
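If you keep the manually edited *pystepsrc* outside of the default search locations, you
can also point pysteps to it explicitly from within Python. A minimal sketch (the path
below is only an example):

.. code-block:: python

    import pysteps

    # Load an explicitly chosen configuration file instead of relying on the
    # default lookup locations.
    pysteps.load_config_file("/path/to/pysteps-data/pystepsrc", verbose=True)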
85 |
86 |
87 |
88 |
--------------------------------------------------------------------------------
/doc/source/user_guide/pystepsrc_example.rst:
--------------------------------------------------------------------------------
1 | .. _pystepsrc_example:
2 |
3 | Example of pystepsrc file
4 | =========================
5 |
6 | Below you can find the default pystepsrc file.
7 | The lines starting with "//" are comments and they are ignored.
8 |
9 | .. code::
10 |
11 | // pysteps configuration
12 | {
13 | // "silent_import" : whether to suppress the initial pysteps message
14 | "silent_import": false,
15 | "outputs": {
16 | // path_outputs : path where to save results (figures, forecasts, etc)
17 | "path_outputs": "./"
18 | },
19 | "plot": {
20 | // "motion_plot" : "streamplot" or "quiver"
21 | "motion_plot": "quiver",
22 | // "colorscale" : "BOM-RF3", "pysteps" or "STEPS-BE"
23 | "colorscale": "pysteps"
24 | },
25 | "data_sources": {
26 | "bom": {
27 | "root_path": "./radar/bom",
28 | "path_fmt": "prcp-cscn/2/%Y/%m/%d",
29 | "fn_pattern": "2_%Y%m%d_%H%M00.prcp-cscn",
30 | "fn_ext": "nc",
31 | "importer": "bom_rf3",
32 | "timestep": 6,
33 | "importer_kwargs": {
34 | "gzipped": true
35 | }
36 | },
37 | "fmi": {
38 | "root_path": "./radar/fmi",
39 | "path_fmt": "%Y%m%d",
40 | "fn_pattern": "%Y%m%d%H%M_fmi.radar.composite.lowest_FIN_SUOMI1",
41 | "fn_ext": "pgm.gz",
42 | "importer": "fmi_pgm",
43 | "timestep": 5,
44 | "importer_kwargs": {
45 | "gzipped": true
46 | }
47 | },
48 | "mch": {
49 | "root_path": "./radar/mch",
50 | "path_fmt": "%Y%m%d",
51 | "fn_pattern": "AQC%y%j%H%M?_00005.801",
52 | "fn_ext": "gif",
53 | "importer": "mch_gif",
54 | "timestep": 5,
55 | "importer_kwargs": {
56 | "product": "AQC",
57 | "unit": "mm",
58 | "accutime": 5
59 | }
60 | },
61 | "opera": {
62 | "root_path": "./radar/OPERA",
63 | "path_fmt": "%Y%m%d",
64 | "fn_pattern": "T_PAAH21_C_EUOC_%Y%m%d%H%M%S",
65 | "fn_ext": "hdf",
66 | "importer": "opera_hdf5",
67 | "timestep": 15,
68 | "importer_kwargs": {}
69 | },
70 | "knmi": {
71 | "root_path": "./radar/KNMI",
72 | "path_fmt": "%Y/%m",
73 | "fn_pattern": "RAD_NL25_RAP_5min_%Y%m%d%H%M",
74 | "fn_ext": "h5",
75 | "importer": "knmi_hdf5",
76 | "timestep": 5,
77 | "importer_kwargs": {
78 | "accutime": 5,
79 | "qty": "ACRR",
80 | "pixelsize": 1000.0
81 | }
82 | },
83 | "saf": {
84 | "root_path": "./saf",
85 | "path_fmt": "%Y%m%d/CRR",
86 | "fn_pattern": "S_NWC_CRR_MSG4_Europe-VISIR_%Y%m%dT%H%M00Z",
87 | "fn_ext": "nc",
88 | "importer": "saf_crri",
89 | "timestep": 15,
90 | "importer_kwargs": {
91 | "gzipped": true
92 | }
93 | }
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/doc/source/zz_bibliography.rst:
--------------------------------------------------------------------------------
1 | .. _bibliography:
2 |
3 | ============
4 | Bibliography
5 | ============
6 |
7 |
8 | .. bibliography::
9 | :all:
10 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: pysteps
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | - python>=3.10
7 | - jsmin
8 | - jsonschema
9 | - matplotlib
10 | - netCDF4
11 | - numpy
12 | - opencv
13 | - pillow
14 | - pyproj
15 | - scipy
16 |
--------------------------------------------------------------------------------
/environment_dev.yml:
--------------------------------------------------------------------------------
1 | # pysteps development environment
2 | name: pysteps_dev
3 | channels:
4 | - conda-forge
5 | - defaults
6 | dependencies:
7 | - python>=3.10
8 | - pip
9 | - jsmin
10 | - jsonschema
11 | - matplotlib
12 | - netCDF4
13 | - numpy
14 | - opencv
15 | - pillow
16 | - pyproj
17 | - scipy
18 | - pytest
19 | - pywavelets
20 | - cython
21 | - dask
22 | - pyfftw
23 | - h5py
24 | - PyWavelets
25 | - pygrib
26 | - black
27 | - pytest-cov
28 | - codecov
29 | - pre_commit
30 | - cartopy>=0.18
31 | - scikit-image
32 | - pandas
33 | - rasterio
34 |
--------------------------------------------------------------------------------
/examples/README.txt:
--------------------------------------------------------------------------------
1 | .. _example_gallery:
2 |
3 | Example gallery
4 | ===============
5 |
6 | Below is a collection of example scripts and tutorials to illustrate the usage
7 | of pysteps.
8 |
9 | These scripts require the pysteps example data.
10 | See the installation instructions in the :ref:`example_data` section.
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "wheel",
4 | "setuptools>=40.8.0",
5 | "Cython>=0.29.2",
6 | "numpy>=1.13"
7 | ]
8 | # setuptools 40.8.0 is the first version of setuptools that offers
9 | # a PEP 517 backend that closely mimics directly executing setup.py.
10 | build-backend = "setuptools.build_meta:__legacy__"
11 | #https://pip.pypa.io/en/stable/reference/pip/#pep-517-and-518-support
12 |
13 |
14 | # Define black parameters for the project
15 | # https://black.readthedocs.io/en/stable/pyproject_toml.html#configuration-format
16 | [tool.black]
17 | target-version = ['py36']
18 | line-length = 88
19 | exclude = '''
20 | /(
21 | \.eggs
22 | | \.git
23 | | _build
24 | | build
25 | | dist
26 | )/
27 | '''
28 |
--------------------------------------------------------------------------------
/pysteps/blending/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Methods for blending NWP model(s) with nowcasts."""
3 |
4 | from pysteps.blending.interface import get_method
5 | from .clim import *
6 | from .skill_scores import *
7 | from .utils import *
8 |
--------------------------------------------------------------------------------
/pysteps/blending/interface.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.blending.interface
4 | ==========================
5 | Interface for the blending module. It returns a callable function for computing
6 | blended nowcasts with NWP models.
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | get_method
12 | """
13 |
14 | from functools import partial
15 |
16 | from pysteps.blending import linear_blending
17 | from pysteps.blending import steps
18 |
19 | _blending_methods = dict()
20 | _blending_methods["linear_blending"] = linear_blending.forecast
21 | _blending_methods["salient_blending"] = partial(linear_blending.forecast, saliency=True)
22 | _blending_methods["steps"] = steps.forecast
23 |
24 |
25 | def get_method(name):
26 | """
27 | Return a callable function for computing nowcasts blending into an NWP
28 | forecast.
29 |
30 | Implemented methods:
31 |
32 | +------------------+------------------------------------------------------+
33 | | Name | Description |
34 | +==================+======================================================+
35 | | linear_blending | the linear blending of a nowcast method with other |
36 | | | data (e.g. NWP data). |
37 | +------------------+------------------------------------------------------+
38 | | salient_blending | the salient blending of a nowcast method with other |
39 | | | data (e.g. NWP data) described in :cite:`Hwang2015`. |
40 | | | The blending is based on intensities and forecast |
41 | | | times. The blended product preserves pixel |
42 | | | intensities with time if they are strong enough based|
43 | | | on their ranked salience. |
44 | +------------------+------------------------------------------------------+
45 | | steps | the STEPS stochastic nowcasting blending method |
46 | | | described in :cite:`Seed2003`, :cite:`BPS2006` and |
47 | | | :cite:`SPN2013`. The blending weights approach |
48 | | | currently follows :cite:`BPS2006`. |
49 | +------------------+------------------------------------------------------+
50 | """
51 | if isinstance(name, str):
52 | name = name.lower()
53 | else:
54 | raise TypeError(
55 | "Only strings supported for the method's names.\n"
56 | + "Available names:"
57 | + str(list(_blending_methods.keys()))
58 | ) from None
59 |
60 | try:
61 | return _blending_methods[name]
62 | except KeyError:
63 | raise ValueError(
64 |             f"Unknown blending method {name}. "
65 | "The available methods are: "
66 | f"{*list(_blending_methods.keys()),}"
67 | ) from None
68 |
--------------------------------------------------------------------------------
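The interface above only dispatches on the method name. A minimal usage sketch (not part of the repository) showing the lookup, assuming pysteps is installed:

    from pysteps import blending

    # Name lookup is case-insensitive because get_method lower-cases its argument.
    blend = blending.get_method("linear_blending")

    # "salient_blending" is the same forecast function with saliency=True
    # pre-bound via functools.partial.
    salient_blend = blending.get_method("Salient_Blending")

The returned callables are the forecast functions of pysteps.blending.linear_blending and pysteps.blending.steps; their argument lists are documented in those modules.
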
/pysteps/cascade/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Methods for constructing bandpass filters and decomposing 2d precipitation
4 | fields into different spatial scales.
5 | """
6 |
7 | from .interface import get_method
8 |
--------------------------------------------------------------------------------
/pysteps/cascade/interface.py:
--------------------------------------------------------------------------------
1 | """
2 | pysteps.cascade.interface
3 | =========================
4 |
5 | Interface for the cascade module.
6 |
7 | .. autosummary::
8 | :toctree: ../generated/
9 |
10 | get_method
11 | """
12 |
13 | from pysteps.cascade import decomposition, bandpass_filters
14 |
15 | _cascade_methods = dict()
16 | _cascade_methods["fft"] = (decomposition.decomposition_fft, decomposition.recompose_fft)
17 | _cascade_methods["gaussian"] = bandpass_filters.filter_gaussian
18 | _cascade_methods["uniform"] = bandpass_filters.filter_uniform
19 |
20 |
21 | def get_method(name):
22 | """
23 | Return a callable function for the bandpass filter or cascade decomposition
24 | method corresponding to the given name. For the latter, two functions are
25 | returned: the first is for the decomposing and the second is for recomposing
26 | the cascade.
27 |
28 | Filter methods:
29 |
30 | +-------------------+------------------------------------------------------+
31 | | Name | Description |
32 | +===================+======================================================+
33 | | gaussian | implementation of bandpass filter using Gaussian |
34 | | | weights |
35 | +-------------------+------------------------------------------------------+
36 | | uniform | implementation of a filter where all weights are set |
37 | | | to one |
38 | +-------------------+------------------------------------------------------+
39 |
40 | Decomposition/recomposition methods:
41 |
42 | +-------------------+------------------------------------------------------+
43 | | Name | Description |
44 | +===================+======================================================+
45 | | fft | decomposition into multiple spatial scales based on |
46 | | | the fast Fourier Transform (FFT) and a set of |
47 | | | bandpass filters |
48 | +-------------------+------------------------------------------------------+
49 |
50 | """
51 |
52 | if isinstance(name, str):
53 | name = name.lower()
54 | else:
55 | raise TypeError(
56 | "Only strings supported for the method's names.\n"
57 | + "Available names:"
58 | + str(list(_cascade_methods.keys()))
59 | ) from None
60 | try:
61 | return _cascade_methods[name]
62 | except KeyError:
63 | raise ValueError(
64 | "Unknown method {}\n".format(name)
65 | + "The available methods are:"
66 | + str(list(_cascade_methods.keys()))
67 | ) from None
68 |
--------------------------------------------------------------------------------
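A rough usage sketch of the two lookup types (bandpass-filter constructor versus decompose/recompose pair); the random input field is a placeholder, and the call signatures follow the bundled test_cascade.py:

    import numpy as np
    from pysteps import cascade

    filter_gaussian = cascade.get_method("gaussian")   # bandpass-filter constructor
    decompose, recompose = cascade.get_method("fft")   # decomposition/recomposition pair

    precip = np.random.randn(256, 256)                 # placeholder field
    bp_filter = filter_gaussian(precip.shape, 7)       # 7 cascade levels
    cascade_decomp = decompose(precip, bp_filter)
    recomposed = recompose(cascade_decomp)             # approximately equal to precip
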
/pysteps/downscaling/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Implementations of deterministic and ensemble downscaling methods."""
3 |
4 | from pysteps.downscaling.interface import get_method
5 |
--------------------------------------------------------------------------------
/pysteps/downscaling/interface.py:
--------------------------------------------------------------------------------
1 | """
2 | pysteps.downscaling.interface
3 | =============================
4 |
5 | Interface for the downscaling module. It returns a callable function for computing
6 | downscaling.
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | get_method
12 | """
13 |
14 | from pysteps.downscaling import rainfarm
15 |
16 | _downscale_methods = dict()
17 | _downscale_methods["rainfarm"] = rainfarm.downscale
18 |
19 |
20 | def get_method(name):
21 | """
22 | Return a callable function for computing downscaling.
23 |
24 | Description:
25 | Return a callable function for computing deterministic or ensemble
26 | precipitation downscaling.
27 |
28 | Implemented methods:
29 |
30 | +-----------------+-------------------------------------------------------+
31 | | Name | Description |
32 | +=================+=======================================================+
33 | | rainfarm | the rainfall downscaling by a filtered autoregressive |
34 | | | model (RainFARM) method developed in |
35 | | | :cite:`Rebora2006` |
36 | +-----------------+-------------------------------------------------------+
37 | """
38 | if isinstance(name, str):
39 | name = name.lower()
40 | else:
41 | raise TypeError(
42 | "Only strings supported for the method's names.\n"
43 | + "Available names:"
44 | + str(list(_downscale_methods.keys()))
45 | ) from None
46 |
47 | try:
48 | return _downscale_methods[name]
49 | except KeyError:
50 | raise ValueError(
51 | "Unknown downscaling method {}\n".format(name)
52 | + "The available methods are:"
53 | + str(list(_downscale_methods.keys()))
54 | ) from None
55 |
--------------------------------------------------------------------------------
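A minimal sketch of the lookup; the coarse field is a placeholder and the keyword name ds_factor is an assumption here, so check pysteps.downscaling.rainfarm.downscale for the exact signature:

    import numpy as np
    from pysteps import downscaling

    downscale = downscaling.get_method("rainfarm")

    # Hypothetical coarse precipitation field, downscaled by a factor of 4.
    precip_coarse = np.random.gamma(1.0, 2.0, size=(32, 32))
    precip_fine = downscale(precip_coarse, ds_factor=4)
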
/pysteps/exceptions.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Custom pySteps exceptions
4 |
5 |
6 | class MissingOptionalDependency(Exception):
7 | """Raised when an optional dependency is needed but not found."""
8 |
9 | pass
10 |
11 |
12 | class DirectoryNotEmpty(Exception):
13 | """Raised when the destination directory in a file copy operation is not empty."""
14 |
15 | pass
16 |
17 |
18 | class DataModelError(Exception):
19 |     """Raised when a file is not compliant with the Data Information Model."""
20 |
21 | pass
22 |
--------------------------------------------------------------------------------
/pysteps/extrapolation/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Methods for advection-based extrapolation of precipitation fields.
4 | Currently the module contains an implementation of the
5 | semi-Lagrangian method described in :cite:`GZ2002` and the
6 | Eulerian persistence."""
7 |
8 | from pysteps.extrapolation.interface import get_method
9 |
--------------------------------------------------------------------------------
/pysteps/feature/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Implementations of feature detection methods."""
3 |
4 | from pysteps.feature.interface import get_method
5 |
--------------------------------------------------------------------------------
/pysteps/feature/blob.py:
--------------------------------------------------------------------------------
1 | """
2 | pysteps.feature.blob
3 | ====================
4 |
5 | Blob detection methods.
6 |
7 | .. autosummary::
8 | :toctree: ../generated/
9 |
10 | detection
11 | """
12 |
13 | import numpy as np
14 |
15 | from pysteps.exceptions import MissingOptionalDependency
16 |
17 | from scipy.ndimage import gaussian_laplace
18 |
19 | try:
20 | from skimage import feature
21 |
22 | SKIMAGE_IMPORTED = True
23 | except ImportError:
24 | SKIMAGE_IMPORTED = False
25 |
26 |
27 | def detection(
28 | input_image,
29 | max_num_features=None,
30 | method="log",
31 | threshold=0.5,
32 | min_sigma=3,
33 | max_sigma=20,
34 | overlap=0.5,
35 | return_sigmas=False,
36 | **kwargs,
37 | ):
38 | """
39 | .. _`feature.blob_*`:\
40 | https://scikit-image.org/docs/dev/auto_examples/features_detection/plot_blob.html
41 |
42 | Interface to the `feature.blob_*`_ methods implemented in scikit-image. A
43 | blob is defined as a scale-space maximum of a Gaussian-filtered image.
44 |
45 | .. _ndarray:\
46 | https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
47 |
48 | Parameters
49 | ----------
50 | input_image: array_like
51 |         Array of shape (m, n) containing the input image. NaN values are ignored.
52 | max_num_features : int, optional
53 | The maximum number of blobs to detect. Set to None for no restriction.
54 | If specified, the most significant blobs are chosen based on their
55 | intensities in the corresponding Laplacian of Gaussian (LoG)-filtered
56 | images.
57 | method: {'log', 'dog', 'doh'}, optional
58 | The method to use: 'log' = Laplacian of Gaussian, 'dog' = Difference of
59 | Gaussian, 'doh' = Determinant of Hessian.
60 | threshold: float, optional
61 | Detection threshold.
62 | min_sigma: float, optional
63 | The minimum standard deviation for the Gaussian kernel.
64 | max_sigma: float, optional
65 | The maximum standard deviation for the Gaussian kernel.
66 | overlap: float, optional
67 | A value between 0 and 1. If the area of two blobs overlaps by a fraction
68 | greater than the value for overlap, the smaller blob is eliminated.
69 | return_sigmas: bool, optional
70 | If True, return the standard deviations of the Gaussian kernels
71 | corresponding to the detected blobs.
72 |
73 | Returns
74 | -------
75 | points: ndarray_
76 | Array of shape (p, 2) or (p, 3) indicating the pixel coordinates of *p*
77 | detected blobs. If return_sigmas is True, the third column contains
78 | the standard deviations of the Gaussian kernels corresponding to the
79 | blobs.
80 | """
81 | if method not in ["log", "dog", "doh"]:
82 | raise ValueError("unknown method %s, must be 'log', 'dog' or 'doh'" % method)
83 |
84 | if not SKIMAGE_IMPORTED:
85 | raise MissingOptionalDependency(
86 | "skimage is required for the blob_detection routine but it is not installed"
87 | )
88 |
89 | if method == "log":
90 | detector = feature.blob_log
91 | elif method == "dog":
92 | detector = feature.blob_dog
93 | else:
94 | detector = feature.blob_doh
95 |
96 | blobs = detector(
97 | input_image,
98 | min_sigma=min_sigma,
99 | max_sigma=max_sigma,
100 | threshold=threshold,
101 | overlap=overlap,
102 | **kwargs,
103 | )
104 |
105 | if max_num_features is not None and blobs.shape[0] > max_num_features:
106 | blob_intensities = []
107 | for i in range(blobs.shape[0]):
108 | gl_image = -gaussian_laplace(input_image, blobs[i, 2]) * blobs[i, 2] ** 2
109 | blob_intensities.append(gl_image[int(blobs[i, 0]), int(blobs[i, 1])])
110 | idx = np.argsort(blob_intensities)[::-1]
111 | blobs = blobs[idx[:max_num_features], :]
112 |
113 | if not return_sigmas:
114 | return np.column_stack([blobs[:, 1], blobs[:, 0]])
115 | else:
116 | return np.column_stack([blobs[:, 1], blobs[:, 0], blobs[:, 2]])
117 |
--------------------------------------------------------------------------------
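A small synthetic check of the detection function above (requires scikit-image); the blob size and threshold are arbitrary illustration values:

    import numpy as np
    from pysteps.feature import blob

    # One bright square on an empty background.
    image = np.zeros((128, 128))
    image[50:70, 50:70] = 10.0

    points = blob.detection(image, method="log", threshold=0.5, max_num_features=5)
    # points has shape (p, 2): column 0 holds the x (column) coordinates and
    # column 1 the y (row) coordinates, as assembled by the column_stack call above.
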
/pysteps/feature/interface.py:
--------------------------------------------------------------------------------
1 | """
2 | pysteps.feature.interface
3 | =========================
4 |
5 | Interface for the feature detection module. It returns a callable function for
6 | detecting features from two-dimensional images.
7 |
8 | The feature detectors implement the following interface:
9 |
10 | ``detection(input_image, **keywords)``
11 |
12 | The input is a two-dimensional image. Additional arguments to the specific
13 | method can be given via ``**keywords``. The output is an array of shape (n, m),
14 | where each row corresponds to one of the n features. The first two columns
15 | contain the coordinates (x, y) of the features, and additional information can
16 | be specified in the remaining columns.
17 |
18 | All implemented methods support the following keyword arguments:
19 |
20 | +------------------+-----------------------------------------------------+
21 | | Key | Value |
22 | +==================+=====================================================+
23 | | max_num_features | maximum number of features to detect |
24 | +------------------+-----------------------------------------------------+
25 |
26 | .. autosummary::
27 | :toctree: ../generated/
28 |
29 | get_method
30 | """
31 |
32 | from pysteps.feature import blob
33 | from pysteps.feature import tstorm
34 | from pysteps.feature import shitomasi
35 |
36 | _detection_methods = dict()
37 | _detection_methods["blob"] = blob.detection
38 | _detection_methods["tstorm"] = tstorm.detection
39 | _detection_methods["shitomasi"] = shitomasi.detection
40 |
41 |
42 | def get_method(name):
43 | """
44 | Return a callable function for feature detection.
45 |
46 | Implemented methods:
47 |
48 | +-----------------+-------------------------------------------------------+
49 | | Name | Description |
50 | +=================+=======================================================+
51 | | blob | blob detection in scale space |
52 | +-----------------+-------------------------------------------------------+
53 | | tstorm | Thunderstorm cell detection |
54 | +-----------------+-------------------------------------------------------+
55 | | shitomasi | Shi-Tomasi corner detection |
56 | +-----------------+-------------------------------------------------------+
57 | """
58 | if isinstance(name, str):
59 | name = name.lower()
60 | else:
61 | raise TypeError(
62 | "Only strings supported for the method's names.\n"
63 | + "Available names:"
64 | + str(list(_detection_methods.keys()))
65 | ) from None
66 |
67 | try:
68 | return _detection_methods[name]
69 | except KeyError:
70 | raise ValueError(
71 | "Unknown detection method {}\n".format(name)
72 | + "The available methods are:"
73 | + str(list(_detection_methods.keys()))
74 | ) from None
75 |
--------------------------------------------------------------------------------
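The same detectors can also be obtained through this interface. A short sketch with a placeholder image (the blob detector needs scikit-image, the Shi-Tomasi detector additionally needs OpenCV):

    import numpy as np
    from pysteps import feature

    detector = feature.get_method("blob")
    image = np.random.rand(128, 128)               # placeholder input image
    points = detector(image, max_num_features=10)  # supported by all detectors
    # points[:, 0] are x coordinates, points[:, 1] are y coordinates.
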
/pysteps/io/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Methods for browsing data archives, reading 2d precipitation fields and writing
4 | forecasts into files.
5 | """
6 |
7 | from .interface import get_method, discover_importers, importers_info
8 | from .archive import *
9 | from .exporters import *
10 | from .importers import *
11 | from .nowcast_importers import *
12 | from .readers import *
13 |
--------------------------------------------------------------------------------
/pysteps/io/readers.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.io.readers
4 | ==================
5 |
6 | Module with the reader functions.
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | read_timeseries
12 | """
13 |
14 | import numpy as np
15 |
16 |
17 | def read_timeseries(inputfns, importer, **kwargs):
18 | """
19 | Read a time series of input files using the methods implemented in the
20 | :py:mod:`pysteps.io.importers` module and stack them into a 3d array of
21 | shape (num_timesteps, height, width).
22 |
23 | Parameters
24 | ----------
25 | inputfns: tuple
26 | Input files returned by a function implemented in the
27 | :py:mod:`pysteps.io.archive` module.
28 | importer: function
29 | A function implemented in the :py:mod:`pysteps.io.importers` module.
30 | kwargs: dict
31 | Optional keyword arguments for the importer.
32 |
33 | Returns
34 | -------
35 | out: tuple
36 | A three-element tuple containing the read data and quality rasters and
37 | associated metadata. If an input file name is None, the corresponding
38 | precipitation and quality fields are filled with nan values. If all
39 | input file names are None or if the length of the file name list is
40 | zero, a three-element tuple containing None values is returned.
41 |
42 | """
43 |
44 | # check for missing data
45 | precip_ref = None
46 | if all(ifn is None for ifn in inputfns):
47 | return None, None, None
48 | else:
49 | if len(inputfns[0]) == 0:
50 | return None, None, None
51 | for ifn in inputfns[0]:
52 | if ifn is not None:
53 | precip_ref, quality_ref, metadata = importer(ifn, **kwargs)
54 | break
55 |
56 | if precip_ref is None:
57 | return None, None, None
58 |
59 | precip = []
60 | quality = []
61 | timestamps = []
62 | for i, ifn in enumerate(inputfns[0]):
63 | if ifn is not None:
64 | precip_, quality_, _ = importer(ifn, **kwargs)
65 | precip.append(precip_)
66 | quality.append(quality_)
67 | timestamps.append(inputfns[1][i])
68 | else:
69 | precip.append(precip_ref * np.nan)
70 | if quality_ref is not None:
71 | quality.append(quality_ref * np.nan)
72 | else:
73 | quality.append(None)
74 | timestamps.append(inputfns[1][i])
75 |
76 | # Replace this with stack?
77 | precip = np.concatenate([precip_[None, :, :] for precip_ in precip])
78 | # TODO: Q should be organized as R, but this is not trivial as Q_ can be also None or a scalar
79 | metadata["timestamps"] = np.array(timestamps)
80 |
81 | return precip, quality, metadata
82 |
--------------------------------------------------------------------------------
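A rough end-to-end sketch combining archive.find_by_date with read_timeseries. It assumes the pysteps example data and a configured pystepsrc (the "mch" source name is taken from the bundled tests), and that io.get_method accepts "importer" as the method type:

    from datetime import datetime
    import pysteps

    ds = pysteps.rcparams.data_sources["mch"]
    date = datetime(2015, 5, 15, 16, 30)

    fns = pysteps.io.archive.find_by_date(
        date,
        ds["root_path"],
        ds["path_fmt"],
        ds["fn_pattern"],
        ds["fn_ext"],
        timestep=ds["timestep"],
        num_prev_files=2,
    )

    importer = pysteps.io.get_method(ds["importer"], "importer")
    precip, quality, metadata = pysteps.io.read_timeseries(
        fns, importer, **ds["importer_kwargs"]
    )
    # precip: (3, height, width); metadata["timestamps"]: the matching datetimes.
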
/pysteps/motion/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Implementations of optical flow methods."""
4 |
5 | from .interface import get_method
6 |
--------------------------------------------------------------------------------
/pysteps/motion/constant.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.motion.constant
4 | =======================
5 |
6 | Implementation of a constant advection field estimation by maximizing the
7 | correlation between two images.
8 |
9 | .. autosummary::
10 | :toctree: ../generated/
11 |
12 | constant
13 | """
14 |
15 | import numpy as np
16 | import scipy.optimize as op
17 | from scipy.ndimage import map_coordinates
18 |
19 |
20 | def constant(R, **kwargs):
21 | """
22 | Compute a constant advection field by finding a translation vector that
23 | maximizes the correlation between two successive images.
24 |
25 | Parameters
26 | ----------
27 | R: array_like
28 | Array of shape (T,m,n) containing a sequence of T two-dimensional input
29 |         images of shape (m,n). If T > 2, the last two elements along axis 0 are used.
30 |
31 | Returns
32 | -------
33 | out: array_like
34 | The constant advection field having shape (2, m, n), where out[0, :, :]
35 | contains the x-components of the motion vectors and out[1, :, :]
36 | contains the y-components.
37 | """
38 | m, n = R.shape[1:]
39 | X, Y = np.meshgrid(np.arange(n), np.arange(m))
40 |
41 | def f(v):
42 | XYW = [Y + v[1], X + v[0]]
43 | R_w = map_coordinates(
44 | R[-2, :, :], XYW, mode="constant", cval=np.nan, order=0, prefilter=False
45 | )
46 |
47 | mask = np.logical_and(np.isfinite(R[-1, :, :]), np.isfinite(R_w))
48 |
49 | return -np.corrcoef(R[-1, :, :][mask], R_w[mask])[0, 1]
50 |
51 | options = {"initial_simplex": (np.array([(0, 1), (1, 0), (1, 1)]))}
52 | result = op.minimize(f, (1, 1), method="Nelder-Mead", options=options)
53 |
54 | return np.stack([-result.x[0] * np.ones((m, n)), -result.x[1] * np.ones((m, n))])
55 |
--------------------------------------------------------------------------------
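A quick synthetic call of the constant-vector estimator; the one-pixel shift below is an arbitrary test case, not taken from the repository:

    import numpy as np
    from pysteps.motion.constant import constant

    rng = np.random.default_rng(42)
    frame0 = rng.random((64, 64))
    frame1 = np.roll(frame0, 1, axis=1)   # frame0 shifted by one pixel in x

    velocity = constant(np.stack([frame0, frame1]))
    # velocity has shape (2, 64, 64): x-components in velocity[0],
    # y-components in velocity[1].
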
/pysteps/motion/proesmans.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.motion.proesmans
4 | ========================
5 |
6 | Implementation of the anisotropic diffusion method of Proesmans et al. (1994).
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | proesmans
12 | """
13 |
14 | import numpy as np
15 | from scipy.ndimage import gaussian_filter
16 |
17 | from pysteps.decorators import check_input_frames
18 | from pysteps.motion._proesmans import _compute_advection_field
19 |
20 |
21 | @check_input_frames(2, 2)
22 | def proesmans(
23 | input_images,
24 | lam=50.0,
25 | num_iter=100,
26 | num_levels=6,
27 | filter_std=0.0,
28 | verbose=True,
29 | full_output=False,
30 | ):
31 | """
32 | Implementation of the anisotropic diffusion method of Proesmans et al.
33 | (1994).
34 |
35 | Parameters
36 | ----------
37 | input_images: array_like
38 | Array of shape (2, m, n) containing the first and second input image.
39 | lam: float
40 | Multiplier of the smoothness term. Smaller values give a smoother motion
41 | field.
42 | num_iter: float
43 | The number of iterations to use.
44 | num_levels: int
45 | The number of image pyramid levels to use.
46 | filter_std: float
47 | Standard deviation of an optional Gaussian filter that is applied before
48 | computing the optical flow.
49 | verbose: bool, optional
50 | Verbosity enabled if True (default).
51 | full_output: bool, optional
52 | If True, the output is a two-element tuple containing the
53 | forward-backward advection and consistency fields. The first element
54 | is shape (2, 2, m, n), where the index along the first dimension refers
55 | to the forward and backward advection fields. The second element is an
56 | array of shape (2, m, n), where the index along the first dimension
57 | refers to the forward and backward consistency fields.
58 | Default: False.
59 |
60 | Returns
61 | -------
62 | out: ndarray
63 | If full_output=False, the advection field having shape (2, m, n), where
64 | out[0, :, :] contains the x-components of the motion vectors and
65 | out[1, :, :] contains the y-components. The velocities are in units of
66 | pixels / timestep, where timestep is the time difference between the
67 | two input images.
68 |
69 | References
70 | ----------
71 | :cite:`PGPO1994`
72 |
73 | """
74 | del verbose # Not used
75 |
76 | im1 = input_images[-2, :, :].copy()
77 | im2 = input_images[-1, :, :].copy()
78 |
79 | im = np.stack([im1, im2])
80 | im_min = np.min(im)
81 | im_max = np.max(im)
82 | if im_max - im_min > 1e-8:
83 | im = (im - im_min) / (im_max - im_min) * 255.0
84 |
85 | if filter_std > 0.0:
86 | im[0, :, :] = gaussian_filter(im[0, :, :], filter_std)
87 | im[1, :, :] = gaussian_filter(im[1, :, :], filter_std)
88 |
89 | advfield, quality = _compute_advection_field(im, lam, num_iter, num_levels)
90 |
91 | if not full_output:
92 | return advfield[0]
93 | else:
94 | return advfield, quality
95 |
--------------------------------------------------------------------------------
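A minimal sketch of a call (the Cython extension _proesmans is built with the package); the random frames are placeholders only:

    import numpy as np
    from pysteps.motion.proesmans import proesmans

    rng = np.random.default_rng(0)
    frames = rng.random((2, 128, 128))

    advection = proesmans(frames, lam=50.0, num_iter=100, num_levels=6)
    # advection has shape (2, 128, 128), in pixels per timestep.

    # With full_output=True, the forward/backward advection fields and the
    # consistency fields are returned instead.
    fwd_bwd, consistency = proesmans(frames, full_output=True)
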
/pysteps/noise/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Methods for generating stochastic perturbations of 2d precipitation and
4 | velocity fields.
5 | """
6 |
7 | from .interface import get_method
8 | from . import utils, motion, fftgenerators
9 |
--------------------------------------------------------------------------------
/pysteps/noise/interface.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.noise.interface
4 | =======================
5 |
6 | Interface for the noise module.
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | get_method
12 | """
13 |
14 | from pysteps.noise.fftgenerators import (
15 | initialize_param_2d_fft_filter,
16 | generate_noise_2d_fft_filter,
17 | initialize_nonparam_2d_fft_filter,
18 | initialize_nonparam_2d_ssft_filter,
19 | generate_noise_2d_ssft_filter,
20 | initialize_nonparam_2d_nested_filter,
21 | )
22 | from pysteps.noise.motion import initialize_bps, generate_bps
23 |
24 | _noise_methods = dict()
25 |
26 | _noise_methods["parametric"] = (
27 | initialize_param_2d_fft_filter,
28 | generate_noise_2d_fft_filter,
29 | )
30 |
31 | _noise_methods["nonparametric"] = (
32 | initialize_nonparam_2d_fft_filter,
33 | generate_noise_2d_fft_filter,
34 | )
35 | _noise_methods["ssft"] = (
36 | initialize_nonparam_2d_ssft_filter,
37 | generate_noise_2d_ssft_filter,
38 | )
39 |
40 | _noise_methods["nested"] = (
41 | initialize_nonparam_2d_nested_filter,
42 | generate_noise_2d_ssft_filter,
43 | )
44 |
45 | _noise_methods["bps"] = (initialize_bps, generate_bps)
46 |
47 |
48 | def get_method(name):
49 | """
50 | Return two callable functions to initialize and generate 2d perturbations
51 | of precipitation or velocity fields.\n
52 |
53 | Methods for precipitation fields:
54 |
55 | +-------------------+------------------------------------------------------+
56 | | Name | Description |
57 | +===================+======================================================+
58 | | parametric | this global generator uses parametric Fourier |
59 | | | filtering (power-law model) |
60 | +-------------------+------------------------------------------------------+
61 | | nonparametric | this global generator uses nonparametric Fourier |
62 | | | filtering |
63 | +-------------------+------------------------------------------------------+
64 | | ssft | this local generator uses the short-space Fourier |
65 | | | filtering |
66 | +-------------------+------------------------------------------------------+
67 | | nested | this local generator uses a nested Fourier filtering |
68 | +-------------------+------------------------------------------------------+
69 |
70 | Methods for velocity fields:
71 |
72 | +-------------------+------------------------------------------------------+
73 | | Name | Description |
74 | +===================+======================================================+
75 | | bps | The method described in :cite:`BPS2006`, where |
76 | | | time-dependent velocity perturbations are sampled |
77 | | | from the exponential distribution |
78 | +-------------------+------------------------------------------------------+
79 |
80 | """
81 | if isinstance(name, str):
82 | name = name.lower()
83 | else:
84 | raise TypeError(
85 | "Only strings supported for the method's names.\n"
86 | + "Available names:"
87 | + str(list(_noise_methods.keys()))
88 | ) from None
89 |
90 | try:
91 | return _noise_methods[name]
92 | except KeyError:
93 | raise ValueError(
94 | "Unknown method {}\n".format(name)
95 | + "The available methods are:"
96 | + str(list(_noise_methods.keys()))
97 | ) from None
98 |
--------------------------------------------------------------------------------
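A rough sketch of the two-step initialize/generate pattern using the nonparametric generator; the input field is a placeholder, and the seed keyword is assumed to be accepted by the FFT-based generators:

    import numpy as np
    from pysteps import noise

    init_filter, generate_noise = noise.get_method("nonparametric")

    precip = np.random.rand(256, 256)            # placeholder target field
    noise_filter = init_filter(precip)
    eps = generate_noise(noise_filter, seed=42)  # correlated noise, same shape as precip
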
/pysteps/nowcasts/__init__.py:
--------------------------------------------------------------------------------
1 | """Implementations of deterministic and ensemble nowcasting methods."""
2 |
3 | from pysteps.nowcasts.interface import get_method
4 |
--------------------------------------------------------------------------------
/pysteps/nowcasts/extrapolation.py:
--------------------------------------------------------------------------------
1 | """
2 | pysteps.nowcasts.extrapolation
3 | ==============================
4 |
5 | Implementation of extrapolation-based nowcasting methods.
6 |
7 | .. autosummary::
8 | :toctree: ../generated/
9 |
10 | forecast
11 | """
12 |
13 | import time
14 | import numpy as np
15 |
16 | from pysteps import extrapolation
17 |
18 |
19 | def forecast(
20 | precip,
21 | velocity,
22 | timesteps,
23 | extrap_method="semilagrangian",
24 | extrap_kwargs=None,
25 | measure_time=False,
26 | ):
27 | """
28 | Generate a nowcast by applying a simple advection-based extrapolation to
29 | the given precipitation field.
30 |
31 | .. _ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
32 |
33 | Parameters
34 | ----------
35 | precip: array-like
36 | Two-dimensional array of shape (m,n) containing the input precipitation
37 | field.
38 | velocity: array-like
39 | Array of shape (2,m,n) containing the x- and y-components of the
40 | advection field. The velocities are assumed to represent one time step
41 | between the inputs.
42 | timesteps: int or list of floats
43 | Number of time steps to forecast or a list of time steps for which the
44 | forecasts are computed (relative to the input time step). The elements
45 | of the list are required to be in ascending order.
46 | extrap_method: str, optional
47 | Name of the extrapolation method to use. See the documentation of
48 | pysteps.extrapolation.interface.
49 | extrap_kwargs: dict, optional
50 | Optional dictionary that is expanded into keyword arguments for the
51 | extrapolation method.
52 | measure_time: bool, optional
53 | If True, measure, print, and return the computation time.
54 |
55 | Returns
56 | -------
57 | out: ndarray_
58 | Three-dimensional array of shape (num_timesteps, m, n) containing a time
59 | series of nowcast precipitation fields. The time series starts from
60 | t0 + timestep, where timestep is taken from the advection field velocity.
61 | If *measure_time* is True, the return value is a two-element tuple
62 | containing this array and the computation time (seconds).
63 |
64 | See also
65 | --------
66 | pysteps.extrapolation.interface
67 | """
68 |
69 | _check_inputs(precip, velocity, timesteps)
70 |
71 | if extrap_kwargs is None:
72 | extrap_kwargs = dict()
73 | else:
74 | extrap_kwargs = extrap_kwargs.copy()
75 |
76 | extrap_kwargs["allow_nonfinite_values"] = (
77 | True if np.any(~np.isfinite(precip)) else False
78 | )
79 |
80 | if measure_time:
81 | print(
82 | "Computing extrapolation nowcast from a "
83 | f"{precip.shape[0]:d}x{precip.shape[1]:d} input grid... ",
84 | end="",
85 | )
86 |
87 | if measure_time:
88 | start_time = time.time()
89 |
90 | extrapolation_method = extrapolation.get_method(extrap_method)
91 |
92 | precip_forecast = extrapolation_method(precip, velocity, timesteps, **extrap_kwargs)
93 |
94 | if measure_time:
95 | computation_time = time.time() - start_time
96 | print(f"{computation_time:.2f} seconds.")
97 |
98 | if measure_time:
99 | return precip_forecast, computation_time
100 | else:
101 | return precip_forecast
102 |
103 |
104 | def _check_inputs(precip, velocity, timesteps):
105 | if precip.ndim != 2:
106 | raise ValueError("The input precipitation must be a " "two-dimensional array")
107 | if velocity.ndim != 3:
108 | raise ValueError("Input velocity must be a three-dimensional array")
109 | if precip.shape != velocity.shape[1:3]:
110 | raise ValueError(
111 | "Dimension mismatch between "
112 | "input precipitation and velocity: "
113 | + "shape(precip)=%s, shape(velocity)=%s"
114 | % (str(precip.shape), str(velocity.shape))
115 | )
116 | if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
117 | raise ValueError("timesteps is not in ascending order")
118 |
--------------------------------------------------------------------------------
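A minimal synthetic example of the forecast function above; the uniform advection field is an arbitrary illustration:

    import numpy as np
    from pysteps.nowcasts import extrapolation

    precip = np.random.rand(64, 64)
    velocity = np.stack([np.ones((64, 64)), np.zeros((64, 64))])  # 1 px/step in x

    nowcast = extrapolation.forecast(precip, velocity, timesteps=3)
    # nowcast has shape (3, 64, 64); cells with no upstream data become nan,
    # as in the semi-Lagrangian tests further below.
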
/pysteps/postprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Methods for post-processing of forecasts."""
3 |
4 | from . import ensemblestats
5 | from .diagnostics import *
6 | from .interface import *
7 | from .ensemblestats import *
8 |
--------------------------------------------------------------------------------
/pysteps/postprocessing/diagnostics.py:
--------------------------------------------------------------------------------
1 | """
2 | pysteps.postprocessing.diagnostics
3 | ==================================
4 |
5 | Methods for applying diagnostics postprocessing.
6 |
7 | The methods in this module implement the following interface::
8 |
9 | diagnostic_xxx(optional arguments)
10 |
11 | where **xxx** is the name of the diagnostic to be applied.
12 |
13 | Available Diagnostics Postprocessors
14 | ------------------------------------
15 |
16 | .. autosummary::
17 | :toctree: ../generated/
18 |
19 | """
20 |
21 | # Add your diagnostic_ function here AND add this method to the _diagnostics_methods
22 | # dictionary in postprocessing.interface.py
23 |
--------------------------------------------------------------------------------
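A hypothetical example of what such a postprocessor could look like; the name diagnostic_maxvalue and its argument are made up for illustration and are not part of the package:

    import numpy as np

    def diagnostic_maxvalue(forecast):
        # Hypothetical diagnostic: maximum value over the spatial dimensions
        # of each ensemble member / lead time.
        return np.nanmax(forecast, axis=(-2, -1))

    # To expose it, the function would also be registered in the
    # _diagnostics_methods dictionary of postprocessing/interface.py,
    # as the comment in the module instructs.
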
/pysteps/pystepsrc_schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "pystepsrc params",
3 | "description": "Pysteps default parameters",
4 | "required": [
5 | "outputs",
6 | "plot",
7 | "data_sources"
8 | ],
9 | "type": "object",
10 | "properties": {
11 | "outputs": {
12 | "type": "object",
13 | "required": [
14 | "path_outputs"
15 | ],
16 | "properties": {
17 | "path_outputs": {
18 | "type": "string"
19 | }
20 | }
21 | },
22 | "plot": {
23 | "type": "object",
24 | "required": [
25 | "motion_plot",
26 | "colorscale"
27 | ],
28 | "properties": {
29 | "motion_plot": {
30 | "type": "string"
31 | },
32 | "colorscale": {
33 | "type": "string"
34 | }
35 | }
36 | },
37 | "data_sources": {
38 | "type": "object",
39 | "patternProperties": {
40 | "": {
41 | "type": "object",
42 | "required": [
43 | "root_path",
44 | "path_fmt",
45 | "fn_pattern",
46 | "fn_ext",
47 | "importer",
48 | "timestep",
49 | "importer_kwargs"
50 | ],
51 | "properties": {
52 | "root_path": {
53 | "type": "string"
54 | },
55 | "path_fmt": {
56 | "type": "string"
57 | },
58 | "fn_pattern": {
59 | "type": "string"
60 | },
61 | "fn_ext": {
62 | "type": "string"
63 | },
64 | "importer": {
65 | "type": "string"
66 | },
67 | "timestep": {
68 | "type": "number"
69 | },
70 | "importer_kwargs": {
71 | "type": "object"
72 | }
73 | }
74 | }
75 | }
76 | }
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
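A short sketch that validates a minimal configuration dictionary against this schema with jsonschema (listed in the development environment); all values below are placeholders, "bom_rf3" being one of the importers referenced in the bundled tests, and the schema file is read from the installed package directory:

    import json
    import os

    import jsonschema
    import pysteps

    config = {
        "outputs": {"path_outputs": "./pysteps_output"},
        "plot": {"motion_plot": "quiver", "colorscale": "pysteps"},
        "data_sources": {
            "my_source": {
                "root_path": "/data/radar",
                "path_fmt": "%Y/%m/%d",
                "fn_pattern": "radar_%Y%m%d_%H%M",
                "fn_ext": "nc",
                "importer": "bom_rf3",
                "timestep": 6,
                "importer_kwargs": {},
            }
        },
    }

    schema_path = os.path.join(os.path.dirname(pysteps.__file__), "pystepsrc_schema.json")
    with open(schema_path) as f:
        jsonschema.validate(config, json.load(f))  # raises ValidationError on mismatch
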
/pysteps/scripts/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Standalone utility scripts for pysteps (e.g. parameter estimation from the
4 | given data).
5 | """
6 |
--------------------------------------------------------------------------------
/pysteps/scripts/fit_vel_pert_params.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Fit STEPS motion perturbation parameters to the output of run_vel_pert_analysis.py
3 | and optionally plot the results. For a description of the method, see
4 | :cite:`BPS2006`."""
5 |
6 | import argparse
7 | import pickle
8 | from matplotlib import pyplot
9 | import numpy as np
10 | from scipy.optimize import curve_fit
11 |
12 | description = (
13 | "Fit STEPS motion perturbation parameters to the results produced"
14 | " by run_vel_pert_analysis.py and optionally plot the results."
15 | )
16 |
17 | argparser = argparse.ArgumentParser(description=description)
18 | argparser.add_argument("inputfile", type=str, help="name of the input file")
19 | argparser.add_argument(
20 | "--plot",
21 | nargs="?",
22 | type=str,
23 | metavar="filename",
24 |     help="plot the results and save the figure to the given file",
25 | )
26 | args = argparser.parse_args()
27 |
28 | with open(args.inputfile, "rb") as f:
29 | results = pickle.load(f)
30 |
31 | f = lambda t, a, b, c: a * pow(t, b) + c
32 |
33 | leadtimes = sorted(results.keys())
34 |
35 | std_par = []
36 | std_perp = []
37 |
38 | for lt in leadtimes:
39 | dp_par_sum = results[lt]["dp_par_sum"]
40 | dp_par_sq_sum = results[lt]["dp_par_sq_sum"]
41 | dp_par_n = results[lt]["n_samples"]
42 | mu = dp_par_sum / dp_par_n
43 |
44 | std_par.append(
45 | np.sqrt((dp_par_sq_sum - 2 * mu * dp_par_sum + dp_par_n * mu**2) / dp_par_n)
46 | )
47 |
48 | dp_perp_sum = results[lt]["dp_perp_sum"]
49 | dp_perp_sq_sum = results[lt]["dp_perp_sq_sum"]
50 | dp_perp_n = results[lt]["n_samples"]
51 | mu = dp_perp_sum / dp_perp_n
52 |
53 | std_perp.append(
54 | np.sqrt((dp_perp_sq_sum - 2 * mu * dp_perp_sum + dp_perp_n * mu**2) / dp_perp_n)
55 | )
56 |
57 | try:
58 | p_par = curve_fit(f, leadtimes, std_par)[0]
59 | p_perp = curve_fit(f, leadtimes, std_perp)[0]
60 | fit_succeeded = True
61 | print("p_par = %s" % str(p_par))
62 | print("p_perp = %s" % str(p_perp))
63 | except RuntimeError:
64 | fit_succeeded = False
65 | print("Parameter fitting failed.")
66 |
67 | if args.plot is not None:
68 | pyplot.figure()
69 |
70 | pyplot.scatter(leadtimes, std_par, c="r")
71 | t = np.linspace(0.5 * leadtimes[0], 1.025 * leadtimes[-1], 200)
72 | pyplot.scatter(leadtimes, std_perp, c="g")
73 | if fit_succeeded:
74 | (l1,) = pyplot.plot(t, f(t, *p_par), "r-")
75 | (l2,) = pyplot.plot(t, f(t, *p_perp), "g-")
76 |
77 |     p_str_1 = lambda p: r"%.2f\cdot t^{%.2f}+%.2f" % (p[0], p[1], p[2])
78 |     p_str_2 = lambda p: r"%.2f\cdot t^{%.2f}%.2f" % (p[0], p[1], p[2])
79 | if fit_succeeded:
80 | lbl = lambda p: p_str_1(p) if p[2] > 0.0 else p_str_2(p)
81 | pyplot.legend(
82 | [l1, l2],
83 | [
84 | "Parallel: $f(t)=%s$" % lbl(p_par),
85 | "Perpendicular: $f(t)=%s$" % lbl(p_perp),
86 | ],
87 | fontsize=12,
88 | )
89 | pyplot.xlim(0.5 * leadtimes[0], 1.025 * leadtimes[-1])
90 | pyplot.xlabel("Lead time (minutes)", fontsize=12)
91 | pyplot.ylabel("Standard deviation of differences (km/h)", fontsize=12)
92 | pyplot.grid(True)
93 |
94 | pyplot.savefig(args.plot, bbox_inches="tight")
95 |
--------------------------------------------------------------------------------
/pysteps/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pySTEPS/pysteps/abd1f52ce9f042a3e508fbf9c74eb5b172f79552/pysteps/tests/__init__.py
--------------------------------------------------------------------------------
/pysteps/tests/test_archive.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import pytest
4 | from datetime import datetime
5 |
6 | from pysteps.io.archive import _generate_path
7 |
8 | test_argvalues = [
9 | ("20190130_1200", "%Y/foo/%m", "./2019/foo/01"),
10 | ("20190225_1200", "%Y/foo/%m", "./2019/foo/02"),
11 | ("20190122_2222", "%Y/foo/%m", "./2019/foo/01"),
12 | ("20190130_1200", "%Y/foo/%m", "./2019/foo/01"),
13 | ("20190130_1205", "%Y%m%d/foo/bar/%H%M", "./20190130/foo/bar/1205"),
14 | ("20190130_1205", "foo/bar/%H%M", "./foo/bar/1205"),
15 | ]
16 |
17 |
18 | @pytest.mark.parametrize("timestamp, path_fmt, expected_path", test_argvalues)
19 | def test_generate_path(timestamp, path_fmt, expected_path):
20 | date = datetime.strptime(timestamp, "%Y%m%d_%H%M")
21 | assert _generate_path(date, "./", path_fmt) == expected_path
22 |
--------------------------------------------------------------------------------
/pysteps/tests/test_cascade.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 |
5 | import numpy as np
6 | import pytest
7 | from numpy.testing import assert_array_almost_equal
8 |
9 | import pysteps
10 | from pysteps import nowcasts
11 | from pysteps.cascade.bandpass_filters import filter_gaussian
12 | from pysteps.cascade.bandpass_filters import filter_uniform
13 | from pysteps.cascade.decomposition import decomposition_fft, recompose_fft
14 | from pysteps.tests.helpers import smart_assert
15 |
16 |
17 | def test_decompose_recompose():
18 | """Tests cascade decomposition."""
19 |
20 | pytest.importorskip("netCDF4")
21 |
22 | root_path = pysteps.rcparams.data_sources["bom"]["root_path"]
23 | rel_path = os.path.join("prcp-cscn", "2", "2018", "06", "16")
24 | filename = os.path.join(root_path, rel_path, "2_20180616_120000.prcp-cscn.nc")
25 | precip, _, metadata = pysteps.io.import_bom_rf3(filename)
26 |
27 | # Convert to rain rate from mm
28 | precip, metadata = pysteps.utils.to_rainrate(precip, metadata)
29 |
30 | # Log-transform the data
31 | precip, metadata = pysteps.utils.dB_transform(
32 | precip, metadata, threshold=0.1, zerovalue=-15.0
33 | )
34 |
35 | # Set Nans as the fill value
36 | precip[~np.isfinite(precip)] = metadata["zerovalue"]
37 |
38 | # Set number of cascade levels
39 | num_cascade_levels = 9
40 |
41 | # Construct the Gaussian bandpass filters
42 | _filter = filter_gaussian(precip.shape, num_cascade_levels)
43 |
44 | # Decompose precip
45 | decomp = decomposition_fft(precip, _filter)
46 |
47 | # Recomposed precip from decomp
48 | recomposed = recompose_fft(decomp)
49 | # Assert
50 | assert_array_almost_equal(recomposed.squeeze(), precip)
51 |
52 |
53 | test_metadata_filter = [
54 | ("central_freqs", None, None),
55 | ("central_wavenumbers", None, None),
56 | ]
57 |
58 |
59 | @pytest.mark.parametrize("variable, expected, tolerance", test_metadata_filter)
60 | def test_filter_uniform(variable, expected, tolerance):
61 | _filter = filter_uniform((8, 8), 1)
62 | smart_assert(_filter[variable], expected, tolerance)
63 |
64 |
65 | def test_filter_uniform_weights_1d():
66 | _filter = filter_uniform((8, 8), 1)
67 | assert_array_almost_equal(_filter["weights_1d"], np.ones((1, 5)))
68 |
69 |
70 | def test_filter_uniform_weights_2d():
71 | _filter = filter_uniform((8, 8), 1)
72 | assert_array_almost_equal(_filter["weights_2d"], np.ones((1, 8, 5)))
73 |
--------------------------------------------------------------------------------
/pysteps/tests/test_datasets.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | from tempfile import TemporaryDirectory
4 |
5 | import pytest
6 | from _pytest.outcomes import Skipped
7 |
8 | import pysteps
9 | from pysteps.datasets import (
10 | download_pysteps_data,
11 | create_default_pystepsrc,
12 | load_dataset,
13 | )
14 | from pysteps.exceptions import DirectoryNotEmpty
15 |
16 |
17 | _datasets_opt_deps = dict(
18 | fmi=["pyproj"],
19 | mch=["PIL"],
20 | bom=["netCDF4"],
21 | knmi=["h5py"],
22 | opera=["h5py"],
23 | mrms=["pygrib"],
24 | )
25 |
26 |
27 | @pytest.mark.parametrize("case_name", _datasets_opt_deps.keys())
28 | def test_load_dataset(case_name):
29 | """Test the load dataset function."""
30 |
31 | with pytest.raises(ValueError):
32 | load_dataset(frames=100)
33 |
34 | for mod_name in _datasets_opt_deps[case_name]:
35 | pytest.importorskip(mod_name)
36 |
37 | try:
38 | load_dataset(case=case_name, frames=1)
39 | except Skipped:
40 | pass
41 |
42 |
43 | def _test_download_data():
44 | """Test the example data installers."""
45 | temp_dir = TemporaryDirectory()
46 |
47 | try:
48 | download_pysteps_data(temp_dir.name, force=True)
49 | with pytest.raises(DirectoryNotEmpty):
50 | download_pysteps_data(temp_dir.name, force=False)
51 |
52 | params_file = create_default_pystepsrc(temp_dir.name, config_dir=temp_dir.name)
53 |
54 | pysteps.load_config_file(params_file)
55 |
56 | finally:
57 | temp_dir.cleanup()
58 | pysteps.load_config_file()
59 |
60 |
61 | def _default_path():
62 | """
63 | Default pystepsrc path."""
64 | home_dir = os.path.expanduser("~")
65 | if os.name == "nt":
66 | subdir = "pysteps"
67 | else:
68 | subdir = ".pysteps"
69 | return os.path.join(home_dir, subdir, "pystepsrc")
70 |
71 |
72 | test_params_paths = [
73 | (None, "pystepsrc", _default_path()),
74 | ("/root/path", "pystepsrc", "/root/path/pystepsrc"),
75 | ("/root/path", "pystepsrc2", "/root/path/pystepsrc2"),
76 | ("relative/path", "pystepsrc2", "relative/path/pystepsrc2"),
77 | ("relative/path", "pystepsrc", "relative/path/pystepsrc"),
78 | ]
79 |
80 |
81 | @pytest.mark.parametrize("config_dir, file_name, expected_path", test_params_paths)
82 | def test_params_file_creation_path(config_dir, file_name, expected_path):
83 | """Test that the default pysteps parameters file is created in the right place."""
84 |
85 | # For windows compatibility
86 | if config_dir is not None:
87 | config_dir = os.path.normpath(config_dir)
88 | expected_path = os.path.normpath(expected_path)
89 |
90 | pysteps_data_dir = "dummy/path/to/data"
91 | params_file_path = create_default_pystepsrc(
92 | pysteps_data_dir, config_dir=config_dir, file_name=file_name, dryrun=True
93 | )
94 |
95 | assert expected_path == params_file_path
96 |
--------------------------------------------------------------------------------
/pysteps/tests/test_decorators.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import time
3 |
4 | from pysteps.decorators import memoize
5 |
6 |
7 | def test_memoize():
8 | @memoize(maxsize=1)
9 | def _slow_function(x, **kwargs):
10 | time.sleep(1)
11 | return x
12 |
13 | for i in range(2):
14 | out = _slow_function(i, hkey=i)
15 | assert out == i
16 |
17 | # cached result
18 | t0 = time.monotonic()
19 | out = _slow_function(1, hkey=1)
20 | assert time.monotonic() - t0 < 1
21 | assert out == 1
22 |
23 | # maxsize exceeded
24 | t0 = time.monotonic()
25 | out = _slow_function(0, hkey=0)
26 | assert time.monotonic() - t0 >= 1
27 | assert out == 0
28 |
29 | # no hash
30 | t0 = time.monotonic()
31 | out = _slow_function(1)
32 | assert time.monotonic() - t0 >= 1
33 | assert out == 1
34 |
--------------------------------------------------------------------------------
/pysteps/tests/test_ensscores.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 | from numpy.testing import assert_array_almost_equal
6 |
7 | from pysteps.tests.helpers import get_precipitation_fields
8 | from pysteps.verification import ensscores
9 |
10 | precip = get_precipitation_fields(num_next_files=10, return_raw=True)
11 | np.random.seed(42)
12 |
13 | # rankhist
14 | test_data = [
15 | (precip[:10], precip[-1], None, True, 11),
16 | (precip[:10], precip[-1], None, False, 11),
17 | ]
18 |
19 |
20 | @pytest.mark.parametrize("X_f, X_o, X_min, normalize, expected", test_data)
21 | def test_rankhist_size(X_f, X_o, X_min, normalize, expected):
22 | """Test the rankhist."""
23 | assert_array_almost_equal(
24 | ensscores.rankhist(X_f, X_o, X_min, normalize).size, expected
25 | )
26 |
27 |
28 | # ensemble_skill
29 | test_data = [
30 | (
31 | precip[:10],
32 | precip[-1],
33 | "RMSE",
34 | {"axis": None, "conditioning": "single"},
35 | 0.26054151,
36 | ),
37 | (precip[:10], precip[-1], "CSI", {"thr": 1.0, "axis": None}, 0.22017924),
38 | (precip[:10], precip[-1], "FSS", {"thr": 1.0, "scale": 10}, 0.63239752),
39 | ]
40 |
41 |
42 | @pytest.mark.parametrize("X_f, X_o, metric, kwargs, expected", test_data)
43 | def test_ensemble_skill(X_f, X_o, metric, kwargs, expected):
44 | """Test the ensemble_skill."""
45 | assert_array_almost_equal(
46 | ensscores.ensemble_skill(X_f, X_o, metric, **kwargs), expected
47 | )
48 |
49 |
50 | # ensemble_spread
51 | test_data = [
52 | (precip, "RMSE", {"axis": None, "conditioning": "single"}, 0.22635757),
53 | (precip, "CSI", {"thr": 1.0, "axis": None}, 0.25218158),
54 | (precip, "FSS", {"thr": 1.0, "scale": 10}, 0.70235667),
55 | ]
56 |
57 |
58 | @pytest.mark.parametrize("X_f, metric, kwargs, expected", test_data)
59 | def test_ensemble_spread(X_f, metric, kwargs, expected):
60 | """Test the ensemble_spread."""
61 | assert_array_almost_equal(
62 | ensscores.ensemble_spread(X_f, metric, **kwargs), expected
63 | )
64 |
--------------------------------------------------------------------------------
/pysteps/tests/test_extrapolation_semilagrangian.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import numpy as np
3 | import pytest
4 | from numpy.testing import assert_array_almost_equal
5 |
6 | from pysteps.extrapolation.semilagrangian import extrapolate
7 |
8 |
9 | def test_semilagrangian():
10 | """Test semilagrangian extrapolation with number of timesteps."""
11 | # inputs
12 | precip = np.zeros((8, 8))
13 | precip[0, 0] = 1
14 | v = np.ones((8, 8))
15 | velocity = np.stack([v, v])
16 | num_timesteps = 1
17 | # expected
18 | expected = np.zeros((1, 8, 8))
19 | expected[:, :, 0] = np.nan
20 | expected[:, 0, :] = np.nan
21 | expected[:, 1, 1] = 1
22 | # result
23 | result = extrapolate(precip, velocity, num_timesteps)
24 | assert_array_almost_equal(result, expected)
25 |
26 |
27 | def test_wrong_input_dimensions():
28 | p_1d = np.ones(8)
29 | p_2d = np.ones((8, 8))
30 | p_3d = np.ones((8, 8, 2))
31 | v_2d = np.ones((8, 8))
32 | v_3d = np.stack([v_2d, v_2d])
33 |
34 | num_timesteps = 1
35 |
36 | invalid_inputs = [
37 | (p_1d, v_3d),
38 | (p_2d, v_2d),
39 | (p_3d, v_2d),
40 | (p_3d, v_3d),
41 | ]
42 | for precip, velocity in invalid_inputs:
43 | with pytest.raises(ValueError):
44 | extrapolate(precip, velocity, num_timesteps)
45 |
46 |
47 | def test_ascending_time_step():
48 | precip = np.ones((8, 8))
49 | v = np.ones((8, 8))
50 | velocity = np.stack([v, v])
51 |
52 | not_ascending_timesteps = [1, 2, 3, 5, 4, 6, 7]
53 | with pytest.raises(ValueError):
54 | extrapolate(precip, velocity, not_ascending_timesteps)
55 |
56 |
57 | def test_semilagrangian_timesteps():
58 | """Test semilagrangian extrapolation with list of timesteps."""
59 | # inputs
60 | precip = np.zeros((8, 8))
61 | precip[0, 0] = 1
62 | v = np.ones((8, 8)) * 10
63 | velocity = np.stack([v, v])
64 | timesteps = [0.1]
65 | # expected
66 | expected = np.zeros((1, 8, 8))
67 | expected[:, :, 0] = np.nan
68 | expected[:, 0, :] = np.nan
69 | expected[:, 1, 1] = 1
70 | # result
71 | result = extrapolate(precip, velocity, timesteps)
72 | assert_array_almost_equal(result, expected)
73 |
--------------------------------------------------------------------------------
/pysteps/tests/test_feature.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | from pysteps import feature
4 | from pysteps.tests.helpers import get_precipitation_fields
5 |
6 | arg_names = ["method", "max_num_features"]
7 | arg_values = [("blob", None), ("blob", 5), ("shitomasi", None), ("shitomasi", 5)]
8 |
9 |
10 | @pytest.mark.parametrize(arg_names, arg_values)
11 | def test_feature(method, max_num_features):
12 | if method == "blob":
13 | pytest.importorskip("skimage")
14 | if method == "shitomasi":
15 | pytest.importorskip("cv2")
16 |
17 | input_field = get_precipitation_fields(
18 | num_prev_files=0,
19 | num_next_files=0,
20 | return_raw=True,
21 | metadata=False,
22 | upscale=None,
23 | source="mch",
24 | )
25 |
26 | detector = feature.get_method(method)
27 |
28 | kwargs = {"max_num_features": max_num_features}
29 | output = detector(input_field.squeeze(), **kwargs)
30 |
31 | assert isinstance(output, np.ndarray)
32 | assert output.ndim == 2
33 | assert output.shape[0] > 0
34 | if max_num_features is not None:
35 | assert output.shape[0] <= max_num_features
36 | assert output.shape[1] == 2
37 |
--------------------------------------------------------------------------------
/pysteps/tests/test_feature_tstorm.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from pysteps.feature.tstorm import detection
5 | from pysteps.utils import to_reflectivity
6 | from pysteps.tests.helpers import get_precipitation_fields
7 |
8 | try:
9 | from pandas import DataFrame
10 | except ModuleNotFoundError:
11 | pass
12 |
13 | arg_names = (
14 | "source",
15 | "output_feat",
16 | "dry_input",
17 | "max_num_features",
18 | "output_split_merge",
19 | )
20 |
21 | arg_values = [
22 | ("mch", False, False, None, False),
23 | ("mch", False, False, 5, False),
24 | ("mch", True, False, None, False),
25 | ("mch", True, False, 5, False),
26 | ("mch", False, True, None, False),
27 | ("mch", False, True, 5, False),
28 | ("mch", False, False, None, True),
29 | ]
30 |
31 |
32 | @pytest.mark.parametrize(arg_names, arg_values)
33 | def test_feature_tstorm_detection(
34 | source, output_feat, dry_input, max_num_features, output_split_merge
35 | ):
36 | pytest.importorskip("skimage")
37 | pytest.importorskip("pandas")
38 |
39 | if not dry_input:
40 | input, metadata = get_precipitation_fields(0, 0, True, True, None, source)
41 | input = input.squeeze()
42 | input, __ = to_reflectivity(input, metadata)
43 | else:
44 | input = np.zeros((50, 50))
45 |
46 | time = "000"
47 | output = detection(
48 | input,
49 | time=time,
50 | output_feat=output_feat,
51 | max_num_features=max_num_features,
52 | output_splits_merges=output_split_merge,
53 | )
54 |
55 | if output_feat:
56 | assert isinstance(output, np.ndarray)
57 | assert output.ndim == 2
58 | assert output.shape[1] == 2
59 | if max_num_features is not None:
60 | assert output.shape[0] <= max_num_features
61 | elif output_split_merge:
62 | assert isinstance(output, tuple)
63 | assert len(output) == 2
64 | assert isinstance(output[0], DataFrame)
65 | assert isinstance(output[1], np.ndarray)
66 | if max_num_features is not None:
67 | assert output[0].shape[0] <= max_num_features
68 | assert output[0].shape[1] == 15
69 | assert list(output[0].columns) == [
70 | "ID",
71 | "time",
72 | "x",
73 | "y",
74 | "cen_x",
75 | "cen_y",
76 | "max_ref",
77 | "cont",
78 | "area",
79 | "splitted",
80 | "split_IDs",
81 | "merged",
82 | "merged_IDs",
83 | "results_from_split",
84 | "will_merge",
85 | ]
86 | assert (output[0].time == time).all()
87 | assert output[1].ndim == 2
88 | assert output[1].shape == input.shape
89 | if not dry_input:
90 | assert output[0].shape[0] > 0
91 | assert sorted(list(output[0].ID)) == sorted(list(np.unique(output[1]))[1:])
92 | else:
93 | assert output[0].shape[0] == 0
94 | assert output[1].sum() == 0
95 | else:
96 | assert isinstance(output, tuple)
97 | assert len(output) == 2
98 | assert isinstance(output[0], DataFrame)
99 | assert isinstance(output[1], np.ndarray)
100 | if max_num_features is not None:
101 | assert output[0].shape[0] <= max_num_features
102 | assert output[0].shape[1] == 9
103 | assert list(output[0].columns) == [
104 | "ID",
105 | "time",
106 | "x",
107 | "y",
108 | "cen_x",
109 | "cen_y",
110 | "max_ref",
111 | "cont",
112 | "area",
113 | ]
114 | assert (output[0].time == time).all()
115 | assert output[1].ndim == 2
116 | assert output[1].shape == input.shape
117 | if not dry_input:
118 | assert output[0].shape[0] > 0
119 | assert sorted(list(output[0].ID)) == sorted(list(np.unique(output[1]))[1:])
120 | else:
121 | assert output[0].shape[0] == 0
122 | assert output[1].sum() == 0
123 |
--------------------------------------------------------------------------------
/pysteps/tests/test_importer_decorator.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from functools import partial
3 |
4 | import numpy as np
5 | import pytest
6 |
7 | from pysteps.tests.helpers import get_precipitation_fields
8 |
9 | default_dtypes = dict(
10 | fmi="double",
11 | knmi="double",
12 | mch="double",
13 | opera="double",
14 | saf="double",
15 | mrms="single",
16 | )
17 |
18 |
19 | @pytest.mark.parametrize("source, default_dtype", default_dtypes.items())
20 | def test_postprocess_import_decorator(source, default_dtype):
21 | """Test the postprocessing decorator for the importers."""
22 | import_data = partial(get_precipitation_fields, return_raw=True, source=source)
23 |
24 | precip = import_data()
25 | invalid_mask = ~np.isfinite(precip)
26 |
27 | assert precip.dtype == default_dtype
28 |
29 | if default_dtype == "single":
30 | dtype = "double"
31 | else:
32 | dtype = "single"
33 |
34 | precip = import_data(dtype=dtype)
35 |
36 | assert precip.dtype == dtype
37 |
38 | # Test that invalid types are handled correctly
39 | for dtype in ["int", "int64"]:
40 | with pytest.raises(ValueError):
41 | _ = import_data(dtype=dtype)
42 |
43 | precip = import_data(fillna=-1000)
44 | new_invalid_mask = precip == -1000
45 | assert (new_invalid_mask == invalid_mask).all()
46 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_archive.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 | import pytest
4 |
5 | import pysteps
6 |
7 |
8 | def test_find_by_date_mch():
9 | pytest.importorskip("PIL")
10 |
11 | date = datetime.strptime("201505151630", "%Y%m%d%H%M")
12 | data_source = pysteps.rcparams.data_sources["mch"]
13 | root_path = data_source["root_path"]
14 | path_fmt = data_source["path_fmt"]
15 | fn_pattern = data_source["fn_pattern"]
16 | fn_ext = data_source["fn_ext"]
17 | timestep = data_source["timestep"]
18 |
19 | fns = pysteps.io.archive.find_by_date(
20 | date,
21 | root_path,
22 | path_fmt,
23 | fn_pattern,
24 | fn_ext,
25 | timestep=timestep,
26 | num_prev_files=1,
27 | num_next_files=1,
28 | )
29 |
30 | assert len(fns) == 2
31 | assert len(fns[0]) == 3
32 | assert len(fns[1]) == 3
33 | assert isinstance(fns[0][0], str)
34 | assert isinstance(fns[1][0], datetime)
35 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_bom_rf3.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 |
5 | import pytest
6 |
7 | import pysteps
8 | from pysteps.tests.helpers import smart_assert
9 |
10 | netCDF4 = pytest.importorskip("netCDF4")
11 |
12 | # Test import_bom_rf3 function
13 | expected_proj1 = (
14 | "+proj=aea +lon_0=144.752 +lat_0=-37.852 " "+lat_1=-18.000 +lat_2=-36.000"
15 | )
16 |
17 | test_metadata_bom = [
18 | ("transform", None, None),
19 | ("zerovalue", 0.0, 0.1),
20 | ("projection", expected_proj1, None),
21 | ("unit", "mm", None),
22 | ("accutime", 6, 0.1),
23 | ("x1", -128000.0, 0.1),
24 | ("x2", 127500.0, 0.1),
25 | ("y1", -127500.0, 0.1),
26 | ("y2", 128000.0, 0.1),
27 | ("xpixelsize", 500.0, 0.1),
28 | ("ypixelsize", 500.0, 0.1),
29 | ("cartesian_unit", "m", None),
30 | ("yorigin", "upper", None),
31 | ("institution", "Commonwealth of Australia, Bureau of Meteorology", None),
32 | ]
33 |
34 |
35 | @pytest.mark.parametrize("variable, expected, tolerance", test_metadata_bom)
36 | def test_io_import_bom_rf3_metadata(variable, expected, tolerance):
37 | """Test the importer Bom RF3."""
38 | root_path = pysteps.rcparams.data_sources["bom"]["root_path"]
39 | rel_path = os.path.join("prcp-cscn", "2", "2018", "06", "16")
40 | filename = os.path.join(root_path, rel_path, "2_20180616_100000.prcp-cscn.nc")
41 | precip, _, metadata = pysteps.io.import_bom_rf3(filename)
42 | smart_assert(metadata[variable], expected, tolerance)
43 | assert precip.shape == (512, 512)
44 |
45 |
46 | # Test _import_bom_rf3_data function
47 | def test_io_import_bom_rf3_shape():
48 | """Test the importer Bom RF3."""
49 | root_path = pysteps.rcparams.data_sources["bom"]["root_path"]
50 | rel_path = os.path.join("prcp-cscn", "2", "2018", "06", "16")
51 | filename = os.path.join(root_path, rel_path, "2_20180616_100000.prcp-cscn.nc")
52 | precip, _ = pysteps.io.importers._import_bom_rf3_data(filename)
53 | assert precip.shape == (512, 512)
54 |
55 |
56 | # Test _import_bom_rf3_geodata function
57 | expected_proj2 = (
58 | "+proj=aea +lon_0=144.752 +lat_0=-37.852 " "+lat_1=-18.000 +lat_2=-36.000"
59 | )
60 | # test_geodata: list of (variable,expected,tolerance) tuples
61 | test_geodata_bom = [
62 | ("projection", expected_proj2, None),
63 | ("unit", "mm", None),
64 | ("accutime", 6, 0.1),
65 | ("x1", -128000.0, 0.1),
66 | ("x2", 127500.0, 0.1),
67 | ("y1", -127500.0, 0.1),
68 | ("y2", 128000.0, 0.1),
69 | ("xpixelsize", 500.0, 0.1),
70 | ("ypixelsize", 500.0, 0.1),
71 | ("cartesian_unit", "m", None),
72 | ("yorigin", "upper", None),
73 | ("institution", "Commonwealth of Australia, Bureau of Meteorology", None),
74 | ]
75 |
76 |
77 | @pytest.mark.parametrize("variable, expected, tolerance", test_geodata_bom)
78 | def test_io_import_bom_rf3_geodata(variable, expected, tolerance):
79 | """Test the importer Bom RF3."""
80 | root_path = pysteps.rcparams.data_sources["bom"]["root_path"]
81 | rel_path = os.path.join("prcp-cscn", "2", "2018", "06", "16")
82 | filename = os.path.join(root_path, rel_path, "2_20180616_100000.prcp-cscn.nc")
83 | ds_rainfall = netCDF4.Dataset(filename)
84 | geodata = pysteps.io.importers._import_bom_rf3_geodata(ds_rainfall)
85 | smart_assert(geodata[variable], expected, tolerance)
86 |
87 | ds_rainfall.close()
88 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_fmi_geotiff.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 |
5 | import pysteps
6 | from pysteps.tests.helpers import smart_assert
7 |
8 | pytest.importorskip("pyproj")
9 | pytest.importorskip("osgeo")
10 |
11 | root_path = pysteps.rcparams.data_sources["fmi_geotiff"]["root_path"]
12 | filename = os.path.join(
13 | root_path,
14 | "20160928",
15 | "201609281600_FINUTM.tif",
16 | )
17 | precip, _, metadata = pysteps.io.import_fmi_geotiff(filename)
18 |
19 |
20 | def test_io_import_fmi_geotiff_shape():
21 | """Test the shape of the read file."""
22 | assert precip.shape == (7316, 4963)
23 |
24 |
25 | expected_proj = (
26 | "+proj=utm +zone=35 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
27 | )
28 |
29 | # test_geodata: list of (variable,expected,tolerance) tuples
30 | test_geodata = [
31 | ("projection", expected_proj, None),
32 | ("x1", -196593.0043142295908183, 1e-10),
33 | ("x2", 1044176.9413554778, 1e-10),
34 | ("y1", 6255329.6988206729292870, 1e-10),
35 | ("y2", 8084432.005259146, 1e-10),
36 | ("xpixelsize", 250.0040188736061566, 1e-6),
37 | ("ypixelsize", 250.0139839309011904, 1e-6),
38 | ("cartesian_unit", "m", None),
39 | ("yorigin", "upper", None),
40 | ]
41 |
42 |
43 | @pytest.mark.parametrize("variable, expected, tolerance", test_geodata)
44 | def test_io_import_fmi_geotiff_geodata(variable, expected, tolerance):
45 | """Test the GeoTIFF and metadata reading."""
46 | smart_assert(metadata[variable], expected, tolerance)
47 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_fmi_pgm.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 |
5 | import pysteps
6 | from pysteps.tests.helpers import smart_assert
7 |
8 | pytest.importorskip("pyproj")
9 |
10 |
11 | root_path = pysteps.rcparams.data_sources["fmi"]["root_path"]
12 | filename = os.path.join(
13 | root_path,
14 | "20160928",
15 | "201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz",
16 | )
17 | precip, _, metadata = pysteps.io.import_fmi_pgm(filename, gzipped=True)
18 |
19 |
20 | def test_io_import_fmi_pgm_shape():
21 | """Test the importer FMI PGM."""
22 | assert precip.shape == (1226, 760)
23 |
24 |
25 | expected_proj = (
26 | "+proj=stere +lon_0=25E +lat_0=90N "
27 | "+lat_ts=60 +a=6371288 +x_0=380886.310 "
28 | "+y_0=3395677.920 +no_defs"
29 | )
30 |
31 | test_attrs = [
32 | ("projection", expected_proj, None),
33 | ("institution", "Finnish Meteorological Institute", None),
34 | # ("composite_area", ["FIN"]),
35 | # ("projection_name", ["SUOMI1"]),
36 | # ("radar", ["LUO", "1", "26.9008", "67.1386"]),
37 | # ("obstime", ["201609281600"]),
38 | # ("producttype", ["CAPPI"]),
39 | # ("productname", ["LOWEST"]),
40 | # ("param", ["CorrectedReflectivity"]),
41 | # ("metersperpixel_x", ["999.674053"]),
42 | # ("metersperpixel_y", ["999.62859"]),
43 | # ("projection", ["radar", "{"]),
44 | # ("type", ["stereographic"]),
45 | # ("centrallongitude", ["25"]),
46 | # ("centrallatitude", ["90"]),
47 | # ("truelatitude", ["60"]),
48 | # ("bottomleft", ["18.600000", "57.930000"]),
49 | # ("topright", ["34.903000", "69.005000"]),
50 | # ("missingval", 255),
51 | ("accutime", 5.0, 0.1),
52 | ("unit", "dBZ", None),
53 | ("transform", "dB", None),
54 | ("zerovalue", -32.0, 0.1),
55 | ("threshold", -31.5, 0.1),
56 | ("zr_a", 223.0, 0.1),
57 | ("zr_b", 1.53, 0.1),
58 | ]
59 |
60 |
61 | @pytest.mark.parametrize("variable, expected, tolerance", test_attrs)
62 | def test_io_import_fmi_pgm_dataset_attrs(variable, expected, tolerance):
63 | """Test the importer FMI PMG."""
64 | smart_assert(metadata[variable], expected, tolerance)
65 |
66 |
67 | # test_geodata: list of (variable,expected,tolerance) tuples
68 | test_geodata = [
69 | ("projection", expected_proj, None),
70 | ("x1", 0.0049823258887045085, 1e-20),
71 | ("x2", 759752.2852757066, 1e-10),
72 | ("y1", 0.009731985162943602, 1e-18),
73 | ("y2", 1225544.6588913496, 1e-10),
74 | ("xpixelsize", 999.674053, 1e-6),
75 | ("ypixelsize", 999.62859, 1e-5),
76 | ("cartesian_unit", "m", None),
77 | ("yorigin", "upper", None),
78 | ]
79 |
80 |
81 | @pytest.mark.parametrize("variable, expected, tolerance", test_geodata)
82 | def test_io_import_fmi_pgm_geodata(variable, expected, tolerance):
83 | """Test the importer FMI pgm."""
84 | root_path = pysteps.rcparams.data_sources["fmi"]["root_path"]
85 | filename = os.path.join(
86 | root_path,
87 | "20160928",
88 | "201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz",
89 | )
90 | metadata = pysteps.io.importers._import_fmi_pgm_metadata(filename, gzipped=True)
91 | geodata = pysteps.io.importers._import_fmi_pgm_geodata(metadata)
92 |
93 | smart_assert(geodata[variable], expected, tolerance)
94 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_knmi_hdf5.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 |
5 | import pytest
6 |
7 | import pysteps
8 | from pysteps.tests.helpers import smart_assert
9 |
10 | pytest.importorskip("h5py")
11 |
12 |
13 | root_path = pysteps.rcparams.data_sources["knmi"]["root_path"]
14 | filename = os.path.join(root_path, "2010/08", "RAD_NL25_RAP_5min_201008260000.h5")
15 | precip, _, metadata = pysteps.io.import_knmi_hdf5(filename)
16 |
17 |
18 | def test_io_import_knmi_hdf5_shape():
19 | """Test the importer KNMI HDF5."""
20 | assert precip.shape == (765, 700)
21 |
22 |
23 | # test_metadata: list of (variable,expected, tolerance) tuples
24 |
25 | expected_proj = (
26 | "+proj=stere +lat_0=90 +lon_0=0.0 +lat_ts=60.0 +a=6378137 +b=6356752 +x_0=0 +y_0=0"
27 | )
28 |
29 | # list of (variable,expected,tolerance) tuples
30 | test_attrs = [
31 | ("projection", expected_proj, None),
32 | ("x1", 0.0, 1e-10),
33 | ("y1", -4415038.179210632, 1e-10),
34 | ("x2", 699984.2646331593, 1e-10),
35 | ("y2", -3649950.360247753, 1e-10),
36 | ("xpixelsize", 1000.0, 1e-10),
37 | ("xpixelsize", 1000.0, 1e-10),
38 | ("cartesian_unit", "m", None),
39 | ("accutime", 5.0, 1e-10),
40 | ("yorigin", "upper", None),
41 | ("unit", "mm", None),
42 | ("institution", "KNMI - Royal Netherlands Meteorological Institute", None),
43 | ("transform", None, None),
44 | ("zerovalue", 0.0, 1e-10),
45 | ("threshold", 0.01, 1e-10),
46 | ("zr_a", 200.0, None),
47 | ("zr_b", 1.6, None),
48 | ]
49 |
50 |
51 | @pytest.mark.parametrize("variable,expected,tolerance", test_attrs)
52 | def test_io_import_knmi_hdf5_metadata(variable, expected, tolerance):
53 | """Test the importer KNMI HDF5."""
54 | smart_assert(metadata[variable], expected, tolerance)
55 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_mch_gif.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 |
5 | import pytest
6 |
7 | import pysteps
8 | from pysteps.tests.helpers import smart_assert
9 |
10 | pytest.importorskip("PIL")
11 |
12 | root_path = pysteps.rcparams.data_sources["mch"]["root_path"]
13 | filename = os.path.join(root_path, "20170131", "AQC170310945F_00005.801.gif")
14 | precip, _, metadata = pysteps.io.import_mch_gif(filename, "AQC", "mm", 5.0)
15 |
16 |
17 | def test_io_import_mch_gif_shape():
18 | """Test the importer MCH GIF."""
19 | assert precip.shape == (640, 710)
20 |
21 |
22 | expected_proj = (
23 | "+proj=somerc +lon_0=7.43958333333333 "
24 | "+lat_0=46.9524055555556 +k_0=1 "
25 | "+x_0=600000 +y_0=200000 +ellps=bessel "
26 | "+towgs84=674.374,15.056,405.346,0,0,0,0 "
27 | "+units=m +no_defs"
28 | )
29 |
30 | # list of (variable,expected,tolerance) tuples
31 | test_attrs = [
32 | ("projection", expected_proj, None),
33 | ("institution", "MeteoSwiss", None),
34 | ("accutime", 5.0, 0.1),
35 | ("unit", "mm", None),
36 | ("transform", None, None),
37 | ("zerovalue", 0.0, 0.1),
38 | ("threshold", 0.0009628129986471908, 1e-19),
39 | ("zr_a", 316.0, 0.1),
40 | ("zr_b", 1.5, 0.1),
41 | ("x1", 255000.0, 0.1),
42 | ("y1", -160000.0, 0.1),
43 | ("x2", 965000.0, 0.1),
44 | ("y2", 480000.0, 0.1),
45 | ("xpixelsize", 1000.0, 0.1),
46 | ("ypixelsize", 1000.0, 0.1),
47 | ("cartesian_unit", "m", None),
48 | ("yorigin", "upper", None),
49 | ]
50 |
51 |
52 | @pytest.mark.parametrize("variable, expected, tolerance", test_attrs)
53 | def test_io_import_mch_gif_dataset_attrs(variable, expected, tolerance):
54 | """Test the importer MCH GIF."""
55 | smart_assert(metadata[variable], expected, tolerance)
56 |
57 |
58 | # test_geodata: list of (variable,expected,tolerance) tuples
59 | test_geodata = [
60 | ("projection", expected_proj, None),
61 | ("x1", 255000.0, 0.1),
62 | ("y1", -160000.0, 0.1),
63 | ("x2", 965000.0, 0.1),
64 | ("y2", 480000.0, 0.1),
65 | ("xpixelsize", 1000.0, 0.1),
66 | ("ypixelsize", 1000.0, 0.1),
67 | ("cartesian_unit", "m", None),
68 | ("yorigin", "upper", None),
69 | ]
70 |
71 |
72 | @pytest.mark.parametrize("variable, expected, tolerance", test_geodata)
73 | def test_io_import_mch_geodata(variable, expected, tolerance):
74 | """Test the importer MCH geodata."""
75 | geodata = pysteps.io.importers._import_mch_geodata()
76 | smart_assert(geodata[variable], expected, tolerance)
77 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_mrms_grib.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 |
5 | import numpy as np
6 | import pytest
7 | from numpy.testing import assert_array_almost_equal
8 |
9 | import pysteps
10 |
11 | pytest.importorskip("pygrib")
12 |
13 |
14 | def test_io_import_mrms_grib():
15 | """Test the importer for NSSL data."""
16 |
17 | root_path = pysteps.rcparams.data_sources["mrms"]["root_path"]
18 | filename = os.path.join(
19 | root_path, "2019/06/10/", "PrecipRate_00.00_20190610-000000.grib2"
20 | )
21 | precip, _, metadata = pysteps.io.import_mrms_grib(filename, fillna=0, window_size=1)
22 |
23 | assert precip.shape == (3500, 7000)
24 | assert precip.dtype == "single"
25 |
26 | expected_metadata = {
27 | "institution": "NOAA National Severe Storms Laboratory",
28 | "xpixelsize": 0.01,
29 | "ypixelsize": 0.01,
30 | "unit": "mm/h",
31 | "transform": None,
32 | "zerovalue": 0,
33 | "projection": "+proj=longlat +ellps=IAU76",
34 | "yorigin": "upper",
35 | "threshold": 0.1,
36 | "x1": -129.99999999999997,
37 | "x2": -60.00000199999991,
38 | "y1": 20.000001,
39 | "y2": 55.00000000000001,
40 | "cartesian_unit": "degrees",
41 | }
42 |
43 | for key, value in expected_metadata.items():
44 | if isinstance(value, float):
45 | assert_array_almost_equal(metadata[key], expected_metadata[key])
46 | else:
47 | assert metadata[key] == expected_metadata[key]
48 |
49 | x = np.arange(metadata["x1"], metadata["x2"], metadata["xpixelsize"])
50 | y = np.arange(metadata["y1"], metadata["y2"], metadata["ypixelsize"])
51 |
52 | assert y.size == precip.shape[0]
53 | assert x.size == precip.shape[1]
54 |
55 | # The full latitude range is (20.005, 54.995)
56 | # The full longitude range is (230.005, 299.995)
57 |
58 | # Test that if the bounding box is larger than the domain, all the points are returned.
59 | precip2, _, _ = pysteps.io.import_mrms_grib(
60 | filename, fillna=0, extent=(220, 300, 20, 55), window_size=1
61 | )
62 | assert precip2.shape == (3500, 7000)
63 |
64 | assert_array_almost_equal(precip, precip2)
65 |
66 | del precip2
67 |
68 | # Test that a portion of the domain is returned correctly
69 | precip3, _, _ = pysteps.io.import_mrms_grib(
70 | filename, fillna=0, extent=(250, 260, 30, 35), window_size=1
71 | )
72 |
73 | assert precip3.shape == (500, 1000)
74 | assert_array_almost_equal(precip3, precip[2000:2500, 2000:3000])
75 | del precip3
76 |
77 | precip4, _, _ = pysteps.io.import_mrms_grib(filename, dtype="double", fillna=0)
78 | assert precip4.dtype == "double"
79 | del precip4
80 |
81 | precip5, _, _ = pysteps.io.import_mrms_grib(filename, dtype="single", fillna=0)
82 | assert precip5.dtype == "single"
83 | del precip5
84 |
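A short worked check (editorial note) of the cropped shape asserted above, ignoring the half-cell offset of the grid centres; rows are counted from the northern edge because yorigin is "upper":

n_cols = int((260 - 250) / 0.01)     # 1000 columns
n_rows = int((35 - 30) / 0.01)       # 500 rows
row_start = int((55 - 35) / 0.01)    # 2000, offset from latitude 55
col_start = int((250 - 230) / 0.01)  # 2000, offset from longitude 230
# hence the comparison against precip[2000:2500, 2000:3000] and shape (500, 1000)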
--------------------------------------------------------------------------------
/pysteps/tests/test_io_nowcast_importers.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from pysteps import io
5 | from pysteps.tests.helpers import get_precipitation_fields
6 |
7 | precip, metadata = get_precipitation_fields(
8 | num_prev_files=1,
9 | num_next_files=0,
10 | return_raw=False,
11 | metadata=True,
12 | upscale=2000,
13 | )
14 |
15 |
16 | @pytest.mark.parametrize(
17 | "precip, metadata",
18 | [(precip, metadata), (np.zeros_like(precip), metadata)],
19 | )
20 | def test_import_netcdf(precip, metadata, tmp_path):
21 |
22 | pytest.importorskip("pyproj")
23 |
24 | field_shape = (precip.shape[1], precip.shape[2])
25 | startdate = metadata["timestamps"][-1]
26 | timestep = metadata["accutime"]
27 | exporter = io.exporters.initialize_forecast_exporter_netcdf(
28 | outpath=tmp_path.as_posix(),
29 | outfnprefix="test",
30 | startdate=startdate,
31 | timestep=timestep,
32 | n_timesteps=precip.shape[0],
33 | shape=field_shape,
34 | metadata=metadata,
35 | )
36 | io.exporters.export_forecast_dataset(precip, exporter)
37 | io.exporters.close_forecast_files(exporter)
38 |
39 | tmp_file = tmp_path / "test.nc"
40 | precip_netcdf, metadata_netcdf = io.import_netcdf_pysteps(tmp_file, dtype="float64")
41 |
42 | assert isinstance(precip_netcdf, np.ndarray)
43 | assert isinstance(metadata_netcdf, dict)
44 | assert precip_netcdf.ndim == precip.ndim, "Wrong number of dimensions"
45 | assert precip_netcdf.shape[0] == precip.shape[0], "Wrong number of lead times"
46 | assert precip_netcdf.shape[1:] == field_shape, "Wrong field shape"
47 | assert np.allclose(precip_netcdf, precip)
48 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_readers.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | import pysteps
7 |
8 |
9 | def test_read_timeseries_mch():
10 | pytest.importorskip("PIL")
11 |
12 | date = datetime.strptime("201505151630", "%Y%m%d%H%M")
13 | data_source = pysteps.rcparams.data_sources["mch"]
14 | root_path = data_source["root_path"]
15 | path_fmt = data_source["path_fmt"]
16 | fn_pattern = data_source["fn_pattern"]
17 | fn_ext = data_source["fn_ext"]
18 | importer_name = data_source["importer"]
19 | importer_kwargs = data_source["importer_kwargs"]
20 | timestep = data_source["timestep"]
21 |
22 | fns = pysteps.io.archive.find_by_date(
23 | date,
24 | root_path,
25 | path_fmt,
26 | fn_pattern,
27 | fn_ext,
28 | timestep=timestep,
29 | num_prev_files=1,
30 | num_next_files=1,
31 | )
32 |
33 | importer = pysteps.io.get_method(importer_name, "importer")
34 | precip, _, metadata = pysteps.io.read_timeseries(fns, importer, **importer_kwargs)
35 |
36 | assert isinstance(precip, np.ndarray)
37 | assert isinstance(metadata, dict)
38 | assert precip.shape[0] == 3
39 |
--------------------------------------------------------------------------------
/pysteps/tests/test_io_saf_crri.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 |
5 | import pytest
6 |
7 | import pysteps
8 | from pysteps.tests.helpers import smart_assert
9 |
10 | pytest.importorskip("netCDF4")
11 |
12 |
13 | expected_proj = (
14 | "+proj=geos +a=6378137.000000 +b=6356752.300000 "
15 | "+lon_0=0.000000 +h=35785863.000000"
16 | )
17 | test_geodata_crri = [
18 | ("projection", expected_proj, None),
19 | ("x1", -3301500.0, 0.1),
20 | ("x2", 3298500.0, 0.1),
21 | ("y1", 2512500.0, 0.1),
22 | ("y2", 5569500.0, 0.1),
23 | ("xpixelsize", 3000.0, 0.1),
24 | ("ypixelsize", 3000.0, 0.1),
25 | ("cartesian_unit", "m", None),
26 | ("yorigin", "upper", None),
27 | ]
28 |
29 |
30 | @pytest.mark.parametrize("variable, expected, tolerance", test_geodata_crri)
31 | def test_io_import_saf_crri_geodata(variable, expected, tolerance):
32 | """Test the importer SAF CRRI."""
33 | root_path = pysteps.rcparams.data_sources["saf"]["root_path"]
34 | rel_path = "20180601/CRR"
35 | filename = os.path.join(
36 | root_path, rel_path, "S_NWC_CRR_MSG4_Europe-VISIR_20180601T070000Z.nc"
37 | )
38 | geodata = pysteps.io.importers._import_saf_crri_geodata(filename)
39 | smart_assert(geodata[variable], expected, tolerance)
40 |
41 |
42 | root_path = pysteps.rcparams.data_sources["saf"]["root_path"]
43 | rel_path = "20180601/CRR"
44 | filename = os.path.join(
45 | root_path, rel_path, "S_NWC_CRR_MSG4_Europe-VISIR_20180601T070000Z.nc"
46 | )
47 | _, _, metadata = pysteps.io.import_saf_crri(filename)
48 |
49 | # list of (variable,expected,tolerance) tuples
50 | test_attrs = [
51 | ("projection", expected_proj, None),
52 | ("institution", "Agencia Estatal de Meteorología (AEMET)", None),
53 | ("transform", None, None),
54 | ("zerovalue", 0.0, 0.1),
55 | ("unit", "mm/h", None),
56 | ("accutime", None, None),
57 | ]
58 |
59 |
60 | @pytest.mark.parametrize("variable, expected, tolerance", test_attrs)
61 | def test_io_import_saf_crri_attrs(variable, expected, tolerance):
62 | """Test the importer SAF CRRI."""
63 | smart_assert(metadata[variable], expected, tolerance)
64 |
65 |
66 | test_extent_crri = [
67 | (None, (-3301500.0, 3298500.0, 2512500.0, 5569500.0), (1019, 2200), None),
68 | (
69 | (-1980000.0, 1977000.0, 2514000.0, 4818000.0),
70 | (-1978500.0, 1975500.0, 2515500.0, 4816500.0),
71 | (767, 1318),
72 | None,
73 | ),
74 | ]
75 |
76 |
77 | @pytest.mark.parametrize(
78 | "extent, expected_extent, expected_shape, tolerance", test_extent_crri
79 | )
80 | def test_io_import_saf_crri_extent(extent, expected_extent, expected_shape, tolerance):
81 | """Test the importer SAF CRRI."""
82 | root_path = pysteps.rcparams.data_sources["saf"]["root_path"]
83 | rel_path = "20180601/CRR"
84 | filename = os.path.join(
85 | root_path, rel_path, "S_NWC_CRR_MSG4_Europe-VISIR_20180601T070000Z.nc"
86 | )
87 | precip, _, metadata = pysteps.io.import_saf_crri(filename, extent=extent)
88 | extent_out = (metadata["x1"], metadata["x2"], metadata["y1"], metadata["y2"])
89 | smart_assert(extent_out, expected_extent, tolerance)
90 | smart_assert(precip.shape, expected_shape, tolerance)
91 |
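A short worked check (editorial note) of the second parametrized case above, assuming the requested extent is simply snapped to the 3000 m grid:

x1, x2, y1, y2 = -1978500.0, 1975500.0, 2515500.0, 4816500.0  # snapped extent
n_cols = (x2 - x1) / 3000.0  # 1318.0
n_rows = (y2 - y1) / 3000.0  # 767.0, matching the expected shape (767, 1318)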
--------------------------------------------------------------------------------
/pysteps/tests/test_motion_lk.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 |
3 | """ """
4 |
5 | import pytest
6 | import numpy as np
7 |
8 | from pysteps import motion, verification
9 | from pysteps.tests.helpers import get_precipitation_fields
10 |
11 | lk_arg_names = (
12 | "lk_kwargs",
13 | "fd_method",
14 | "dense",
15 | "nr_std_outlier",
16 | "k_outlier",
17 | "size_opening",
18 | "decl_scale",
19 | "verbose",
20 | )
21 |
22 | lk_arg_values = [
23 | ({}, "shitomasi", True, 3, 30, 3, 20, False), # defaults
24 | ({}, "shitomasi", False, 3, 30, 3, 20, True), # sparse ouput, verbose
25 | ({}, "shitomasi", False, 0, 30, 3, 20, False), # sparse ouput, all outliers
26 | (
27 | {},
28 | "shitomasi",
29 | True,
30 | 3,
31 | None,
32 | 0,
33 | 0,
34 | False,
35 | ), # global outlier detection, no filtering, no decluttering
36 | ({}, "shitomasi", True, 0, 30, 3, 20, False), # all outliers
37 | ({}, "blob", True, 3, 30, 3, 20, False), # blob detection
38 | ({}, "tstorm", True, 3, 30, 3, 20, False), # tstorm detection
39 | ]
40 |
41 |
42 | @pytest.mark.parametrize(lk_arg_names, lk_arg_values)
43 | def test_lk(
44 | lk_kwargs,
45 | fd_method,
46 | dense,
47 | nr_std_outlier,
48 | k_outlier,
49 | size_opening,
50 | decl_scale,
51 | verbose,
52 | ):
53 | """Tests Lucas-Kanade optical flow."""
54 |
55 | pytest.importorskip("cv2")
56 | if fd_method == "blob":
57 | pytest.importorskip("skimage")
58 | if fd_method == "tstorm":
59 | pytest.importorskip("skimage")
60 | pytest.importorskip("pandas")
61 |
62 | # inputs
63 | precip, metadata = get_precipitation_fields(
64 | num_prev_files=2,
65 | num_next_files=0,
66 | return_raw=False,
67 | metadata=True,
68 | upscale=2000,
69 | )
70 | precip = precip.filled()
71 |
72 | # Retrieve motion field
73 | oflow_method = motion.get_method("LK")
74 | output = oflow_method(
75 | precip,
76 | lk_kwargs=lk_kwargs,
77 | fd_method=fd_method,
78 | dense=dense,
79 | nr_std_outlier=nr_std_outlier,
80 | k_outlier=k_outlier,
81 | size_opening=size_opening,
82 | decl_scale=decl_scale,
83 | verbose=verbose,
84 | )
85 |
86 | # Check format of output
87 | if dense:
88 | assert isinstance(output, np.ndarray)
89 | assert output.ndim == 3
90 | assert output.shape[0] == 2
91 | assert output.shape[1:] == precip[0].shape
92 | if nr_std_outlier == 0:
93 | assert output.sum() == 0
94 | else:
95 | assert isinstance(output, tuple)
96 | assert len(output) == 2
97 | assert isinstance(output[0], np.ndarray)
98 | assert isinstance(output[1], np.ndarray)
99 | assert output[0].ndim == 2
100 | assert output[1].ndim == 2
101 | assert output[0].shape[1] == 2
102 | assert output[1].shape[1] == 2
103 | assert output[0].shape[0] == output[1].shape[0]
104 | if nr_std_outlier == 0:
105 | assert output[0].shape[0] == 0
106 | assert output[1].shape[0] == 0
107 |
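A minimal sketch (editorial note) of consuming the sparse output whose shape is checked above; consistent with those checks, it assumes the first array holds the feature coordinates and the second the corresponding motion vectors:

xy, uv = oflow_method(precip, dense=False)
for (x, y), (u, v) in zip(xy, uv):
    print(f"feature at ({x:.1f}, {y:.1f}): displacement ({u:.2f}, {v:.2f})")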
--------------------------------------------------------------------------------
/pysteps/tests/test_noise_fftgenerators.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from pysteps.noise import fftgenerators
4 | from pysteps.tests.helpers import get_precipitation_fields
5 |
6 |
7 | PRECIP = get_precipitation_fields(
8 | num_prev_files=0,
9 | num_next_files=0,
10 | return_raw=False,
11 | metadata=False,
12 | upscale=2000,
13 | )
14 | PRECIP = PRECIP.filled()
15 |
16 |
17 | def test_noise_param_2d_fft_filter():
18 | fft_filter = fftgenerators.initialize_param_2d_fft_filter(PRECIP)
19 |
20 | assert isinstance(fft_filter, dict)
21 | assert all([key in fft_filter for key in ["field", "input_shape", "model", "pars"]])
22 |
23 | out = fftgenerators.generate_noise_2d_fft_filter(fft_filter)
24 |
25 | assert isinstance(out, np.ndarray)
26 | assert out.shape == PRECIP.shape
27 |
28 |
29 | def test_noise_nonparam_2d_fft_filter():
30 | fft_filter = fftgenerators.initialize_nonparam_2d_fft_filter(PRECIP)
31 |
32 | assert isinstance(fft_filter, dict)
33 | assert all([key in fft_filter for key in ["field", "input_shape"]])
34 |
35 | out = fftgenerators.generate_noise_2d_fft_filter(fft_filter)
36 |
37 | assert isinstance(out, np.ndarray)
38 | assert out.shape == PRECIP.shape
39 |
40 |
41 | def test_noise_nonparam_2d_ssft_filter():
42 | fft_filter = fftgenerators.initialize_nonparam_2d_ssft_filter(PRECIP)
43 |
44 | assert isinstance(fft_filter, dict)
45 | assert all([key in fft_filter for key in ["field", "input_shape"]])
46 |
47 | out = fftgenerators.generate_noise_2d_ssft_filter(fft_filter)
48 |
49 | assert isinstance(out, np.ndarray)
50 | assert out.shape == PRECIP.shape
51 |
52 |
53 | def test_noise_nonparam_2d_nested_filter():
54 | fft_filter = fftgenerators.initialize_nonparam_2d_nested_filter(PRECIP)
55 |
56 | assert isinstance(fft_filter, dict)
57 | assert all([key in fft_filter for key in ["field", "input_shape"]])
58 |
59 | out = fftgenerators.generate_noise_2d_ssft_filter(fft_filter)
60 |
61 | assert isinstance(out, np.ndarray)
62 | assert out.shape == PRECIP.shape
63 |
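A compact sketch (editorial note) of the two-step pattern these tests exercise: fit a filter to a precipitation field, then draw spatially correlated noise from it; the seed keyword is assumed to be accepted by the generator for reproducibility:

fft_filter = fftgenerators.initialize_param_2d_fft_filter(PRECIP)
noise_a = fftgenerators.generate_noise_2d_fft_filter(fft_filter, seed=42)
noise_b = fftgenerators.generate_noise_2d_fft_filter(fft_filter, seed=42)
# identical seeds should yield identical noise fields
assert (noise_a == noise_b).all()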
--------------------------------------------------------------------------------
/pysteps/tests/test_noise_motion.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 | from numpy.testing import assert_array_almost_equal
6 |
7 | from pysteps.noise.motion import generate_bps
8 | from pysteps.noise.motion import get_default_params_bps_par
9 | from pysteps.noise.motion import get_default_params_bps_perp
10 | from pysteps.noise.motion import initialize_bps
11 |
12 |
13 | def test_noise_motion_get_default_params_bps_par():
14 | """Tests default BPS velocity parameters."""
15 | expected = (10.88, 0.23, -7.68)
16 | result = get_default_params_bps_par()
17 | assert_array_almost_equal(result, expected)
18 |
19 |
20 | def test_noise_motion_get_default_params_bps_perp():
21 | """Tests default BPS velocity perturbation."""
22 | expected = (5.76, 0.31, -2.72)
23 | result = get_default_params_bps_perp()
24 | assert_array_almost_equal(result, expected)
25 |
26 |
27 | vv = np.ones((8, 8)) * np.sqrt(2) * 0.5
28 | test_init_bps_vars = [
29 | ("vsf", 60),
30 | ("eps_par", -0.2042896366299448),
31 | ("eps_perp", 1.6383482042624593),
32 | ("p_par", (10.88, 0.23, -7.68)),
33 | ("p_perp", (5.76, 0.31, -2.72)),
34 | ("V_par", np.stack([vv, vv])),
35 | ("V_perp", np.stack([-vv, vv])),
36 | ]
37 |
38 |
39 | @pytest.mark.parametrize("variable, expected", test_init_bps_vars)
40 | def test_initialize_bps(variable, expected):
41 | """Tests initialation BPS velocity perturbation method."""
42 | seed = 42
43 | timestep = 1
44 | pixelsperkm = 1
45 | v = np.ones((8, 8))
46 | velocity = np.stack([v, v])
47 | perturbator = initialize_bps(velocity, pixelsperkm, timestep, seed=seed)
48 | assert_array_almost_equal(perturbator[variable], expected)
49 |
50 |
51 | def test_generate_bps():
52 | """Tests generation BPS velocity perturbation method."""
53 | seed = 42
54 | timestep = 1
55 | pixelsperkm = 1
56 | v = np.ones((8, 8))
57 | velocity = np.stack([v, v])
58 | perturbator = initialize_bps(velocity, pixelsperkm, timestep, seed=seed)
59 | new_vv = generate_bps(perturbator, timestep)
60 | expected = np.stack([v * -0.066401, v * 0.050992])
61 | assert_array_almost_equal(new_vv, expected)
62 |
--------------------------------------------------------------------------------
/pysteps/tests/test_nowcasts_anvil.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from pysteps import motion, nowcasts, verification
5 | from pysteps.tests.helpers import get_precipitation_fields
6 |
7 | anvil_arg_names = (
8 | "n_cascade_levels",
9 | "ar_order",
10 | "ar_window_radius",
11 | "timesteps",
12 | "min_csi",
13 | "apply_rainrate_mask",
14 | "measure_time",
15 | )
16 |
17 | anvil_arg_values = [
18 | (8, 1, 50, 3, 0.6, True, False),
19 | (8, 1, 50, [3], 0.6, False, True),
20 | ]
21 |
22 |
23 | def test_default_anvil_norain():
24 | """Tests anvil nowcast with default params and all-zero inputs."""
25 |
26 | # Define dummy nowcast input data
27 | precip_input = np.zeros((4, 100, 100))
28 |
29 | pytest.importorskip("cv2")
30 | oflow_method = motion.get_method("LK")
31 | retrieved_motion = oflow_method(precip_input)
32 |
33 | nowcast_method = nowcasts.get_method("anvil")
34 | precip_forecast = nowcast_method(
35 | precip_input,
36 | retrieved_motion,
37 | timesteps=3,
38 | )
39 |
40 | assert precip_forecast.ndim == 3
41 | assert precip_forecast.shape[0] == 3
42 | assert precip_forecast.sum() == 0.0
43 |
44 |
45 | @pytest.mark.parametrize(anvil_arg_names, anvil_arg_values)
46 | def test_anvil_rainrate(
47 | n_cascade_levels,
48 | ar_order,
49 | ar_window_radius,
50 | timesteps,
51 | min_csi,
52 | apply_rainrate_mask,
53 | measure_time,
54 | ):
55 | """Tests ANVIL nowcast using rain rate precipitation fields."""
56 | # inputs
57 | precip_input = get_precipitation_fields(
58 | num_prev_files=4,
59 | num_next_files=0,
60 | return_raw=False,
61 | metadata=False,
62 | upscale=2000,
63 | )
64 | precip_input = precip_input.filled()
65 |
66 | precip_obs = get_precipitation_fields(
67 | num_prev_files=0, num_next_files=3, return_raw=False, upscale=2000
68 | )[1:, :, :]
69 | precip_obs = precip_obs.filled()
70 |
71 | pytest.importorskip("cv2")
72 | oflow_method = motion.get_method("LK")
73 | retrieved_motion = oflow_method(precip_input)
74 |
75 | nowcast_method = nowcasts.get_method("anvil")
76 |
77 | output = nowcast_method(
78 | precip_input[-(ar_order + 2) :],
79 | retrieved_motion,
80 | timesteps=timesteps,
81 | rainrate=None, # no R(VIL) conversion is done
82 | n_cascade_levels=n_cascade_levels,
83 | ar_order=ar_order,
84 | ar_window_radius=ar_window_radius,
85 | apply_rainrate_mask=apply_rainrate_mask,
86 | measure_time=measure_time,
87 | )
88 | if measure_time:
89 | precip_forecast, __, __ = output
90 | else:
91 | precip_forecast = output
92 |
93 | assert precip_forecast.ndim == 3
94 | assert precip_forecast.shape[0] == (
95 | timesteps if isinstance(timesteps, int) else len(timesteps)
96 | )
97 |
98 | result = verification.det_cat_fct(
99 | precip_forecast[-1], precip_obs[-1], thr=0.1, scores="CSI"
100 | )["CSI"]
101 | assert result > min_csi, f"CSI={result:.2f}, required > {min_csi:.2f}"
102 |
103 |
104 | if __name__ == "__main__":
105 | for n in range(len(anvil_arg_values)):
106 | test_args = zip(anvil_arg_names, anvil_arg_values[n])
107 | test_anvil_rainrate(**dict((x, y) for x, y in test_args))
108 |
--------------------------------------------------------------------------------
/pysteps/tests/test_nowcasts_lagrangian_probability.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import numpy as np
3 | import pytest
4 |
5 | from pysteps.nowcasts.lagrangian_probability import forecast
6 | from pysteps.tests.helpers import get_precipitation_fields
7 | from pysteps.motion.lucaskanade import dense_lucaskanade
8 |
9 |
10 | def test_numerical_example():
11 | """"""
12 | precip = np.zeros((20, 20))
13 | precip[5:10, 5:10] = 1
14 | velocity = np.zeros((2, *precip.shape))
15 | timesteps = 4
16 | thr = 0.5
17 | slope = 1 # pixels / timestep
18 |
19 | # compute probability forecast
20 | fct = forecast(precip, velocity, timesteps, thr, slope=slope)
21 |
22 | assert fct.ndim == 3
23 | assert fct.shape[0] == timesteps
24 | assert fct.shape[1:] == precip.shape
25 | assert fct.max() <= 1.0
26 | assert fct.min() >= 0.0
27 |
28 | # slope = 0 should return a binary field
29 | fct = forecast(precip, velocity, timesteps, thr, slope=0)
30 | ref = (np.repeat(precip[None, ...], timesteps, axis=0) >= thr).astype(float)
31 | assert np.allclose(fct, fct.astype(bool))
32 | assert np.allclose(fct, ref)
33 |
34 |
35 | def test_numerical_example_with_float_slope_and_float_list_timesteps():
36 | """"""
37 | precip = np.zeros((20, 20))
38 | precip[5:10, 5:10] = 1
39 | velocity = np.zeros((2, *precip.shape))
40 | timesteps = [1.0, 2.0, 5.0, 12.0]
41 | thr = 0.5
42 | slope = 1.0 # pixels / timestep
43 |
44 | # compute probability forecast
45 | fct = forecast(precip, velocity, timesteps, thr, slope=slope)
46 |
47 | assert fct.ndim == 3
48 | assert fct.shape[0] == len(timesteps)
49 | assert fct.shape[1:] == precip.shape
50 | assert fct.max() <= 1.0
51 | assert fct.min() >= 0.0
52 |
53 |
54 | def test_real_case():
55 | """"""
56 | pytest.importorskip("cv2")
57 |
58 | # inputs
59 | precip, metadata = get_precipitation_fields(
60 | num_prev_files=2,
61 | num_next_files=0,
62 | return_raw=False,
63 | metadata=True,
64 | upscale=2000,
65 | )
66 |
67 | # motion
68 | motion = dense_lucaskanade(precip)
69 |
70 | # parameters
71 | timesteps = [1, 2, 3]
72 | thr = 1 # mm / h
73 | slope = 1 * metadata["accutime"] # min-1
74 |
75 | # compute probability forecast
76 | extrap_kwargs = dict(allow_nonfinite_values=True)
77 | fct = forecast(
78 | precip[-1], motion, timesteps, thr, slope=slope, extrap_kwargs=extrap_kwargs
79 | )
80 |
81 | assert fct.ndim == 3
82 | assert fct.shape[0] == len(timesteps)
83 | assert fct.shape[1:] == precip.shape[1:]
84 | assert np.nanmax(fct) <= 1.0
85 | assert np.nanmin(fct) >= 0.0
86 |
87 |
88 | def test_wrong_inputs():
89 | # dummy inputs
90 | precip = np.zeros((3, 3))
91 | velocity = np.zeros((2, *precip.shape))
92 |
93 | # timesteps must be > 0
94 | with pytest.raises(ValueError):
95 | forecast(precip, velocity, 0, 1)
96 |
97 | # timesteps must be a sorted list
98 | with pytest.raises(ValueError):
99 | forecast(precip, velocity, [2, 1], 1)
100 |
--------------------------------------------------------------------------------
/pysteps/tests/test_nowcasts_sprog.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pysteps import motion, nowcasts, verification
7 | from pysteps.tests.helpers import get_precipitation_fields
8 |
9 | sprog_arg_names = (
10 | "n_cascade_levels",
11 | "ar_order",
12 | "probmatching_method",
13 | "domain",
14 | "timesteps",
15 | "min_csi",
16 | )
17 |
18 | sprog_arg_values = [
19 | (6, 1, None, "spatial", 3, 0.5),
20 | (6, 1, None, "spatial", [3], 0.5),
21 | (6, 2, None, "spatial", 3, 0.5),
22 | (6, 2, "cdf", "spatial", 3, 0.5),
23 | (6, 2, "mean", "spatial", 3, 0.5),
24 | (6, 2, "cdf", "spectral", 3, 0.5),
25 | ]
26 |
27 |
28 | def test_default_sprog_norain():
29 | """Tests SPROG nowcast with default params and all-zero inputs."""
30 |
31 | # Define dummy nowcast input data
32 | precip_input = np.zeros((3, 100, 100))
33 |
34 | pytest.importorskip("cv2")
35 | oflow_method = motion.get_method("LK")
36 | retrieved_motion = oflow_method(precip_input)
37 |
38 | nowcast_method = nowcasts.get_method("sprog")
39 | precip_forecast = nowcast_method(
40 | precip_input,
41 | retrieved_motion,
42 | timesteps=3,
43 | precip_thr=0.1,
44 | )
45 |
46 | assert precip_forecast.ndim == 3
47 | assert precip_forecast.shape[0] == 3
48 | assert precip_forecast.sum() == 0.0
49 |
50 |
51 | @pytest.mark.parametrize(sprog_arg_names, sprog_arg_values)
52 | def test_sprog(
53 | n_cascade_levels, ar_order, probmatching_method, domain, timesteps, min_csi
54 | ):
55 | """Tests SPROG nowcast."""
56 | # inputs
57 | precip_input, metadata = get_precipitation_fields(
58 | num_prev_files=2,
59 | num_next_files=0,
60 | return_raw=False,
61 | metadata=True,
62 | upscale=2000,
63 | )
64 | precip_input = precip_input.filled()
65 |
66 | precip_obs = get_precipitation_fields(
67 | num_prev_files=0, num_next_files=3, return_raw=False, upscale=2000
68 | )[1:, :, :]
69 | precip_obs = precip_obs.filled()
70 |
71 | pytest.importorskip("cv2")
72 | oflow_method = motion.get_method("LK")
73 | retrieved_motion = oflow_method(precip_input)
74 |
75 | nowcast_method = nowcasts.get_method("sprog")
76 |
77 | precip_forecast = nowcast_method(
78 | precip_input,
79 | retrieved_motion,
80 | timesteps=timesteps,
81 | precip_thr=metadata["threshold"],
82 | n_cascade_levels=n_cascade_levels,
83 | ar_order=ar_order,
84 | probmatching_method=probmatching_method,
85 | domain=domain,
86 | )
87 |
88 | assert precip_forecast.ndim == 3
89 | assert precip_forecast.shape[0] == (
90 | timesteps if isinstance(timesteps, int) else len(timesteps)
91 | )
92 |
93 | result = verification.det_cat_fct(
94 | precip_forecast[-1], precip_obs[-1], thr=0.1, scores="CSI"
95 | )["CSI"]
96 | assert result > min_csi, f"CSI={result:.1f}, required > {min_csi:.1f}"
97 |
98 |
99 | if __name__ == "__main__":
100 | for n in range(len(sprog_arg_values)):
101 | test_args = zip(sprog_arg_names, sprog_arg_values[n])
102 | test_sprog(**dict((x, y) for x, y in test_args))
103 |
--------------------------------------------------------------------------------
/pysteps/tests/test_nowcasts_sseps.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pysteps import motion, nowcasts, verification
7 | from pysteps.tests.helpers import get_precipitation_fields
8 |
9 | sseps_arg_names = (
10 | "n_ens_members",
11 | "n_cascade_levels",
12 | "ar_order",
13 | "mask_method",
14 | "probmatching_method",
15 | "win_size",
16 | "timesteps",
17 | "max_crps",
18 | )
19 |
20 | sseps_arg_values = [
21 | (5, 6, 2, "incremental", "cdf", 200, 3, 0.60),
22 | (5, 6, 2, "incremental", "cdf", 200, [3], 0.60),
23 | ]
24 |
25 |
26 | def test_default_sseps_norain():
27 | """Tests SSEPS nowcast with default params and all-zero inputs."""
28 |
29 | # Define dummy nowcast input data
30 | precip_input = np.zeros((3, 100, 100))
31 | metadata = {
32 | "accutime": 5,
33 | "xpixelsize": 1000,
34 | "threshold": 0.1,
35 | "zerovalue": 0,
36 | }
37 |
38 | pytest.importorskip("cv2")
39 | oflow_method = motion.get_method("LK")
40 | retrieved_motion = oflow_method(precip_input)
41 |
42 | nowcast_method = nowcasts.get_method("sseps")
43 | precip_forecast = nowcast_method(
44 | precip_input,
45 | metadata,
46 | retrieved_motion,
47 | n_ens_members=3,
48 | timesteps=3,
49 | )
50 |
51 | assert precip_forecast.ndim == 4
52 | assert precip_forecast.shape[0] == 3
53 | assert precip_forecast.shape[1] == 3
54 | assert precip_forecast.sum() == 0.0
55 |
56 |
57 | @pytest.mark.parametrize(sseps_arg_names, sseps_arg_values)
58 | def test_sseps(
59 | n_ens_members,
60 | n_cascade_levels,
61 | ar_order,
62 | mask_method,
63 | probmatching_method,
64 | win_size,
65 | timesteps,
66 | max_crps,
67 | ):
68 | """Tests SSEPS nowcast."""
69 | # inputs
70 | precip_input, metadata = get_precipitation_fields(
71 | num_prev_files=2,
72 | num_next_files=0,
73 | return_raw=False,
74 | metadata=True,
75 | upscale=2000,
76 | )
77 | precip_input = precip_input.filled()
78 |
79 | precip_obs = get_precipitation_fields(
80 | num_prev_files=0, num_next_files=3, return_raw=False, upscale=2000
81 | )[1:, :, :]
82 | precip_obs = precip_obs.filled()
83 |
84 | pytest.importorskip("cv2")
85 | oflow_method = motion.get_method("LK")
86 | retrieved_motion = oflow_method(precip_input)
87 |
88 | nowcast_method = nowcasts.get_method("sseps")
89 |
90 | precip_forecast = nowcast_method(
91 | precip_input,
92 | metadata,
93 | retrieved_motion,
94 | win_size=win_size,
95 | timesteps=timesteps,
96 | n_ens_members=n_ens_members,
97 | n_cascade_levels=n_cascade_levels,
98 | ar_order=ar_order,
99 | seed=42,
100 | mask_method=mask_method,
101 | probmatching_method=probmatching_method,
102 | )
103 |
104 | assert precip_forecast.ndim == 4
105 | assert precip_forecast.shape[0] == n_ens_members
106 | assert precip_forecast.shape[1] == (
107 | timesteps if isinstance(timesteps, int) else len(timesteps)
108 | )
109 |
110 | crps = verification.probscores.CRPS(precip_forecast[:, -1], precip_obs[-1])
111 | assert crps < max_crps, f"CRPS={crps:.2f}, required < {max_crps:.2f}"
112 |
113 |
114 | if __name__ == "__main__":
115 | for n in range(len(sseps_arg_values)):
116 | test_args = zip(sseps_arg_names, sseps_arg_values[n])
117 | test_sseps(**dict((x, y) for x, y in test_args))
118 |
--------------------------------------------------------------------------------
/pysteps/tests/test_nowcasts_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 | from pysteps import motion
5 | from pysteps.nowcasts import utils as nowcast_utils
6 | from pysteps.tests.helpers import get_precipitation_fields
7 |
8 | main_loop_arg_names = (
9 | "timesteps",
10 | "ensemble",
11 | "num_ensemble_members",
12 | "velocity_perturbations",
13 | )
14 |
15 | # TODO: add tests for callback and other untested options
16 | main_loop_arg_values = [
17 | (6, False, 0, False),
18 | ([0.5, 1.5], False, 0, False),
19 | (6, True, 2, False),
20 | (6, True, 2, True),
21 | ]
22 |
23 |
24 | @pytest.mark.parametrize(main_loop_arg_names, main_loop_arg_values)
25 | def test_nowcast_main_loop(
26 | timesteps, ensemble, num_ensemble_members, velocity_perturbations
27 | ):
28 | """Test the nowcast_main_loop function."""
29 | precip = get_precipitation_fields(
30 | num_prev_files=2,
31 | num_next_files=0,
32 | return_raw=False,
33 | metadata=False,
34 | upscale=2000,
35 | )
36 | precip = precip.filled()
37 |
38 | oflow_method = motion.get_method("LK")
39 | velocity = oflow_method(precip)
40 |
41 | precip = precip[-1]
42 |
43 | state = {"input": precip}
44 | extrap_method = "semilagrangian"
45 |
46 | def func(state, params):
47 | if not ensemble:
48 | precip_out = state["input"]
49 | else:
50 | precip_out = state["input"][np.newaxis, :]
51 |
52 | return precip_out, state
53 |
54 | nowcast_utils.nowcast_main_loop(
55 | precip,
56 | velocity,
57 | state,
58 | timesteps,
59 | extrap_method,
60 | func,
61 | ensemble=ensemble,
62 | num_ensemble_members=num_ensemble_members,
63 | )
64 |
--------------------------------------------------------------------------------
/pysteps/tests/test_paramsrc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 |
4 | from tempfile import NamedTemporaryFile
5 |
6 | import pysteps
7 | from pysteps import load_config_file
8 |
9 | minimal_pystepsrc_file = """
10 | // pysteps configuration
11 | {
12 | "silent_import": false,
13 | "outputs": {
14 | "path_outputs": "./"
15 | },
16 | "plot": {
17 | "motion_plot": "quiver",
18 | "colorscale": "pysteps"
19 | },
20 | "data_sources": {
21 | "bom": {
22 | "root_path": "./radar/bom",
23 | "path_fmt": "prcp-cscn/2/%Y/%m/%d",
24 | "fn_pattern": "2_%Y%m%d_%H%M00.prcp-cscn",
25 | "fn_ext": "nc",
26 | "importer": "bom_rf3",
27 | "timestep": 6,
28 | "importer_kwargs": {
29 | "gzipped": true
30 | }
31 | }
32 | }
33 | }
34 | """
35 |
36 |
37 | def test_read_paramsrc():
38 | """
39 | Test that the parameter file is read correctly and the resulting
40 | rcparams dict can be accessed by attributes too.
41 | """
42 |
43 | with NamedTemporaryFile(mode="w", delete=False) as tmp_paramsrc:
44 | tmp_paramsrc.write(minimal_pystepsrc_file)
45 | tmp_paramsrc.flush()
46 |
47 | # Perform a dry run that does not update
48 | # the internal pysteps.rcparams values.
49 | rcparams = load_config_file(tmp_paramsrc.name, dryrun=True, verbose=False)
50 | os.unlink(tmp_paramsrc.name)
51 | # Test item and attribute getters
52 | assert rcparams["data_sources"]["bom"]["fn_ext"] == "nc"
53 | assert rcparams.data_sources.bom.fn_ext == "nc"
54 |
55 | bom_datasource_as_dict = rcparams["data_sources"]["bom"]
56 | bom_datasource_as_attr = rcparams.data_sources.bom
57 | assert bom_datasource_as_dict is bom_datasource_as_attr
58 | bom_datasource = bom_datasource_as_attr
59 |
60 | timestep_as_dict = bom_datasource["timestep"]
61 | timestep_as_attr = bom_datasource.timestep
62 | assert timestep_as_dict == 6
63 | assert timestep_as_attr == 6
64 | assert timestep_as_dict is timestep_as_attr
65 |
66 | importer_kwargs_dict = bom_datasource["importer_kwargs"]
67 | importer_kwargs_attr = bom_datasource.importer_kwargs
68 | assert importer_kwargs_attr is importer_kwargs_dict
69 |
70 | assert importer_kwargs_attr["gzipped"] is importer_kwargs_attr.gzipped
71 | assert importer_kwargs_attr["gzipped"] is True
72 |
73 | # Test item and attribute setters
74 | rcparams.test = 4
75 | assert rcparams.test == 4
76 | assert rcparams.test is rcparams["test"]
77 |
78 | rcparams["test2"] = 4
79 | assert rcparams.test2 == 4
80 | assert rcparams.test2 is rcparams["test2"]
81 |
82 | rcparams.test = dict(a=1, b="test")
83 | assert rcparams.test == dict(a=1, b="test")
84 | assert rcparams.test["a"] == 1
85 | assert rcparams.test["b"] == "test"
86 |
87 | assert rcparams.test["a"] is rcparams["test"].a
88 | assert rcparams.test["b"] is rcparams["test"].b
89 |
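A minimal sketch (editorial note) of the non-dry-run path, assuming load_config_file replaces the global pysteps.rcparams when dryrun is left at its default:

import pysteps

pysteps.load_config_file("/path/to/custom/pystepsrc", verbose=False)  # hypothetical path
print(pysteps.rcparams.data_sources.bom.timestep)  # attribute access works as tested above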
--------------------------------------------------------------------------------
/pysteps/tests/test_plt_animate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 |
5 | import numpy as np
6 | import pytest
7 | from unittest.mock import patch
8 |
9 | from pysteps.tests.helpers import get_precipitation_fields
10 | from pysteps.visualization.animations import animate
11 |
12 |
13 | PRECIP, METADATA = get_precipitation_fields(
14 | num_prev_files=2,
15 | num_next_files=0,
16 | return_raw=True,
17 | metadata=True,
18 | upscale=2000,
19 | )
20 |
21 | VALID_ARGS = (
22 | ([PRECIP], {}),
23 | ([PRECIP], {"title": "title"}),
24 | ([PRECIP], {"timestamps_obs": METADATA["timestamps"]}),
25 | ([PRECIP], {"geodata": METADATA, "map_kwargs": {"plot_map": None}}),
26 | ([PRECIP], {"motion_field": np.ones((2, *PRECIP.shape[1:]))}),
27 | (
28 | [PRECIP],
29 | {"precip_kwargs": {"units": "mm/h", "colorbar": True, "colorscale": "pysteps"}},
30 | ),
31 | ([PRECIP, PRECIP], {}),
32 | ([PRECIP, PRECIP], {"title": "title"}),
33 | ([PRECIP, PRECIP], {"timestamps_obs": METADATA["timestamps"]}),
34 | ([PRECIP, PRECIP], {"timestamps_obs": METADATA["timestamps"], "timestep_min": 5}),
35 | ([PRECIP, PRECIP], {"ptype": "prob", "prob_thr": 1}),
36 | ([PRECIP, PRECIP], {"ptype": "mean"}),
37 | ([PRECIP, np.stack((PRECIP, PRECIP))], {"ptype": "ensemble"}),
38 | )
39 |
40 |
41 | @pytest.mark.parametrize(["anim_args", "anim_kwargs"], VALID_ARGS)
42 | def test_animate(anim_args, anim_kwargs):
43 | with patch("matplotlib.pyplot.show"):
44 | animate(*anim_args, **anim_kwargs)
45 |
46 |
47 | VALUEERROR_ARGS = (
48 | ([PRECIP], {"timestamps_obs": METADATA["timestamps"][:2]}),
49 | ([PRECIP], {"motion_plot": "test"}),
50 | ([PRECIP, PRECIP], {"ptype": "prob"}),
51 | )
52 |
53 |
54 | @pytest.mark.parametrize(["anim_args", "anim_kwargs"], VALUEERROR_ARGS)
55 | def test_animate_valueerrors(anim_args, anim_kwargs):
56 | with pytest.raises(ValueError):
57 | animate(*anim_args, **anim_kwargs)
58 |
59 |
60 | TYPEERROR_ARGS = (
61 | ([PRECIP], {"timestamps": METADATA["timestamps"]}),
62 | ([PRECIP], {"plotanimation": True}),
63 | ([PRECIP], {"units": "mm/h"}),
64 | ([PRECIP], {"colorbar": True}),
65 | ([PRECIP], {"colorscale": "pysteps"}),
66 | ([PRECIP, PRECIP], {"type": "ensemble"}),
67 | )
68 |
69 |
70 | @pytest.mark.parametrize(["anim_args", "anim_kwargs"], TYPEERROR_ARGS)
71 | def test_animate_typeerrors(anim_args, anim_kwargs):
72 | with pytest.raises(TypeError):
73 | animate(*anim_args, **anim_kwargs)
74 |
75 |
76 | def test_animate_save(tmp_path):
77 | animate(
78 | PRECIP,
79 | np.stack((PRECIP, PRECIP)),
80 | display_animation=False,
81 | savefig=True,
82 | path_outputs=tmp_path,
83 | fig_dpi=10,
84 | )
85 | assert len(os.listdir(tmp_path)) == 9
86 |
--------------------------------------------------------------------------------
/pysteps/tests/test_plt_cartopy.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import pytest
4 |
5 | from pysteps.visualization import plot_precip_field
6 | from pysteps.utils import to_rainrate
7 | from pysteps.tests.helpers import get_precipitation_fields
8 | import matplotlib.pyplot as plt
9 |
10 | plt_arg_names = ("source", "map_kwargs", "pass_geodata")
11 |
12 | plt_arg_values = [
13 | ("mch", {"drawlonlatlines": False, "lw": 0.5, "plot_map": None}, False),
14 | ("mch", {"drawlonlatlines": False, "lw": 0.5, "plot_map": "cartopy"}, False),
15 | ("mch", {"drawlonlatlines": False, "lw": 0.5}, True),
16 | ("mch", {"drawlonlatlines": True, "lw": 1.0}, True),
17 | ("bom", {"drawlonlatlines": True, "lw": 0.5}, True),
18 | ("fmi", {"drawlonlatlines": True, "lw": 0.5}, True),
19 | ("knmi", {"drawlonlatlines": True, "lw": 0.5}, True),
20 | ("opera", {"drawlonlatlines": True, "lw": 0.5}, True),
21 | ("mrms", {"drawlonlatlines": True, "lw": 0.5}, True),
22 | ("saf", {"drawlonlatlines": True, "lw": 0.5}, True),
23 | ]
24 |
25 |
26 | @pytest.mark.parametrize(plt_arg_names, plt_arg_values)
27 | def test_visualization_plot_precip_field(source, map_kwargs, pass_geodata):
28 | field, metadata = get_precipitation_fields(0, 0, True, True, None, source)
29 | field = field.squeeze()
30 | field, __ = to_rainrate(field, metadata)
31 |
32 | if not pass_geodata:
33 | metadata = None
34 |
35 | plot_precip_field(field, ptype="intensity", geodata=metadata, map_kwargs=map_kwargs)
36 |
37 |
38 | if __name__ == "__main__":
39 | for i, args in enumerate(plt_arg_values):
40 | test_visualization_plot_precip_field(*args)
41 | plt.show()
42 |
--------------------------------------------------------------------------------
/pysteps/tests/test_plt_motionfields.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import pytest
6 |
7 | from pysteps import motion
8 | from pysteps.visualization import plot_precip_field, quiver, streamplot
9 | from pysteps.tests.helpers import get_precipitation_fields
10 |
11 |
12 | arg_names_quiver = (
13 | "source",
14 | "axis",
15 | "step",
16 | "quiver_kwargs",
17 | "map_kwargs",
18 | "upscale",
19 | "pass_geodata",
20 | )
21 |
22 | arg_values_quiver = [
23 | (None, "off", 10, {}, {"drawlonlatlines": False, "lw": 0.5}, None, False),
24 | ("bom", "on", 10, {}, {"drawlonlatlines": False, "lw": 0.5}, 4000, False),
25 | ("bom", "on", 10, {}, {"drawlonlatlines": True, "lw": 0.5}, 4000, True),
26 | ("mch", "on", 20, {}, {"drawlonlatlines": False, "lw": 0.5}, 2000, True),
27 | ]
28 |
29 |
30 | @pytest.mark.parametrize(arg_names_quiver, arg_values_quiver)
31 | def test_visualization_motionfields_quiver(
32 | source, axis, step, quiver_kwargs, map_kwargs, upscale, pass_geodata
33 | ):
34 | pytest.importorskip("cv2")
35 | if source is not None:
36 | fields, geodata = get_precipitation_fields(0, 2, False, True, upscale, source)
37 | if not pass_geodata:
38 | geodata = None
39 | ax = plot_precip_field(fields[-1], geodata=geodata)
40 | oflow_method = motion.get_method("LK")
41 | UV = oflow_method(fields)
42 |
43 | else:
44 | shape = (100, 100)
45 | geodata = None
46 | ax = None
47 | u = np.ones(shape[1]) * shape[0]
48 | v = np.arange(0, shape[0])
49 | U, V = np.meshgrid(u, v)
50 | UV = np.concatenate([U[None, :], V[None, :]])
51 |
52 | UV_orig = UV.copy()
53 | __ = quiver(UV, ax, geodata, axis, step, quiver_kwargs, map_kwargs=map_kwargs)
54 |
55 | # Check that quiver does not modify the input data
56 | assert np.array_equal(UV, UV_orig)
57 |
58 |
59 | arg_names_streamplot = (
60 | "source",
61 | "axis",
62 | "streamplot_kwargs",
63 | "map_kwargs",
64 | "upscale",
65 | "pass_geodata",
66 | )
67 |
68 | arg_values_streamplot = [
69 | (None, "off", {}, {"drawlonlatlines": False, "lw": 0.5}, None, False),
70 | ("bom", "on", {}, {"drawlonlatlines": False, "lw": 0.5}, 4000, False),
71 | ("bom", "on", {"density": 0.5}, {"drawlonlatlines": True, "lw": 0.5}, 4000, True),
72 | ]
73 |
74 |
75 | @pytest.mark.parametrize(arg_names_streamplot, arg_values_streamplot)
76 | def test_visualization_motionfields_streamplot(
77 | source, axis, streamplot_kwargs, map_kwargs, upscale, pass_geodata
78 | ):
79 | pytest.importorskip("cv2")
80 | if source is not None:
81 | fields, geodata = get_precipitation_fields(0, 2, False, True, upscale, source)
82 | if not pass_geodata:
83 | geodata = None
84 | ax = plot_precip_field(fields[-1], geodata=geodata)
85 | oflow_method = motion.get_method("LK")
86 | UV = oflow_method(fields)
87 |
88 | else:
89 | shape = (100, 100)
90 | geodata = None
91 | ax = None
92 | u = np.ones(shape[1]) * shape[0]
93 | v = np.arange(0, shape[0])
94 | U, V = np.meshgrid(u, v)
95 | UV = np.concatenate([U[None, :], V[None, :]])
96 |
97 | UV_orig = UV.copy()
98 | __ = streamplot(UV, ax, geodata, axis, streamplot_kwargs, map_kwargs=map_kwargs)
99 |
100 | # Check that streamplot does not modify the input data
101 | assert np.array_equal(UV, UV_orig)
102 |
103 |
104 | if __name__ == "__main__":
105 | for i, args in enumerate(arg_values_quiver):
106 | test_visualization_motionfields_quiver(*args)
107 | plt.show()
108 |
109 | for i, args in enumerate(arg_values_streamplot):
110 | test_visualization_motionfields_streamplot(*args)
111 | plt.show()
112 |
--------------------------------------------------------------------------------
/pysteps/tests/test_plt_precipfields.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import pytest
4 |
5 | from pysteps.visualization import plot_precip_field
6 | from pysteps.utils import conversion
7 | from pysteps.postprocessing import ensemblestats
8 | from pysteps.tests.helpers import get_precipitation_fields
9 | import matplotlib.pyplot as plt
10 | import numpy as np
11 |
12 | plt_arg_names = (
13 | "source",
14 | "plot_type",
15 | "bbox",
16 | "colorscale",
17 | "probthr",
18 | "title",
19 | "colorbar",
20 | "axis",
21 | )
22 |
23 | plt_arg_values = [
24 | ("mch", "intensity", None, "pysteps", None, None, False, "off"),
25 | ("mch", "depth", None, "pysteps", None, "Title", True, "on"),
26 | ("mch", "prob", None, "pysteps", 0.1, None, True, "on"),
27 | ("mch", "intensity", None, "STEPS-BE", None, None, True, "on"),
28 | ("mch", "intensity", None, "BOM-RF3", None, None, True, "on"),
29 | ("bom", "intensity", None, "pysteps", None, None, True, "on"),
30 | ("fmi", "intensity", None, "pysteps", None, None, True, "on"),
31 | ("knmi", "intensity", None, "pysteps", None, None, True, "on"),
32 | ("knmi", "intensity", None, "STEPS-NL", None, None, True, "on"),
33 | ("knmi", "intensity", [300, 300, 500, 500], "pysteps", None, None, True, "on"),
34 | ("opera", "intensity", None, "pysteps", None, None, True, "on"),
35 | ("saf", "intensity", None, "pysteps", None, None, True, "on"),
36 | ]
37 |
38 |
39 | @pytest.mark.parametrize(plt_arg_names, plt_arg_values)
40 | def test_visualization_plot_precip_field(
41 | source, plot_type, bbox, colorscale, probthr, title, colorbar, axis
42 | ):
43 | if plot_type == "intensity":
44 | field, metadata = get_precipitation_fields(0, 0, True, True, None, source)
45 | field = field.squeeze()
46 | field, metadata = conversion.to_rainrate(field, metadata)
47 |
48 | elif plot_type == "depth":
49 | field, metadata = get_precipitation_fields(0, 0, True, True, None, source)
50 | field = field.squeeze()
51 | field, metadata = conversion.to_raindepth(field, metadata)
52 |
53 | elif plot_type == "prob":
54 | field, metadata = get_precipitation_fields(0, 10, True, True, None, source)
55 | field, metadata = conversion.to_rainrate(field, metadata)
56 | field = ensemblestats.excprob(field, probthr)
57 |
58 | field_orig = field.copy()
59 | ax = plot_precip_field(
60 | field.copy(),
61 | ptype=plot_type,
62 | bbox=bbox,
63 | geodata=None,
64 | colorscale=colorscale,
65 | probthr=probthr,
66 | units=metadata["unit"],
67 | title=title,
68 | colorbar=colorbar,
69 | axis=axis,
70 | )
71 |
72 | # Check that plot_precip_field does not modify the input data
73 | field_orig = np.ma.masked_invalid(field_orig)
74 | field_orig.data[field_orig.mask] = -100
75 | field = np.ma.masked_invalid(field)
76 | field.data[field.mask] = -100
77 | assert np.array_equal(field_orig.data, field.data)
78 |
79 |
80 | if __name__ == "__main__":
81 | for i, args in enumerate(plt_arg_values):
82 | test_visualization_plot_precip_field(*args)
83 | plt.show()
84 |
--------------------------------------------------------------------------------
/pysteps/tests/test_plugins_support.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Script to test the plugin support.
4 |
5 | https://github.com/pySTEPS/cookiecutter-pysteps-plugin
6 | """
7 | import os
8 | import pytest
9 | import subprocess
10 | import sys
11 | import tempfile
12 |
13 |
14 | __ = pytest.importorskip("cookiecutter")
15 | from cookiecutter.main import cookiecutter
16 |
17 | PLUGIN_TEMPLATE_URL = "https://github.com/pysteps/cookiecutter-pysteps-plugin"
18 |
19 | from contextlib import contextmanager
20 | from pysteps import io, postprocessing
21 |
22 |
23 | def _check_installed_importer_plugin(import_func_name):
24 | # reload the pysteps module to detect the installed plugin
25 | io.discover_importers()
26 | print(io.importers_info())
27 | import_func_name = import_func_name.replace("importer_", "import_")
28 | assert hasattr(io.importers, import_func_name)
29 | func_name = import_func_name.replace("import_", "")
30 | assert func_name in io.interface._importer_methods
31 | importer = getattr(io.importers, import_func_name)
32 | importer("filename")
33 |
34 |
35 | def _check_installed_diagnostic_plugin(diagnostic_func_name):
36 | # reload the pysteps module to detect the installed plugin
37 | postprocessing.discover_postprocessors()
38 | assert hasattr(postprocessing.diagnostics, diagnostic_func_name)
39 | assert diagnostic_func_name in postprocessing.interface._diagnostics_methods
40 | diagnostic = getattr(postprocessing.diagnostics, diagnostic_func_name)
41 | diagnostic("filename")
42 |
43 |
44 | @contextmanager
45 | def _create_and_install_plugin(project_name, plugin_type):
46 | with tempfile.TemporaryDirectory() as tmpdirname:
47 | print(f"Installing plugin {project_name} providing a {plugin_type} module")
48 | cookiecutter(
49 | PLUGIN_TEMPLATE_URL,
50 | no_input=True,
51 | overwrite_if_exists=True,
52 | extra_context={
53 | "project_name": project_name,
54 | "plugin_type": plugin_type,
55 | },
56 | output_dir=tmpdirname,
57 | )
58 | # Install the plugin
59 | subprocess.check_call(
60 | [
61 | sys.executable,
62 | "-m",
63 | "pip",
64 | "install",
65 | "--force-reinstall",
66 | os.path.join(tmpdirname, project_name),
67 | ]
68 | )
69 |
70 | # The block below, together with the decorator used in this function, is used
71 | # to create a context manager that uninstalls the plugin package after the
72 | # tests finish (even if they fail).
73 | # https://docs.pytest.org/en/stable/fixture.html?highlight=context#fixture-finalization-executing-teardown-code
74 | try:
75 | yield project_name
76 | finally:
77 | _uninstall_plugin(project_name)
78 |
79 |
80 | def _uninstall_plugin(project_name):
81 | # Uninstall the plugin
82 | subprocess.check_call(
83 | [sys.executable, "-m", "pip", "uninstall", "-y", project_name]
84 | )
85 |
86 |
87 | def test_importers_plugins():
88 | with _create_and_install_plugin("pysteps-importer-institution-fun", "importer"):
89 | _check_installed_importer_plugin("importer_institution_fun")
90 |
91 |
92 | def test_diagnostic_plugins():
93 | with _create_and_install_plugin("pysteps-diagnostic-fun", "diagnostic"):
94 | _check_installed_diagnostic_plugin("diagnostic_fun")
95 |
--------------------------------------------------------------------------------
/pysteps/tests/test_postprocessing_ensemblestats.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 | from numpy.testing import assert_array_almost_equal
6 |
7 | from pysteps.postprocessing.ensemblestats import excprob, mean, banddepth
8 |
9 | # CREATE DATASETS TO TEST
10 |
11 | a = np.arange(9, dtype=float).reshape(3, 3)
12 | b = np.tile(a, (4, 1, 1))
13 | b1 = b.copy()
14 | b1[3] = np.nan
15 | a1 = a.copy()
16 | a1[:] = np.nan
17 | a2 = a.copy()
18 | a2[0, :] = np.nan
19 |
20 | # test data
21 | test_data = [
22 | (a, False, None, a),
23 | (b, False, None, a),
24 | (b1, True, None, a),
25 | (b1, False, None, a1),
26 | (b, False, 0.0, a),
27 | (b, False, 3.0, a2),
28 | (b, True, 3.0, a2),
29 | (b1, True, 3.0, a2),
30 | ]
31 |
32 |
33 | @pytest.mark.parametrize("X, ignore_nan, X_thr, expected", test_data)
34 | def test_ensemblestats_mean(X, ignore_nan, X_thr, expected):
35 | """
36 | Test ensemblestats mean."""
37 | assert_array_almost_equal(mean(X, ignore_nan, X_thr), expected)
38 |
39 |
40 | # test exceptions
41 | test_exceptions = [(0), (None), (a[0, :]), (np.tile(a, (4, 1, 1, 1)))]
42 |
43 |
44 | @pytest.mark.parametrize("X", test_exceptions)
45 | def test_exceptions_mean(X):
46 | with pytest.raises(Exception):
47 | mean(X)
48 |
49 |
50 | # test data
51 | b2 = b.copy()
52 | b2[2, 2, 2] = np.nan
53 |
54 | test_data = [
55 | (b, 2.0, False, np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
56 | (b2, 2.0, False, np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, np.nan]])),
57 | (b2, 2.0, True, np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
58 | ]
59 |
60 |
61 | @pytest.mark.parametrize("X, X_thr, ignore_nan, expected", test_data)
62 | def test_ensemblestats_excprob(X, X_thr, ignore_nan, expected):
63 | """Test ensemblestats excprob."""
64 | assert_array_almost_equal(excprob(X, X_thr, ignore_nan), expected)
65 |
66 |
67 | # test exceptions
68 | test_exceptions = [(0), (None), (a[0, :]), (a)]
69 |
70 |
71 | @pytest.mark.parametrize("X", test_exceptions)
72 | def test_exceptions_excprob(X):
73 | with pytest.raises(Exception):
74 | excprob(X, 2.0)
75 |
76 |
77 | # test data
78 | b3 = np.tile(a, (5, 1, 1)) + 1
79 | b3 *= np.arange(1, 6)[:, None, None]
80 | b3[2, 2, 2] = np.nan
81 |
82 | test_data = [
83 | (b3, 1, True, np.array([0.0, 0.75, 1.0, 0.75, 0.0])),
84 | (b3, None, False, np.array([0.4, 0.7, 0.8, 0.7, 0.4])),
85 | ]
86 |
87 |
88 | @pytest.mark.parametrize("X, thr, norm, expected", test_data)
89 | def test_ensemblestats_banddepth(X, thr, norm, expected):
90 | """Test ensemblestats banddepth."""
91 | assert_array_almost_equal(banddepth(X, thr, norm), expected)
92 |
--------------------------------------------------------------------------------
/pysteps/tests/test_tracking_tdating.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pysteps.tracking.tdating import dating
7 | from pysteps.utils import to_reflectivity
8 | from pysteps.tests.helpers import get_precipitation_fields
9 |
10 | arg_names = ("source", "dry_input", "output_splits_merges")
11 |
12 | arg_values = [
13 | ("mch", False, False),
14 | ("mch", False, False),
15 | ("mch", True, False),
16 | ("mch", False, True),
17 | ]
18 |
19 | arg_names_multistep = ("source", "len_timesteps", "output_splits_merges")
20 | arg_values_multistep = [
21 | ("mch", 6, False),
22 | ("mch", 6, True),
23 | ]
24 |
25 |
26 | @pytest.mark.parametrize(arg_names_multistep, arg_values_multistep)
27 | def test_tracking_tdating_dating_multistep(source, len_timesteps, output_splits_merges):
28 | pytest.importorskip("skimage")
29 |
30 | input_fields, metadata = get_precipitation_fields(
31 | 0, len_timesteps, True, True, 4000, source
32 | )
33 | input_fields, __ = to_reflectivity(input_fields, metadata)
34 |
35 | timelist = metadata["timestamps"]
36 |
37 | # First half of timesteps
38 | tracks_1, cells, labels = dating(
39 | input_fields[0 : len_timesteps // 2],
40 | timelist[0 : len_timesteps // 2],
41 | mintrack=1,
42 | output_splits_merges=output_splits_merges,
43 | )
44 | # Second half of timesteps
45 | tracks_2, cells, _ = dating(
46 | input_fields[len_timesteps // 2 - 2 :],
47 | timelist[len_timesteps // 2 - 2 :],
48 | mintrack=1,
49 | start=2,
50 | cell_list=cells,
51 | label_list=labels,
52 | output_splits_merges=output_splits_merges,
53 | )
54 |
55 | # Since we are adding cells, number of tracks should increase
56 | assert len(tracks_1) <= len(tracks_2)
57 |
58 | # Tracks should be continuous in time so time difference should not exceed timestep
59 | max_track_step = max([t.time.diff().max().seconds for t in tracks_2 if len(t) > 1])
60 | timestep = np.diff(timelist).max().seconds
61 | assert max_track_step <= timestep
62 |
63 | # IDs of unmatched cells should increase in every timestep
64 | for prev_df, cur_df in zip(cells[:-1], cells[1:]):
65 | prev_ids = set(prev_df.ID)
66 | cur_ids = set(cur_df.ID)
67 | new_ids = list(cur_ids - prev_ids)
68 | prev_unmatched = list(prev_ids - cur_ids)
69 | if len(prev_unmatched):
70 | assert np.all(np.array(new_ids) > max(prev_unmatched))
71 |
72 |
73 | @pytest.mark.parametrize(arg_names, arg_values)
74 | def test_tracking_tdating_dating(source, dry_input, output_splits_merges):
75 | pytest.importorskip("skimage")
76 | pandas = pytest.importorskip("pandas")
77 |
78 | if not dry_input:
79 | input, metadata = get_precipitation_fields(0, 2, True, True, 4000, source)
80 | input, __ = to_reflectivity(input, metadata)
81 | else:
82 | input = np.zeros((3, 50, 50))
83 | metadata = {"timestamps": ["00", "01", "02"]}
84 |
85 | timelist = metadata["timestamps"]
86 |
87 | cell_column_length = 9
88 | if output_splits_merges:
89 | cell_column_length = 15
90 |
91 | output = dating(
92 | input, timelist, mintrack=1, output_splits_merges=output_splits_merges
93 | )
94 |
95 | # Check output format
96 | assert isinstance(output, tuple)
97 | assert len(output) == 3
98 | assert isinstance(output[0], list)
99 | assert isinstance(output[1], list)
100 | assert isinstance(output[2], list)
101 | assert len(output[1]) == input.shape[0]
102 | assert len(output[2]) == input.shape[0]
103 | assert isinstance(output[1][0], pandas.DataFrame)
104 | assert isinstance(output[2][0], np.ndarray)
105 | assert output[1][0].shape[1] == cell_column_length
106 | assert output[2][0].shape == input.shape[1:]
107 | if not dry_input:
108 | assert len(output[0]) > 0
109 | assert isinstance(output[0][0], pandas.DataFrame)
110 | assert output[0][0].shape[1] == cell_column_length
111 | else:
112 | assert len(output[0]) == 0
113 | assert output[1][0].shape[0] == 0
114 | assert output[2][0].sum() == 0
115 |
--------------------------------------------------------------------------------
/pysteps/tests/test_utils_arrays.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 | from numpy.testing import assert_array_equal
6 |
7 | from pysteps.utils import arrays
8 |
9 | # compute_centred_coord_array
10 | test_data = [
11 | (2, 2, [np.array([[-1, 0]]).T, np.array([[-1, 0]])]),
12 | (3, 3, [np.array([[-1, 0, 1]]).T, np.array([[-1, 0, 1]])]),
13 | (3, 2, [np.array([[-1, 0, 1]]).T, np.array([[-1, 0]])]),
14 | (2, 3, [np.array([[-1, 0]]).T, np.array([[-1, 0, 1]])]),
15 | ]
16 |
17 |
18 | @pytest.mark.parametrize("M, N, expected", test_data)
19 | def test_compute_centred_coord_array(M, N, expected):
20 | """Test the compute_centred_coord_array."""
21 | assert_array_equal(arrays.compute_centred_coord_array(M, N)[0], expected[0])
22 | assert_array_equal(arrays.compute_centred_coord_array(M, N)[1], expected[1])
23 |
--------------------------------------------------------------------------------
/pysteps/tests/test_utils_reprojection.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import numpy as np
5 | import pytest
6 | import pysteps
7 | from pysteps.utils import reprojection as rpj
8 |
9 | pytest.importorskip("rasterio")
10 |
11 | root_path_radar = pysteps.rcparams.data_sources["rmi"]["root_path"]
12 |
13 | rel_path_radar = "20210704" # Different date, but that does not matter for this test
14 |
15 | filename_radar = os.path.join(
16 | root_path_radar, rel_path_radar, "20210704180500.rad.best.comp.rate.qpe.hdf"
17 | )
18 |
19 | # Open the radar data
20 | radar_array, _, metadata_dst = pysteps.io.importers.import_odim_hdf5(filename_radar)
21 |
22 | # Initialise dummy NWP data
23 | nwp_array = np.zeros((24, 564, 564))
24 |
25 | for t in range(nwp_array.shape[0]):
26 | nwp_array[t, 30 + t : 185 + t, 30 + 2 * t] = 0.1
27 | nwp_array[t, 30 + t : 185 + t, 31 + 2 * t] = 0.1
28 | nwp_array[t, 30 + t : 185 + t, 32 + 2 * t] = 1.0
29 | nwp_array[t, 30 + t : 185 + t, 33 + 2 * t] = 5.0
30 | nwp_array[t, 30 + t : 185 + t, 34 + 2 * t] = 5.0
31 | nwp_array[t, 30 + t : 185 + t, 35 + 2 * t] = 4.5
32 | nwp_array[t, 30 + t : 185 + t, 36 + 2 * t] = 4.5
33 | nwp_array[t, 30 + t : 185 + t, 37 + 2 * t] = 4.0
34 | nwp_array[t, 30 + t : 185 + t, 38 + 2 * t] = 2.0
35 | nwp_array[t, 30 + t : 185 + t, 39 + 2 * t] = 1.0
36 | nwp_array[t, 30 + t : 185 + t, 40 + 2 * t] = 0.5
37 | nwp_array[t, 30 + t : 185 + t, 41 + 2 * t] = 0.1
38 |
39 | nwp_proj = (
40 | "+proj=lcc +lon_0=4.55 +lat_1=50.8 +lat_2=50.8 "
41 | "+a=6371229 +es=0 +lat_0=50.8 +x_0=365950 +y_0=-365950.000000001"
42 | )
43 |
44 | metadata_src = dict(
45 | projection=nwp_proj,
46 | institution="Royal Meteorological Institute of Belgium",
47 | transform=None,
48 | zerovalue=0.0,
49 | threshold=0,
50 | unit="mm",
51 | accutime=5,
52 | xpixelsize=1300.0,
53 | ypixelsize=1300.0,
54 | yorigin="upper",
55 | cartesian_unit="m",
56 | x1=0.0,
57 | x2=731900.0,
58 | y1=-731900.0,
59 | y2=0.0,
60 | )
61 |
62 | steps_arg_names = (
63 | "radar_array",
64 | "nwp_array",
65 | "metadata_src",
66 | "metadata_dst",
67 | )
68 |
69 | steps_arg_values = [
70 | (radar_array, nwp_array, metadata_src, metadata_dst),
71 | ]
72 |
73 |
74 | @pytest.mark.parametrize(steps_arg_names, steps_arg_values)
75 | def test_utils_reproject_grids(
76 | radar_array,
77 | nwp_array,
78 | metadata_src,
79 | metadata_dst,
80 | ):
81 | # Reproject
82 | nwp_array_reproj, metadata_reproj = rpj.reproject_grids(
83 | nwp_array, radar_array, metadata_src, metadata_dst
84 | )
85 |
86 | # The tests
87 | assert (
88 | nwp_array_reproj.shape[0] == nwp_array.shape[0]
89 | ), "Time dimension does not have the same length as the source"
90 | assert (
91 | nwp_array_reproj.shape[1] == radar_array.shape[0]
92 | ), "y dimension does not have the same length as the radar composite"
93 | assert (
94 | nwp_array_reproj.shape[2] == radar_array.shape[1]
95 | ), "x dimension does not have the same length as the radar composite"
96 |
97 | assert (
98 | metadata_reproj["x1"] == metadata_dst["x1"]
99 | ), "x-value of the lower-left corner does not match the radar composite"
100 | assert (
101 | metadata_reproj["x2"] == metadata_dst["x2"]
102 | ), "x-value of the upper-right corner does not match the radar composite"
103 | assert (
104 | metadata_reproj["y1"] == metadata_dst["y1"]
105 | ), "y-value of the lower-left corner does not match the radar composite"
106 | assert (
107 | metadata_reproj["y2"] == metadata_dst["y2"]
108 | ), "y-value of the upper-right corner does not match the radar composite"
109 |
110 | assert (
111 | metadata_reproj["projection"] == metadata_dst["projection"]
112 | ), "projection is different than destionation projection"
113 |
--------------------------------------------------------------------------------
/pysteps/tests/test_utils_spectral.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | from pysteps.utils import spectral
4 |
5 | _rapsd_input_fields = [
6 | np.random.uniform(size=(255, 255)),
7 | np.random.uniform(size=(256, 256)),
8 | np.random.uniform(size=(255, 256)),
9 | np.random.uniform(size=(256, 255)),
10 | ]
11 |
12 |
13 | @pytest.mark.parametrize("field", _rapsd_input_fields)
14 | def test_rapsd(field):
15 | rapsd, freq = spectral.rapsd(field, return_freq=True)
16 |
17 | m, n = field.shape
18 | l = max(m, n)
19 |
20 | if l % 2 == 0:
21 | assert len(rapsd) == int(l / 2)
22 | else:
23 | assert len(rapsd) == int(l / 2 + 1)
24 | assert len(rapsd) == len(freq)
25 | assert np.all(freq >= 0.0)
26 |
--------------------------------------------------------------------------------
/pysteps/tests/test_verification_detcatscores.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 | from numpy.testing import assert_array_almost_equal
6 |
7 | from pysteps.verification import det_cat_fct
8 |
9 |
10 | # CREATE A LARGE DATASET TO MATCH
11 | # EXAMPLES IN
12 | # http://www.cawcr.gov.au/projects/verification/
13 |
14 | fct_hits = 1.0 * np.ones(82)
15 | obs_hits = 1.0 * np.ones(82)
16 | fct_fa = 1.0 * np.ones(38)
17 | obs_fa = 1.0 * np.zeros(38)
18 | fct_misses = 1.0 * np.zeros(23)
19 | obs_misses = 1.0 * np.ones(23)
20 | fct_cr = 1.0 * np.zeros(222)
21 | obs_cr = 1.0 * np.zeros(222)
22 | obs_data = np.concatenate([obs_hits, obs_fa, obs_misses, obs_cr])
23 | fct_data = np.concatenate([fct_hits, fct_fa, fct_misses, fct_cr])
24 |
25 | test_data = [
26 | ([0.0], [0.0], 0.0, None, []),
27 | ([1.0, 3.0], [2.0, 5.0], 0.0, None, []),
28 | ([1.0, 3.0], [2.0, 5.0], 0.0, "CSI", [1.0]),
29 | ([1.0, 3.0], [2.0, 5.0], 0.0, ("CSI", "FAR"), [1.0, 0.0]),
30 | ([1.0, 3.0], [2.0, 5.0], 0.0, ("lolo",), []),
31 | ([1.0, 3.0], [2.0, 5.0], 0.0, ("CSI", None, "FAR"), [1.0, 0.0]),
32 | ([1.0, 3.0], [2.0, 5.0], 1.0, ("CSI", None, "FAR"), [0.5, 0.0]),
33 | ([1.0, 3.0], [2.0, 5.0], 1.0, ("lolo"), []), # test unknown score
34 | (fct_data, obs_data, 0.0, ("ACC"), [0.83287671]), # ACCURACY score
35 | (fct_data, obs_data, 0.0, ("BIAS"), [1.1428571]), # BIAS score
36 | (fct_data, obs_data, 0.0, ("POD"), [0.7809524]), # POD score
37 | (fct_data, obs_data, 0.0, ("FAR"), [0.316667]), # FAR score
38 | # Probability of false detection (false alarm rate)
39 | (fct_data, obs_data, 0.0, ("FA"), [0.146154]),
40 | # CSI score
41 | (fct_data, obs_data, 0.0, ("CSI"), [0.573426]),
42 | # Heidke Skill Score
43 | (fct_data, obs_data, 0.0, ("HSS"), [0.608871]),
44 | # Hanssen-Kuipers Discriminant
45 | (fct_data, obs_data, 0.0, ("HK"), [0.6348]),
46 | # Gilbert Skill Score
47 | (fct_data, obs_data, 0.0, ("GSS"), [0.437682]),
48 | # Gilbert Skill Score
49 | (fct_data, obs_data, 0.0, ("ETS"), [0.437682]),
50 | # Symmetric extremal dependence index
51 | (fct_data, obs_data, 0.0, ("SEDI"), [0.789308]),
52 | # Matthews correlation coefficient
53 | (fct_data, obs_data, 0.0, ("MCC"), [0.611707]),
54 | # F1-score
55 | (fct_data, obs_data, 0.0, ("F1"), [0.728889]),
56 | ]
57 |
58 |
59 | @pytest.mark.parametrize("pred, obs, thr, scores, expected", test_data)
60 | def test_det_cat_fct(pred, obs, thr, scores, expected):
61 | """Test the det_cat_fct."""
62 | assert_array_almost_equal(
63 | list(det_cat_fct(pred, obs, thr, scores).values()), expected
64 | )
65 |
--------------------------------------------------------------------------------
/pysteps/tests/test_verification_probscores.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 | from numpy.testing import assert_array_almost_equal
6 |
7 | from pysteps.postprocessing.ensemblestats import excprob
8 | from pysteps.tests.helpers import get_precipitation_fields
9 | from pysteps.verification import probscores
10 |
11 | precip = get_precipitation_fields(num_next_files=10, return_raw=True)
12 |
13 | # CRPS
14 | test_data = [(precip[:10], precip[-1], 0.01470871)]
15 |
16 |
17 | @pytest.mark.parametrize("X_f, X_o, expected", test_data)
18 | def test_CRPS(X_f, X_o, expected):
19 | """Test the CRPS."""
20 | assert_array_almost_equal(probscores.CRPS(X_f, X_o), expected)
21 |
22 |
23 | # reldiag
24 | test_data = [(precip[:10], precip[-1], 1.0, 10, 10, 3.38751492)]
25 |
26 |
27 | @pytest.mark.parametrize("X_f, X_o, X_min, n_bins, min_count, expected", test_data)
28 | def test_reldiag_sum(X_f, X_o, X_min, n_bins, min_count, expected):
29 | """Test the reldiag."""
30 | P_f = excprob(X_f, X_min, ignore_nan=False)
31 | assert_array_almost_equal(
32 | np.sum(probscores.reldiag(P_f, X_o, X_min, n_bins, min_count)[1]), expected
33 | )
34 |
35 |
36 | # ROC_curve
37 | test_data = [(precip[:10], precip[-1], 1.0, 10, True, 0.79557329)]
38 |
39 |
40 | @pytest.mark.parametrize(
41 | "X_f, X_o, X_min, n_prob_thrs, compute_area, expected", test_data
42 | )
43 | def test_ROC_curve_area(X_f, X_o, X_min, n_prob_thrs, compute_area, expected):
44 | """Test the ROC_curve."""
45 | P_f = excprob(X_f, X_min, ignore_nan=False)
46 | assert_array_almost_equal(
47 | probscores.ROC_curve(P_f, X_o, X_min, n_prob_thrs, compute_area)[2], expected
48 | )
49 |
--------------------------------------------------------------------------------
/pysteps/tests/test_verification_salscores.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from pysteps.tests.helpers import get_precipitation_fields
7 | from pysteps.verification.salscores import sal
8 | from pysteps.utils import to_rainrate, to_reflectivity
9 |
10 | test_data = [
11 | (to_rainrate, 1 / 15),
12 | (to_reflectivity, None),
13 | ]
14 |
15 |
16 | @pytest.mark.parametrize("converter, thr_factor", test_data)
17 | class TestSAL:
18 | pytest.importorskip("pandas")
19 | pytest.importorskip("skimage")
20 |
21 | def test_sal_zeros(self, converter, thr_factor):
22 | """Test the SAL verification method."""
23 | precip, metadata = get_precipitation_fields(
24 | num_prev_files=0, log_transform=False, metadata=True
25 | )
26 | precip, metadata = converter(precip.filled(np.nan), metadata)
27 | result = sal(precip * 0, precip * 0, thr_factor)
28 | assert np.isnan(result).all()
29 | result = sal(precip * 0, precip, thr_factor)
30 | assert result[:2] == (-2, -2)
31 | assert np.isnan(result[2])
32 | result = sal(precip, precip * 0, thr_factor)
33 | assert result[:2] == (2, 2)
34 | assert np.isnan(result[2])
35 |
36 | def test_sal_same_image(self, converter, thr_factor):
37 | """Test the SAL verification method."""
38 | precip, metadata = get_precipitation_fields(
39 | num_prev_files=0, log_transform=False, metadata=True
40 | )
41 | precip, metadata = converter(precip.filled(np.nan), metadata)
42 | result = sal(precip, precip, thr_factor)
43 | assert isinstance(result, tuple)
44 | assert len(result) == 3
45 | assert np.allclose(result, [0, 0, 0])
46 |
47 | def test_sal_translation(self, converter, thr_factor):
48 | precip, metadata = get_precipitation_fields(
49 | num_prev_files=0, log_transform=False, metadata=True
50 | )
51 | precip, metadata = converter(precip.filled(np.nan), metadata)
52 | precip_translated = np.roll(precip, 10, axis=0)
53 | result = sal(precip, precip_translated, thr_factor)
54 | assert np.allclose(result[0], 0)
55 | assert np.allclose(result[1], 0)
56 | assert not np.allclose(result[2], 0)
57 |
--------------------------------------------------------------------------------
/pysteps/tests/test_verification_spatialscores.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import pytest
4 | from numpy.testing import assert_array_almost_equal
5 |
6 | from pysteps.tests.helpers import get_precipitation_fields
7 | from pysteps.verification import spatialscores
8 |
9 | R = get_precipitation_fields(num_prev_files=1, return_raw=True)
10 | test_data = [
11 | (R[0], R[1], "FSS", [1], [10], None, 0.85161531),
12 | (R[0], R[1], "BMSE", [1], None, "Haar", 0.99989651),
13 | ]
14 |
15 |
16 | @pytest.mark.parametrize("X_f, X_o, name, thrs, scales, wavelet, expected", test_data)
17 | def test_intensity_scale(X_f, X_o, name, thrs, scales, wavelet, expected):
18 | """Test the intensity_scale."""
19 | if name == "BMSE":
20 | pytest.importorskip("pywt")
21 |
22 | assert_array_almost_equal(
23 | spatialscores.intensity_scale(X_f, X_o, name, thrs, scales, wavelet)[0][0],
24 | expected,
25 | )
26 |
27 |
28 | R = get_precipitation_fields(num_prev_files=3, return_raw=True)
29 | test_data = [
30 | (R[:2], R[2:], "FSS", [1], [10], None),
31 | (R[:2], R[2:], "BMSE", [1], None, "Haar"),
32 | ]
33 |
34 |
35 | @pytest.mark.parametrize("R1, R2, name, thrs, scales, wavelet", test_data)
36 | def test_intensity_scale_methods(R1, R2, name, thrs, scales, wavelet):
37 | """
38 | Test the intensity_scale merge."""
39 | if name == "BMSE":
40 | pytest.importorskip("pywt")
41 |
42 | # expected result
43 | int = spatialscores.intensity_scale_init(name, thrs, scales, wavelet)
44 | spatialscores.intensity_scale_accum(int, R1[0], R1[1])
45 | spatialscores.intensity_scale_accum(int, R2[0], R2[1])
46 | expected = spatialscores.intensity_scale_compute(int)[0][0]
47 |
48 | # init
49 | int_1 = spatialscores.intensity_scale_init(name, thrs, scales, wavelet)
50 | int_2 = spatialscores.intensity_scale_init(name, thrs, scales, wavelet)
51 |
52 | # accum
53 | spatialscores.intensity_scale_accum(int_1, R1[0], R1[1])
54 | spatialscores.intensity_scale_accum(int_2, R2[0], R2[1])
55 |
56 | # merge
57 | int = spatialscores.intensity_scale_merge(int_1, int_2)
58 |
59 | # compute
60 | score = spatialscores.intensity_scale_compute(int)[0][0]
61 |
62 | assert_array_almost_equal(score, expected)
63 |
--------------------------------------------------------------------------------
/pysteps/timeseries/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Methods and models for time series analysis."""
3 |
--------------------------------------------------------------------------------
/pysteps/tracking/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Implementations of feature tracking methods."""
3 |
4 | from pysteps.tracking.interface import get_method
5 |
--------------------------------------------------------------------------------
/pysteps/tracking/interface.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.tracking.interface
4 | ===========================
5 |
6 | Interface to the tracking module. It returns a callable function for tracking
7 | features.
8 |
9 | .. autosummary::
10 | :toctree: ../generated/
11 |
12 | get_method
13 | """
14 |
15 | from pysteps.tracking import lucaskanade
16 | from pysteps.tracking import tdating
17 |
18 | _tracking_methods = dict()
19 | _tracking_methods["lucaskanade"] = lucaskanade.track_features
20 | _tracking_methods["tdating"] = tdating.dating
21 |
22 |
23 | def get_method(name):
24 | """
25 | Return a callable function for tracking features.
26 |
27 | Description:
28 | Return a callable function for tracking features on a sequence of input images.
29 |
30 | Implemented methods:
31 |
32 | +-----------------+--------------------------------------------------------+
33 | | Name | Description |
34 | +=================+========================================================+
35 | | lucaskanade | Wrapper to the OpenCV implementation of the |
36 | | | Lucas-Kanade tracking algorithm |
37 | +-----------------+--------------------------------------------------------+
38 | | tdating | Thunderstorm Detection and Tracking (DATing) module |
39 | +-----------------+--------------------------------------------------------+
40 | """
41 | if isinstance(name, str):
42 | name = name.lower()
43 | else:
44 | raise TypeError(
45 | "Only strings supported for the method's names.\n"
46 | + "Available names:"
47 | + str(list(_tracking_methods.keys()))
48 | ) from None
49 |
50 | try:
51 | return _tracking_methods[name]
52 | except KeyError:
53 | raise ValueError(
54 | "Unknown tracking method {}\n".format(name)
55 | + "The available methods are:"
56 | + str(list(_tracking_methods.keys()))
57 | ) from None
58 |
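A minimal usage sketch for get_method, mirroring the dry-input case in
pysteps/tests/test_tracking_tdating.py above (requires scikit-image and pandas;
the all-zero field and string timestamps are placeholder inputs, so the
resulting track list is empty):

    import numpy as np
    from pysteps import tracking

    tracker = tracking.get_method("tdating")
    # A dry (all-zero) input exercises the interface without example data.
    tracks, cells, labels = tracker(np.zeros((3, 50, 50)), ["00", "01", "02"], mintrack=1)
    print(len(tracks), len(cells), len(labels))  # 0 tracks, one cell table and label array per timestep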
--------------------------------------------------------------------------------
/pysteps/utils/__init__.py:
--------------------------------------------------------------------------------
1 | """Miscellaneous utility functions."""
2 |
3 | from .arrays import *
4 | from .cleansing import *
5 | from .conversion import *
6 | from .dimension import *
7 | from .images import *
8 | from .interface import get_method
9 | from .interpolate import *
10 | from .fft import *
11 | from .spectral import *
12 | from .tapering import *
13 | from .transformation import *
14 | from .reprojection import *
15 |
--------------------------------------------------------------------------------
/pysteps/utils/arrays.py:
--------------------------------------------------------------------------------
1 | """
2 | pysteps.utils.arrays
3 | ====================
4 |
5 | Utility methods for creating and processing arrays.
6 |
7 | .. autosummary::
8 | :toctree: ../generated/
9 |
10 | compute_centred_coord_array
11 | """
12 |
13 | import numpy as np
14 |
15 |
16 | def compute_centred_coord_array(M, N):
17 | """
18 | Compute a 2D coordinate array, where the origin is at the center.
19 |
20 | Parameters
21 | ----------
22 | M : int
23 | The height of the array.
24 | N : int
25 | The width of the array.
26 |
27 | Returns
28 | -------
29 | out : ndarray
30 | The coordinate array.
31 |
32 | Examples
33 | --------
34 | >>> compute_centred_coord_array(5, 5)
35 |
36 | (array([[-2],\n
37 | [-1],\n
38 | [ 0],\n
39 | [ 1],\n
40 | [ 2]]), array([[-2, -1, 0, 1, 2]]))
41 |
42 | """
43 |
44 | if M % 2 == 1:
45 | s1 = np.s_[-int(M / 2) : int(M / 2) + 1]
46 | else:
47 | s1 = np.s_[-int(M / 2) : int(M / 2)]
48 |
49 | if N % 2 == 1:
50 | s2 = np.s_[-int(N / 2) : int(N / 2) + 1]
51 | else:
52 | s2 = np.s_[-int(N / 2) : int(N / 2)]
53 |
54 | YC, XC = np.ogrid[s1, s2]
55 |
56 | return YC, XC
57 |
--------------------------------------------------------------------------------
/pysteps/utils/check_norain.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from pysteps import utils
4 |
5 |
6 | def check_norain(precip_arr, precip_thr=None, norain_thr=0.0, win_fun=None):
7 | """
8 |
9 | Parameters
10 | ----------
11 | precip_arr: array-like
12 | An at least 2 dimensional array containing the input precipitation field
13 | precip_thr: float, optional
14 | Specifies the threshold value for minimum observable precipitation intensity. If None, the
15 | minimum value over the domain is taken.
16 | norain_thr: float, optional
17 | Specifies the threshold value for the fraction of rainy pixels in precip_arr at or below which
18 | the field is considered to contain no rain. Defaults to 0.0.
19 | win_fun: {'hann', 'tukey', None}
20 | Optional tapering function to be applied to the input field, generated with
21 | :py:func:`pysteps.utils.tapering.compute_window_function`
22 | (default None).
23 | This parameter needs to match the window function used later in the noise generation step;
24 | otherwise this method may report rain even though no rain is left after the tapering is
25 | applied, which then leads to a ValueError.
26 | Returns
27 | -------
28 | norain: bool
29 | Returns whether the fraction of rainy pixels is below the norain_thr threshold.
30 |
31 | """
32 |
33 | if win_fun is not None:
34 | tapering = utils.tapering.compute_window_function(
35 | precip_arr.shape[-2], precip_arr.shape[-1], win_fun
36 | )
37 | else:
38 | tapering = np.ones((precip_arr.shape[-2], precip_arr.shape[-1]))
39 |
40 | tapering_mask = tapering == 0.0
41 | masked_precip = precip_arr.copy()
42 | masked_precip[..., tapering_mask] = np.nanmin(precip_arr)
43 |
44 | if precip_thr is None:
45 | precip_thr = np.nanmin(masked_precip)
46 | rain_pixels = masked_precip[masked_precip > precip_thr]
47 | norain = rain_pixels.size / masked_precip.size <= norain_thr
48 | print(
49 | f"Rain fraction is: {str(rain_pixels.size / masked_precip.size)}, while minimum fraction is {str(norain_thr)}"
50 | )
51 | return norain
52 |
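A small sketch of calling check_norain on synthetic data; the array shape, the
rainy patch and the thresholds below are illustrative, not pysteps defaults
beyond those documented above:

    import numpy as np
    from pysteps.utils.check_norain import check_norain

    precip = np.zeros((200, 200))
    precip[80:120, 80:120] = 1.0  # rainy patch covering ~4% of the domain

    print(check_norain(precip, precip_thr=0.1))                  # False: any rainy pixel counts
    print(check_norain(precip, precip_thr=0.1, norain_thr=0.1))  # True: 4% <= 10% threshold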
--------------------------------------------------------------------------------
/pysteps/utils/images.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.utils.images
4 | ====================
5 |
6 | Image processing routines for pysteps.
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | morph_opening
12 | """
13 | import numpy as np
14 | from numpy.ma.core import MaskedArray
15 |
16 | from pysteps.exceptions import MissingOptionalDependency
17 |
18 | try:
19 | import cv2
20 |
21 | CV2_IMPORTED = True
22 | except ImportError:
23 | CV2_IMPORTED = False
24 |
25 |
26 | def morph_opening(input_image, thr, n):
27 | """
28 | Filter out small scale noise on the image by applying a binary
29 | morphological opening, that is, erosion followed by dilation.
30 |
31 | .. _MaskedArray:\
32 | https://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray
33 |
34 | .. _ndarray:\
35 | https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
36 |
37 | Parameters
38 | ----------
39 | input_image: ndarray_ or MaskedArray_
40 | Array of shape (m, n) containing the input image.
41 | thr: float
42 | The threshold used to convert the image into a binary image.
43 | n: int
44 | The structuring element size [pixels].
45 |
46 | Returns
47 | -------
48 | input_image: ndarray_ or MaskedArray_
49 | Array of shape (m,n) containing the filtered image.
50 | """
51 | if not CV2_IMPORTED:
52 | raise MissingOptionalDependency(
53 | "opencv package is required for the morphologyEx "
54 | "routine but it is not installed"
55 | )
56 |
57 | input_image = input_image.copy()
58 |
59 | # Check if a MaskedArray is used. If not, mask the ndarray
60 | to_ndarray = False
61 | if not isinstance(input_image, MaskedArray):
62 | to_ndarray = True
63 | input_image = np.ma.masked_invalid(input_image)
64 |
65 | np.ma.set_fill_value(input_image, input_image.min())
66 |
67 | # Convert to binary image
68 | field_bin = np.ndarray.astype(input_image.filled() > thr, "uint8")
69 |
70 | # Build a structuring element of size n
71 | kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (n, n))
72 |
73 | # Apply morphological opening (i.e. erosion then dilation)
74 | field_bin_out = cv2.morphologyEx(field_bin, cv2.MORPH_OPEN, kernel)
75 |
76 | # Build mask to be applied on the original image
77 | mask = (field_bin - field_bin_out) > 0
78 |
79 | # Filter out small isolated pixels based on mask
80 | input_image[mask] = np.nanmin(input_image)
81 |
82 | if to_ndarray:
83 | input_image = np.array(input_image)
84 |
85 | return input_image
86 |
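A short sketch of morph_opening on synthetic data (requires OpenCV); the field
values, threshold and kernel size are illustrative:

    import numpy as np
    from pysteps.utils.images import morph_opening

    field = np.zeros((100, 100))
    field[40:60, 40:60] = 5.0  # a coherent precipitation block
    field[10, 10] = 5.0        # an isolated noisy pixel

    filtered = morph_opening(field, thr=1.0, n=3)
    # The isolated pixel is reset to the field minimum while the block is preserved.
    print(filtered[10, 10], filtered[50, 50])  # 0.0 5.0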
--------------------------------------------------------------------------------
/pysteps/utils/tapering.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.utils.tapering
4 | ======================
5 |
6 | Implementations of window (tapering) functions applied before computing the FFT.
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | compute_mask_window_function
12 | compute_window_function
13 | """
14 |
15 | import numpy as np
16 | from scipy.spatial import cKDTree
17 |
18 |
19 | def compute_mask_window_function(mask, func, **kwargs):
20 | """
21 | Compute window function for a two-dimensional area defined by a
22 | non-rectangular mask. The window function is computed based on the distance
23 | to the nearest boundary point of the mask. Window function-specific
24 | parameters are given as keyword arguments.
25 |
26 | Parameters
27 | ----------
28 | mask: array_like
29 | Two-dimensional boolean array containing the mask.
30 | Pixels with True/False are inside/outside the mask.
31 | func: str
32 | The name of the window function. The currently implemented function is
33 | 'tukey'.
34 |
35 | Returns
36 | -------
37 | out: array
38 | Array containing the tapering weights.
39 | """
40 | R = _compute_mask_distances(mask)
41 |
42 | if func == "hann":
43 | raise NotImplementedError("Hann function has not been implemented")
44 | elif func == "tukey":
45 | r_max = kwargs.get("r_max", 10.0)
46 |
47 | return _tukey_masked(R, r_max, np.isfinite(R))
48 | else:
49 | raise ValueError("invalid window function '%s'" % func)
50 |
51 |
52 | def compute_window_function(m, n, func, **kwargs):
53 | """
54 | Compute window function for a two-dimensional rectangular region. Window
55 | function-specific parameters are given as keyword arguments.
56 |
57 | Parameters
58 | ----------
59 | m: int
60 | Height of the array.
61 | n: int
62 | Width of the array.
63 | func: str
64 | The name of the window function.
65 | The currently implemented functions are
66 | 'hann' and 'tukey'.
67 |
68 | Other Parameters
69 | ----------------
70 | alpha: float
71 | Applicable if func is 'tukey'.
72 |
73 | Notes
74 | -----
75 | Two-dimensional tapering weights are computed from one-dimensional window
76 | functions using w(r), where r is the distance from the center of the
77 | region.
78 |
79 | Returns
80 | -------
81 | out: array
82 | Array of shape (m, n) containing the tapering weights.
83 | """
84 | X, Y = np.meshgrid(np.arange(n), np.arange(m))
85 | R = np.sqrt(((X / n) - 0.5) ** 2 + ((Y / m) - 0.5) ** 2)
86 |
87 | if func == "hann":
88 | return _hann(R)
89 | elif func == "tukey":
90 | alpha = kwargs.get("alpha", 0.2)
91 |
92 | return _tukey(R, alpha)
93 | else:
94 | raise ValueError("invalid window function '%s'" % func)
95 |
96 |
97 | def _compute_mask_distances(mask):
98 | X, Y = np.meshgrid(np.arange(mask.shape[1]), np.arange(mask.shape[0]))
99 |
100 | tree = cKDTree(np.vstack([X[~mask], Y[~mask]]).T)
101 | r, i = tree.query(np.vstack([X[mask], Y[mask]]).T, k=1)
102 |
103 | R = np.ones(mask.shape) * np.nan
104 | R[Y[mask], X[mask]] = r
105 |
106 | return R
107 |
108 |
109 | def _hann(R):
110 | W = np.ones_like(R)
111 | mask = R > 0.5
112 |
113 | W[mask] = 0.0
114 | W[~mask] = 0.5 * (1.0 - np.cos(2.0 * np.pi * (R[~mask] + 0.5)))
115 |
116 | return W
117 |
118 |
119 | def _tukey(R, alpha):
120 | W = np.ones_like(R)
121 |
122 | mask1 = R < 0.5
123 | mask2 = R > 0.5 * (1.0 - alpha)
124 | mask = np.logical_and(mask1, mask2)
125 | W[mask] = 0.5 * (
126 | 1.0 + np.cos(np.pi * (R[mask] / (alpha * 0.5) - 1.0 / alpha + 1.0))
127 | )
128 | mask = R >= 0.5
129 | W[mask] = 0.0
130 |
131 | return W
132 |
133 |
134 | def _tukey_masked(R, r_max, mask):
135 | W = np.ones_like(R)
136 |
137 | mask_r = R < r_max
138 | mask_ = np.logical_and(mask, mask_r)
139 | W[mask_] = 0.5 * (1.0 + np.cos(np.pi * (R[mask_] / r_max - 1.0)))
140 | W[~mask] = np.nan
141 |
142 | return W
143 |
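A brief sketch of compute_window_function; the Tukey alpha is the documented
default and the field size is arbitrary:

    import numpy as np
    from pysteps.utils.tapering import compute_window_function

    weights = compute_window_function(256, 256, "tukey", alpha=0.2)
    print(weights.shape)  # (256, 256), ~1 in the centre and 0 towards the corners

    # Taper a field before an FFT-based analysis.
    tapered = np.random.uniform(size=(256, 256)) * weights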
--------------------------------------------------------------------------------
/pysteps/verification/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Methods for verification of deterministic, probabilistic and ensemble forecasts."""
3 |
4 | from .interface import get_method
5 | from .detcatscores import *
6 | from .detcontscores import *
7 | from .ensscores import *
8 | from .plots import *
9 | from .probscores import *
10 | from .spatialscores import *
11 | from .salscores import *
12 |
--------------------------------------------------------------------------------
/pysteps/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Methods for plotting precipitation and motion fields."""
3 |
4 | from .motionfields import *
5 | from .precipfields import *
6 | from .animations import *
7 | from .spectral import *
8 | from .thunderstorms import *
9 |
--------------------------------------------------------------------------------
/pysteps/visualization/spectral.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.visualization.spectral
4 | ==============================
5 |
6 | Methods for plotting Fourier spectra.
7 |
8 | .. autosummary::
9 | :toctree: ../generated/
10 |
11 | plot_spectrum1d
12 | """
13 |
14 | import matplotlib.pylab as plt
15 | import numpy as np
16 |
17 |
18 | def plot_spectrum1d(
19 | fft_freq,
20 | fft_power,
21 | x_units=None,
22 | y_units=None,
23 | wavelength_ticks=None,
24 | color="k",
25 | lw=1.0,
26 | label=None,
27 | ax=None,
28 | **kwargs,
29 | ):
30 | """
31 | Plot a radially averaged Fourier power spectrum on a log-log scale.
32 |
33 | Parameters
34 | ----------
35 | fft_freq: array-like
36 | 1d array containing the Fourier frequencies computed with the function
37 | :py:func:`pysteps.utils.spectral.rapsd`.
38 | fft_power: array-like
39 | 1d array containing the radially averaged Fourier power spectrum
40 | computed with the function :py:func:`pysteps.utils.spectral.rapsd`.
41 | x_units: str, optional
42 | Units of the X variable (distance, e.g. "km").
43 | y_units: str, optional
44 | Units of the Y variable (amplitude, e.g. "dBR").
45 | wavelength_ticks: array-like, optional
46 | List of wavelengths where to show xticklabels.
47 | color: str, optional
48 | Line color.
49 | lw: float, optional
50 | Line width.
51 | label: str, optional
52 | Label (for legend).
53 | ax: Axes, optional
54 | Plot axes.
55 |
56 | Returns
57 | -------
58 | ax: Axes
59 | Plot axes
60 | """
61 | # Check input dimensions
62 | n_freq = len(fft_freq)
63 | n_pow = len(fft_power)
64 | if n_freq != n_pow:
65 | raise ValueError(
66 | f"Dimensions of the 1d input arrays must be equal. {n_freq} vs {n_pow}"
67 | )
68 |
69 | if ax is None:
70 | ax = plt.subplot(111)
71 |
72 | # Plot spectrum in log-log scale
73 | ax.plot(
74 | 10 * np.log10(fft_freq),
75 | 10 * np.log10(fft_power),
76 | color=color,
77 | linewidth=lw,
78 | label=label,
79 | **kwargs,
80 | )
81 |
82 | # X-axis
83 | if wavelength_ticks is not None:
84 | wavelength_ticks = np.array(wavelength_ticks)
85 | freq_ticks = 1 / wavelength_ticks
86 | ax.set_xticks(10 * np.log10(freq_ticks))
87 | ax.set_xticklabels(wavelength_ticks)
88 | if x_units is not None:
89 | ax.set_xlabel(f"Wavelength [{x_units}]")
90 | else:
91 | if x_units is not None:
92 | ax.set_xlabel(f"Frequency [1/{x_units}]")
93 |
94 | # Y-axis
95 | if y_units is not None:
96 | # { -> {{ with f-strings
97 | power_units = rf"$10log_{{ 10 }}(\frac{{ {y_units}^2 }}{{ {x_units} }})$"
98 | ax.set_ylabel(f"Power {power_units}")
99 |
100 | return ax
101 |
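A compact sketch combining pysteps.utils.spectral.rapsd with plot_spectrum1d,
following the call pattern of the rapsd test above; the random field, units and
wavelength ticks are illustrative:

    import numpy as np
    import matplotlib.pyplot as plt
    from pysteps.utils import spectral
    from pysteps.visualization.spectral import plot_spectrum1d

    field = np.random.uniform(size=(256, 256))
    fft_power, fft_freq = spectral.rapsd(field, return_freq=True)

    ax = plot_spectrum1d(
        fft_freq, fft_power, x_units="km", y_units="dBR",
        wavelength_ticks=[128, 64, 32, 16, 8, 4, 2],
    )
    plt.show()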
--------------------------------------------------------------------------------
/pysteps/visualization/thunderstorms.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | pysteps.visualization.tstorm
4 | ============================
5 |
6 | Methods for plotting thunderstorm cells.
7 |
8 | Created on Wed Nov 4 11:09:44 2020
9 |
10 | @author: mfeldman
11 |
12 | .. autosummary::
13 | :toctree: ../generated/
14 |
15 | plot_track
16 | plot_cart_contour
17 | """
18 |
19 | import matplotlib.pyplot as plt
20 | import numpy as np
21 |
22 | ################################
23 | # track and contour plots zorder
24 | # - precipitation: 40
25 |
26 |
27 | def plot_track(track_list, geodata=None, ref_shape=None):
28 | """
29 | Plot storm tracks.
30 |
31 | .. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
32 |
33 | Parameters
34 | ----------
35 | track_list: list
36 | List of tracks provided by DATing.
37 | geodata: dictionary or None, optional
38 | Optional dictionary containing geographical information about
39 | the field. If not None, plots the contours in a georeferenced frame.
40 | ref_shape: (vertical, horizontal)
41 | Shape of the 2D precipitation field used to find the cells' contours.
42 | This is only needed if `geodata=None`.
43 |
44 | IMPORTANT: If `geodata=None` it is assumed that the y-origin of the reference
45 | precipitation fields is the upper-left corner (yorigin="upper").
46 |
47 | Returns
48 | -------
49 | ax: fig Axes_
50 | Figure axes.
51 | """
52 | ax = plt.gca()
53 | pix2coord = _pix2coord_factory(geodata, ref_shape)
54 |
55 | color = iter(plt.cm.spring(np.linspace(0, 1, len(track_list))))
56 | for track in track_list:
57 | cen_x, cen_y = pix2coord(track.cen_x, track.cen_y)
58 | ax.plot(cen_x, cen_y, c=next(color), zorder=40)
59 | return ax
60 |
61 |
62 | def plot_cart_contour(contours, geodata=None, ref_shape=None):
63 | """
64 | Plots input image with identified cell contours.
65 | Also, this function can be used to add points of interest to a plot.
66 |
67 | .. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
68 |
69 | Parameters
70 | ----------
71 | contours: list or dataframe-element
72 | list of identified cell contours.
73 | geodata: dictionary or None, optional
74 | Optional dictionary containing geographical information about
75 | the field. If not None, plots the contours in a georeferenced frame.
76 | ref_shape: (vertical, horizontal)
77 | Shape of the 2D precipitation field used to find the cells' contours.
78 | This is only needed if `geodata=None`.
79 |
80 | IMPORTANT: If `geodata=None` it is assumed that the y-origin of the reference
81 | precipitation fields is the upper-left corner (yorigin="upper").
82 |
83 | Returns
84 | -------
85 | ax: fig Axes_
86 | Figure axes.
87 | """
88 | ax = plt.gca()
89 | pix2coord = _pix2coord_factory(geodata, ref_shape)
90 |
91 | contours = list(contours)
92 | for contour in contours:
93 | for c in contour:
94 | x, y = pix2coord(c[:, 1], c[:, 0])
95 | ax.plot(x, y, color="black", zorder=40)
96 | return ax
97 |
98 |
99 | def _pix2coord_factory(geodata, ref_shape):
100 | """
101 | Construct the pix2coord transformation function."""
102 | if geodata is not None:
103 |
104 | def pix2coord(x_input, y_input):
105 | x = geodata["x1"] + geodata["xpixelsize"] * x_input
106 | if geodata["yorigin"] == "lower":
107 | y = geodata["y1"] + geodata["ypixelsize"] * y_input
108 | else:
109 | y = geodata["y2"] - geodata["ypixelsize"] * y_input
110 | return x, y
111 |
112 | else:
113 | if ref_shape is None:
114 | raise ValueError("'ref_shape' can't be None when not geodata is available.")
115 |
116 | # Default pix2coord function when no geographical information is present.
117 | def pix2coord(x_input, y_input):
118 | # yorigin is "upper" by default
119 | return x_input, ref_shape[0] - y_input
120 |
121 | return pix2coord
122 |
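A minimal sketch wiring the DATing output into plot_track; the dry all-zero
input (as in the tdating tests) keeps the example self-contained, so the drawn
track list is empty, whereas with real reflectivity data each track would be
drawn as a coloured line of cell centroids:

    import numpy as np
    from pysteps.tracking.tdating import dating
    from pysteps.visualization.thunderstorms import plot_track

    tracks, cells, labels = dating(np.zeros((3, 50, 50)), ["00", "01", "02"], mintrack=1)
    ax = plot_track(tracks, ref_shape=(50, 50))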
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | opencv-python
3 | pillow
4 | pyproj
5 | scipy
6 | matplotlib
7 | jsmin
8 | jsonschema
9 | netCDF4
10 |
--------------------------------------------------------------------------------
/requirements_dev.txt:
--------------------------------------------------------------------------------
1 | # Base dependencies
2 | python>=3.10
3 | numpy
4 | opencv-python
5 | pillow
6 | pyproj
7 | scipy
8 | matplotlib
9 | jsmin
10 | jsonschema
11 | netCDF4
12 |
13 | # Optional dependencies
14 | dask
15 | pyfftw
16 | cartopy>=0.18
17 | h5py
18 | scikit-image
19 | pandas
20 | rasterio
21 |
22 | # Testing
23 | pytest
24 |
25 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import sys
4 |
5 | from setuptools import find_packages, setup
6 | from setuptools.extension import Extension
7 |
8 | try:
9 | from Cython.Build import cythonize
10 | except ImportError:
11 | raise RuntimeError(
12 | "Cython required for running the package installation\n"
13 | + "Try installing it with:\n"
14 | + "$> pip install cython"
15 | )
16 |
17 | try:
18 | import numpy
19 | except ImportError:
20 | raise RuntimeError(
21 | "Numpy required for running the package installation\n"
22 | + "Try installing it with:\n"
23 | + "$> pip install numpy"
24 | )
25 |
26 | # Define common arguments used to compile the extensions
27 | common_link_args = ["-fopenmp"]
28 | common_compile_args = ["-fopenmp", "-O3", "-ffast-math"]
29 | common_include = [numpy.get_include()]
30 |
31 | if sys.platform.startswith("darwin"):
32 | common_link_args.append("-Wl,-rpath,/usr/local/opt/gcc/lib/gcc/9/")
33 |
34 | extensions_data = {
35 | "pysteps.motion._proesmans": {"sources": ["pysteps/motion/_proesmans.pyx"]},
36 | "pysteps.motion._vet": {"sources": ["pysteps/motion/_vet.pyx"]},
37 | }
38 |
39 | extensions = []
40 |
41 | for name, data in extensions_data.items():
42 | include = data.get("include", common_include)
43 |
44 | extra_compile_args = data.get("extra_compile_args", common_compile_args)
45 |
46 | extra_link_args = data.get("extra_link_args", common_link_args)
47 |
48 | pysteps_extension = Extension(
49 | name,
50 | sources=data["sources"],
51 | depends=data.get("depends", []),
52 | include_dirs=include,
53 | language=data.get("language", "c"),
54 | define_macros=data.get("macros", []),
55 | extra_compile_args=extra_compile_args,
56 | extra_link_args=extra_link_args,
57 | )
58 |
59 | extensions.append(pysteps_extension)
60 |
61 | external_modules = cythonize(extensions, force=True, language_level=3)
62 |
63 | requirements = [
64 | "numpy",
65 | "jsmin",
66 | "scipy",
67 | "matplotlib",
68 | "jsonschema",
69 | ]
70 |
71 | setup(
72 | name="pysteps",
73 | version="1.17.0",
74 | author="PySteps developers",
75 | packages=find_packages(),
76 | license="LICENSE",
77 | include_package_data=True,
78 | description="Python framework for short-term ensemble prediction systems",
79 | long_description=open("README.rst").read(),
80 | long_description_content_type="text/x-rst",
81 | url="https://pysteps.github.io/",
82 | project_urls={
83 | "Source": "https://github.com/pySTEPS/pysteps",
84 | "Issues": "https://github.com/pySTEPS/pysteps/issues",
85 | "CI": "https://github.com/pySTEPS/pysteps/actions",
86 | "Changelog": "https://github.com/pySTEPS/pysteps/releases",
87 | "Documentation": "https://pysteps.readthedocs.io",
88 | },
89 | classifiers=[
90 | "Development Status :: 5 - Production/Stable",
91 | "Intended Audience :: Science/Research",
92 | "Topic :: Scientific/Engineering",
93 | "Topic :: Scientific/Engineering :: Atmospheric Science",
94 | "Topic :: Scientific/Engineering :: Hydrology",
95 | "License :: OSI Approved :: BSD License",
96 | "Programming Language :: Python :: 3 :: Only",
97 | "Programming Language :: Python :: 3.10",
98 | "Programming Language :: Python :: 3.11",
99 | "Programming Language :: Python :: 3.12",
100 | "Operating System :: OS Independent",
101 | ],
102 | ext_modules=external_modules,
103 | setup_requires=requirements,
104 | install_requires=requirements,
105 | )
106 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # Tox configuration file for pysteps projects
2 | # Need conda, tox and tox-conda installed to run
3 | #
4 | # In conda run:
5 | # > conda install -c conda-forge tox tox-conda
6 | #
7 | # Alternatively, you can install them using pip:
8 | # > pip install tox tox-conda
9 | #
10 | # Then, to run the tests, from the repo’s root run:
11 | #
12 | # > tox # Run pytests
13 | # > tox -e install # Test package installation
14 | # > tox -e black # Test for black formatting warnings
15 |
16 | [tox]
17 | envlist = py310, py311, py312
18 |
19 | [testenv]
20 | description = Run the pysteps's test suite
21 | deps =
22 | -r{toxinidir}/requirements.txt
23 | cython
24 | dask
25 | toolz
26 | pillow
27 | pyfftw
28 | h5py
29 | PyWavelets
30 | gitpython
31 | pytest
32 | pytest-cov
33 | codecov
34 | conda_deps =
35 | netCDF4
36 | pyproj
37 | cartopy
38 | pygrib
39 | rasterio
40 | conda_channels = conda-forge
41 |
42 | setenv =
43 | PYSTEPS_DATA_PATH = {toxworkdir}/pysteps-data
44 | PYSTEPSRC = {toxworkdir}/pysteps-data/pystepsrc
45 | PACKAGE_ROOT = {toxinidir}
46 | PROJ_LIB={envdir}/share/proj
47 | commands =
48 | python {toxinidir}/ci/fetch_pysteps_data.py
49 | pytest --pyargs pysteps --cov=pysteps -ra --disable-warnings
50 |
51 | [test_no_cov]
52 | commands =
53 | python {toxinidir}/ci/fetch_pysteps_data.py
54 | pytest --pyargs pysteps --disable-warnings
55 |
56 | [testenv:install]
57 | description = Test the installation of the package in a clean environment and run minimal tests
58 | deps = pytest
59 | conda_deps =
60 | changedir = {homedir}
61 | commands =
62 | pip install -U {toxinidir}/
63 | python -c "import pysteps"
64 |
65 | # Test the pysteps plugin support
66 | pip install cookiecutter
67 | cookiecutter -f --no-input https://github.com/pySTEPS/cookiecutter-pysteps-plugin -o {temp_dir}/
68 | # NB: this should match the default name for a cookiecutter-generated plugin!
69 | pip install {temp_dir}/pysteps-importer-institution-name
70 | python {toxinidir}/ci/test_plugin_support.py
71 | # Check the compiled modules
72 | python -c "from pysteps import motion"
73 | python -c "from pysteps.motion import vet"
74 | python -c "from pysteps.motion import proesmans"
75 |
76 |
77 | [testenv:install_full]
78 | description = Test the installation of the package in an environment with all the dependencies
79 | changedir = {homedir}
80 | commands =
81 | {[testenv:install]commands}
82 | {[test_no_cov]commands}
83 |
84 | [testenv:pypi]
85 | description = Test the installation of the package from the PyPI in a clean environment
86 | deps = pytest
87 | conda_deps =
88 | changedir = {homedir}
89 | commands =
90 | pip install --no-cache-dir pysteps
91 | python -c "import pysteps"
92 | {[test_no_cov]commands}
93 |
94 | [testenv:pypi_test]
95 | description = Test the installation of the package from the test-PyPI in a clean environment
96 | deps = pytest
97 | conda_deps =
98 | changedir = {homedir}
99 | commands =
100 | pip install --no-cache-dir --index-url https://test.pypi.org/simple/ --extra-index-url=https://pypi.org/simple/ pysteps
101 | python -c "import pysteps"
102 | {[test_no_cov]commands}
103 |
104 | [testenv:pypi_test_full]
105 | description = Test the installation of the package from the test-PyPI in an environment with all the dependencies
106 | changedir = {homedir}
107 | commands = {[testenv:pypi_test]commands}
108 |
109 |
110 | [testenv:docs]
111 | description = Build the html documentation using sphinx
112 | usedevelop = True
113 | deps =
114 | -r{toxinidir}/requirements.txt
115 | -r{toxinidir}/doc/requirements.txt
116 | cython
117 | conda_channels =
118 | conda-forge
119 | default
120 | changedir = doc
121 | setenv =
122 | PYSTEPS_DATA_PATH = {toxworkdir}/pysteps-data
123 | PYSTEPSRC = {toxworkdir}/pysteps-data/pystepsrc
124 | commands =
125 | python {toxinidir}/ci/fetch_pysteps_data.py
126 | sphinx-build -b html source _build
127 |
128 | [testenv:black]
129 | deps = black
130 | commands = black --check pysteps
131 |
--------------------------------------------------------------------------------